diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AccessScope.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AccessScope.cs deleted file mode 100644 index 82f23d054230..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/AccessScope.cs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace Azure.Compute.Batch -{ - /// AccessScope enums. - public readonly partial struct AccessScope : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public AccessScope(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string JobValue = "job"; - - /// Grants access to perform all operations on the Job containing the Task. - public static AccessScope Job { get; } = new AccessScope(JobValue); - /// Determines if two values are the same. - public static bool operator ==(AccessScope left, AccessScope right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(AccessScope left, AccessScope right) => !left.Equals(right); - /// Converts a to a . - public static implicit operator AccessScope(string value) => new AccessScope(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is AccessScope other && Equals(other); - /// - public bool Equals(AccessScope other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - /// - public override string ToString() => _value; - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.Serialization.cs index 1c9308bd4f8b..8f8d0d8793cb 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.Serialization.cs @@ -81,7 +81,7 @@ internal static AuthenticationTokenSettings DeserializeAuthenticationTokenSettin { return null; } - IList access = default; + IList access = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -92,10 +92,10 @@ internal static AuthenticationTokenSettings DeserializeAuthenticationTokenSettin { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(new AccessScope(item.GetString())); + array.Add(new BatchAccessScope(item.GetString())); } access = array; continue; @@ -106,7 +106,7 @@ internal static AuthenticationTokenSettings DeserializeAuthenticationTokenSettin } } serializedAdditionalRawData = rawDataDictionary; - return new AuthenticationTokenSettings(access ?? new ChangeTrackingList(), serializedAdditionalRawData); + return new AuthenticationTokenSettings(access ?? 
new ChangeTrackingList(), serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.cs index 400d876d185f..03248ad8055a 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AuthenticationTokenSettings.cs @@ -51,19 +51,19 @@ public partial class AuthenticationTokenSettings /// Initializes a new instance of . public AuthenticationTokenSettings() { - Access = new ChangeTrackingList(); + Access = new ChangeTrackingList(); } /// Initializes a new instance of . /// The Batch resources to which the token grants access. The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task. /// Keeps track of any properties unknown to the library. - internal AuthenticationTokenSettings(IList access, IDictionary serializedAdditionalRawData) + internal AuthenticationTokenSettings(IList access, IDictionary serializedAdditionalRawData) { Access = access; _serializedAdditionalRawData = serializedAdditionalRawData; } /// The Batch resources to which the token grants access. The authentication token grants access to a limited set of Batch service operations. Currently the only supported value for the access property is 'job', which grants access to all operations related to the Job which contains the Task. 
- public IList Access { get; } + public IList Access { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.Serialization.cs index 7ac9dff41aa4..fcd866a843d3 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.Serialization.cs @@ -36,10 +36,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("accountName"u8); writer.WriteStringValue(AccountName); - writer.WritePropertyName("azureFileUrl"u8); - writer.WriteStringValue(AzureFileUrl); writer.WritePropertyName("accountKey"u8); writer.WriteStringValue(AccountKey); + writer.WritePropertyName("azureFileUrl"u8); + writer.WriteStringValue(AzureFileUri.AbsoluteUri); writer.WritePropertyName("relativeMountPath"u8); writer.WriteStringValue(RelativeMountPath); if (Optional.IsDefined(MountOptions)) @@ -85,8 +85,8 @@ internal static AzureFileShareConfiguration DeserializeAzureFileShareConfigurati return null; } string accountName = default; - string azureFileUrl = default; string accountKey = default; + Uri azureFileUrl = default; string relativeMountPath = default; string mountOptions = default; IDictionary serializedAdditionalRawData = default; @@ -98,14 +98,14 @@ internal static AzureFileShareConfiguration DeserializeAzureFileShareConfigurati accountName = property.Value.GetString(); continue; } - if (property.NameEquals("azureFileUrl"u8)) + if (property.NameEquals("accountKey"u8)) { - azureFileUrl = property.Value.GetString(); + accountKey = property.Value.GetString(); continue; } - if (property.NameEquals("accountKey"u8)) + if (property.NameEquals("azureFileUrl"u8)) { - accountKey = property.Value.GetString(); + azureFileUrl = new Uri(property.Value.GetString()); continue; } if 
(property.NameEquals("relativeMountPath"u8)) @@ -126,8 +126,8 @@ internal static AzureFileShareConfiguration DeserializeAzureFileShareConfigurati serializedAdditionalRawData = rawDataDictionary; return new AzureFileShareConfiguration( accountName, - azureFileUrl, accountKey, + azureFileUrl, relativeMountPath, mountOptions, serializedAdditionalRawData); diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.cs index 76049a45e428..ebb2140fc899 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/AzureFileShareConfiguration.cs @@ -47,35 +47,35 @@ public partial class AzureFileShareConfiguration /// Initializes a new instance of . /// The Azure Storage account name. - /// The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. /// The Azure Storage account key. + /// The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. - /// , , or is null. - public AzureFileShareConfiguration(string accountName, string azureFileUrl, string accountKey, string relativeMountPath) + /// , , or is null. 
+ public AzureFileShareConfiguration(string accountName, string accountKey, Uri azureFileUri, string relativeMountPath) { Argument.AssertNotNull(accountName, nameof(accountName)); - Argument.AssertNotNull(azureFileUrl, nameof(azureFileUrl)); Argument.AssertNotNull(accountKey, nameof(accountKey)); + Argument.AssertNotNull(azureFileUri, nameof(azureFileUri)); Argument.AssertNotNull(relativeMountPath, nameof(relativeMountPath)); AccountName = accountName; - AzureFileUrl = azureFileUrl; AccountKey = accountKey; + AzureFileUri = azureFileUri; RelativeMountPath = relativeMountPath; } /// Initializes a new instance of . /// The Azure Storage account name. - /// The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. /// The Azure Storage account key. + /// The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. /// Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. /// Keeps track of any properties unknown to the library. - internal AzureFileShareConfiguration(string accountName, string azureFileUrl, string accountKey, string relativeMountPath, string mountOptions, IDictionary serializedAdditionalRawData) + internal AzureFileShareConfiguration(string accountName, string accountKey, Uri azureFileUri, string relativeMountPath, string mountOptions, IDictionary serializedAdditionalRawData) { AccountName = accountName; - AzureFileUrl = azureFileUrl; AccountKey = accountKey; + AzureFileUri = azureFileUri; RelativeMountPath = relativeMountPath; MountOptions = mountOptions; _serializedAdditionalRawData = serializedAdditionalRawData; @@ -88,10 +88,10 @@ internal AzureFileShareConfiguration() /// The Azure Storage account name. 
public string AccountName { get; set; } - /// The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. - public string AzureFileUrl { get; set; } /// The Azure Storage account key. public string AccountKey { get; set; } + /// The Azure Files URL. This is of the form 'https://{account}.file.core.windows.net/'. + public Uri AzureFileUri { get; set; } /// The relative path on the compute node where the file system will be mounted. All file systems are mounted relative to the Batch mounts directory, accessible via the AZ_BATCH_NODE_MOUNTS_DIR environment variable. public string RelativeMountPath { get; set; } /// Additional command line options to pass to the mount command. These are 'net use' options in Windows and 'mount' options in Linux. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAccessScope.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAccessScope.cs new file mode 100644 index 000000000000..a8cd89965ae6 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAccessScope.cs @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchAccessScope enums. + public readonly partial struct BatchAccessScope : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchAccessScope(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string JobValue = "job"; + + /// Grants access to perform all operations on the Job containing the Task. + public static BatchAccessScope Job { get; } = new BatchAccessScope(JobValue); + /// Determines if two values are the same. + public static bool operator ==(BatchAccessScope left, BatchAccessScope right) => left.Equals(right); + /// Determines if two values are not the same. 
+ public static bool operator !=(BatchAccessScope left, BatchAccessScope right) => !left.Equals(right); + /// Converts a to a . + public static implicit operator BatchAccessScope(string value) => new BatchAccessScope(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchAccessScope other && Equals(other); + /// + public bool Equals(BatchAccessScope other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AffinityInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAffinityInfo.Serialization.cs similarity index 67% rename from sdk/batch/Azure.Compute.Batch/src/Generated/AffinityInfo.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchAffinityInfo.Serialization.cs index 6ca014a7fed0..371d5c9e3b36 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/AffinityInfo.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAffinityInfo.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class AffinityInfo : IUtf8JsonSerializable, IJsonModel + public partial class BatchAffinityInfo : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); 
JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOpti /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(AffinityInfo)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchAffinityInfo)} does not support writing '{format}' format."); } writer.WritePropertyName("affinityId"u8); @@ -53,19 +53,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - AffinityInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchAffinityInfo IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(AffinityInfo)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchAffinityInfo)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeAffinityInfo(document.RootElement, options); + return DeserializeBatchAffinityInfo(document.RootElement, options); } - internal static AffinityInfo DeserializeAffinityInfo(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchAffinityInfo DeserializeBatchAffinityInfo(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -89,46 +89,46 @@ internal static AffinityInfo DeserializeAffinityInfo(JsonElement element, ModelR } } serializedAdditionalRawData = rawDataDictionary; - return new AffinityInfo(affinityId, serializedAdditionalRawData); + return new BatchAffinityInfo(affinityId, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(AffinityInfo)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchAffinityInfo)} does not support writing '{options.Format}' format."); } } - AffinityInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchAffinityInfo IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeAffinityInfo(document.RootElement, options); + return DeserializeBatchAffinityInfo(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(AffinityInfo)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchAffinityInfo)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static AffinityInfo FromResponse(Response response) + internal static BatchAffinityInfo FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeAffinityInfo(document.RootElement); + return DeserializeBatchAffinityInfo(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/AffinityInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAffinityInfo.cs similarity index 86% rename from sdk/batch/Azure.Compute.Batch/src/Generated/AffinityInfo.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchAffinityInfo.cs index 340f6ac3f4eb..b5de8d985ca3 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/AffinityInfo.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAffinityInfo.cs @@ -14,7 +14,7 @@ namespace Azure.Compute.Batch /// A locality hint that can be used by the Batch service to select a Compute Node /// on which to start a Task. /// - public partial class AffinityInfo + public partial class BatchAffinityInfo { /// /// Keeps track of any properties unknown to the library. @@ -48,27 +48,27 @@ public partial class AffinityInfo /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. /// is null. - public AffinityInfo(string affinityId) + public BatchAffinityInfo(string affinityId) { Argument.AssertNotNull(affinityId, nameof(affinityId)); AffinityId = affinityId; } - /// Initializes a new instance of . + /// Initializes a new instance of . 
/// An opaque string representing the location of a Compute Node or a Task that has run previously. You can pass the affinityId of a Node to indicate that this Task needs to run on that Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. /// Keeps track of any properties unknown to the library. - internal AffinityInfo(string affinityId, IDictionary serializedAdditionalRawData) + internal BatchAffinityInfo(string affinityId, IDictionary serializedAdditionalRawData) { AffinityId = affinityId; _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal AffinityInfo() + /// Initializes a new instance of for deserialization. + internal BatchAffinityInfo() { } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OnAllBatchTasksComplete.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAllTasksCompleteMode.cs similarity index 50% rename from sdk/batch/Azure.Compute.Batch/src/Generated/OnAllBatchTasksComplete.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchAllTasksCompleteMode.cs index e4be41307326..069ea7c44ad1 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/OnAllBatchTasksComplete.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchAllTasksCompleteMode.cs @@ -11,13 +11,13 @@ namespace Azure.Compute.Batch { /// The action the Batch service should take when all Tasks in the Job are in the completed state. - public readonly partial struct OnAllBatchTasksComplete : IEquatable + public readonly partial struct BatchAllTasksCompleteMode : IEquatable { private readonly string _value; - /// Initializes a new instance of . + /// Initializes a new instance of . /// is null. - public OnAllBatchTasksComplete(string value) + public BatchAllTasksCompleteMode(string value) { _value = value ?? 
throw new ArgumentNullException(nameof(value)); } @@ -26,21 +26,21 @@ public OnAllBatchTasksComplete(string value) private const string TerminateJobValue = "terminatejob"; /// Do nothing. The Job remains active unless terminated or disabled by some other means. - public static OnAllBatchTasksComplete NoAction { get; } = new OnAllBatchTasksComplete(NoActionValue); + public static BatchAllTasksCompleteMode NoAction { get; } = new BatchAllTasksCompleteMode(NoActionValue); /// Terminate the Job. The Job's terminationReason is set to 'AllTasksComplete'. - public static OnAllBatchTasksComplete TerminateJob { get; } = new OnAllBatchTasksComplete(TerminateJobValue); - /// Determines if two values are the same. - public static bool operator ==(OnAllBatchTasksComplete left, OnAllBatchTasksComplete right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(OnAllBatchTasksComplete left, OnAllBatchTasksComplete right) => !left.Equals(right); - /// Converts a to a . - public static implicit operator OnAllBatchTasksComplete(string value) => new OnAllBatchTasksComplete(value); + public static BatchAllTasksCompleteMode TerminateJob { get; } = new BatchAllTasksCompleteMode(TerminateJobValue); + /// Determines if two values are the same. + public static bool operator ==(BatchAllTasksCompleteMode left, BatchAllTasksCompleteMode right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchAllTasksCompleteMode left, BatchAllTasksCompleteMode right) => !left.Equals(right); + /// Converts a to a . 
+ public static implicit operator BatchAllTasksCompleteMode(string value) => new BatchAllTasksCompleteMode(value); /// [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is OnAllBatchTasksComplete other && Equals(other); + public override bool Equals(object obj) => obj is BatchAllTasksCompleteMode other && Equals(other); /// - public bool Equals(OnAllBatchTasksComplete other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + public bool Equals(BatchAllTasksCompleteMode other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); /// [EditorBrowsable(EditorBrowsableState.Never)] diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.Serialization.cs index 175d08f2924d..7ebcdc4fd7b8 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.Serialization.cs @@ -38,10 +38,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WriteStringValue(Thumbprint); writer.WritePropertyName("thumbprintAlgorithm"u8); writer.WriteStringValue(ThumbprintAlgorithm); - if (options.Format != "W" && Optional.IsDefined(Url)) + if (options.Format != "W" && Optional.IsDefined(Uri)) { writer.WritePropertyName("url"u8); - writer.WriteStringValue(Url); + writer.WriteStringValue(Uri.AbsoluteUri); } if (options.Format != "W" && Optional.IsDefined(State)) { @@ -74,7 +74,7 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WriteObjectValue(DeleteCertificateError, options); } writer.WritePropertyName("data"u8); - writer.WriteStringValue(Data); + writer.WriteBase64StringValue(Data.ToArray(), "D"); if (Optional.IsDefined(CertificateFormat)) { writer.WritePropertyName("certificateFormat"u8); @@ -124,14 +124,14 @@ 
internal static BatchCertificate DeserializeBatchCertificate(JsonElement element } string thumbprint = default; string thumbprintAlgorithm = default; - string url = default; + Uri url = default; BatchCertificateState? state = default; DateTimeOffset? stateTransitionTime = default; BatchCertificateState? previousState = default; DateTimeOffset? previousStateTransitionTime = default; string publicData = default; - DeleteBatchCertificateError deleteCertificateError = default; - string data = default; + BatchCertificateDeleteError deleteCertificateError = default; + BinaryData data = default; BatchCertificateFormat? certificateFormat = default; string password = default; IDictionary serializedAdditionalRawData = default; @@ -150,7 +150,11 @@ internal static BatchCertificate DeserializeBatchCertificate(JsonElement element } if (property.NameEquals("url"u8)) { - url = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + url = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("state"u8)) @@ -200,12 +204,12 @@ internal static BatchCertificate DeserializeBatchCertificate(JsonElement element { continue; } - deleteCertificateError = DeleteBatchCertificateError.DeserializeDeleteBatchCertificateError(property.Value, options); + deleteCertificateError = BatchCertificateDeleteError.DeserializeBatchCertificateDeleteError(property.Value, options); continue; } if (property.NameEquals("data"u8)) { - data = property.Value.GetString(); + data = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D")); continue; } if (property.NameEquals("certificateFormat"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.cs index 6e858116505e..1f9cc653afbe 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificate.cs @@ -53,7 +53,7 @@ public partial 
class BatchCertificate /// The algorithm used to derive the thumbprint. This must be sha1. /// The base64-encoded contents of the Certificate. The maximum size is 10KB. /// , or is null. - public BatchCertificate(string thumbprint, string thumbprintAlgorithm, string data) + public BatchCertificate(string thumbprint, string thumbprintAlgorithm, BinaryData data) { Argument.AssertNotNull(thumbprint, nameof(thumbprint)); Argument.AssertNotNull(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); @@ -67,7 +67,7 @@ public BatchCertificate(string thumbprint, string thumbprintAlgorithm, string da /// Initializes a new instance of . /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). /// The algorithm used to derive the thumbprint. This must be sha1. - /// The URL of the Certificate. + /// The URL of the Certificate. /// The state of the Certificate. /// The time at which the Certificate entered its current state. /// The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. @@ -78,11 +78,11 @@ public BatchCertificate(string thumbprint, string thumbprintAlgorithm, string da /// The format of the Certificate data. /// The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. /// Keeps track of any properties unknown to the library. - internal BatchCertificate(string thumbprint, string thumbprintAlgorithm, string url, BatchCertificateState? state, DateTimeOffset? stateTransitionTime, BatchCertificateState? previousState, DateTimeOffset? previousStateTransitionTime, string publicData, DeleteBatchCertificateError deleteCertificateError, string data, BatchCertificateFormat? certificateFormat, string password, IDictionary serializedAdditionalRawData) + internal BatchCertificate(string thumbprint, string thumbprintAlgorithm, Uri uri, BatchCertificateState? state, DateTimeOffset? 
stateTransitionTime, BatchCertificateState? previousState, DateTimeOffset? previousStateTransitionTime, string publicData, BatchCertificateDeleteError deleteCertificateError, BinaryData data, BatchCertificateFormat? certificateFormat, string password, IDictionary serializedAdditionalRawData) { Thumbprint = thumbprint; ThumbprintAlgorithm = thumbprintAlgorithm; - Url = url; + Uri = uri; State = state; StateTransitionTime = stateTransitionTime; PreviousState = previousState; @@ -105,7 +105,7 @@ internal BatchCertificate() /// The algorithm used to derive the thumbprint. This must be sha1. public string ThumbprintAlgorithm { get; set; } /// The URL of the Certificate. - public string Url { get; } + public Uri Uri { get; } /// The state of the Certificate. public BatchCertificateState? State { get; } /// The time at which the Certificate entered its current state. @@ -117,9 +117,24 @@ internal BatchCertificate() /// The public part of the Certificate as a base-64 encoded .cer file. public string PublicData { get; } /// The error that occurred on the last attempt to delete this Certificate. This property is set only if the Certificate is in the DeleteFailed state. - public DeleteBatchCertificateError DeleteCertificateError { get; } - /// The base64-encoded contents of the Certificate. The maximum size is 10KB. - public string Data { get; set; } + public BatchCertificateDeleteError DeleteCertificateError { get; } + /// + /// The base64-encoded contents of the Certificate. The maximum size is 10KB. + /// + /// To assign a byte[] to this property use . + /// The byte[] will be serialized to a Base64 encoded string. + /// + /// + /// Examples: + /// + /// + /// BinaryData.FromBytes(new byte[] { 1, 2, 3 }) + /// Creates a payload of "AQID". + /// + /// + /// + /// + public BinaryData Data { get; set; } /// The format of the Certificate data. public BatchCertificateFormat? CertificateFormat { get; set; } /// The password to access the Certificate's private key. 
This must be omitted if the Certificate format is cer. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateDeleteError.Serialization.cs similarity index 78% rename from sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateDeleteError.Serialization.cs index 0c139eeb767a..bff448acedbe 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateDeleteError.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class DeleteBatchCertificateError : IUtf8JsonSerializable, IJsonModel + public partial class BatchCertificateDeleteError : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelR /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(DeleteBatchCertificateError)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchCertificateDeleteError)} does not support writing '{format}' format."); } if (Optional.IsDefined(Code)) @@ -71,19 +71,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - DeleteBatchCertificateError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchCertificateDeleteError IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(DeleteBatchCertificateError)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchCertificateDeleteError)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeDeleteBatchCertificateError(document.RootElement, options); + return DeserializeBatchCertificateDeleteError(document.RootElement, options); } - internal static DeleteBatchCertificateError DeserializeDeleteBatchCertificateError(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchCertificateDeleteError DeserializeBatchCertificateDeleteError(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -128,46 +128,46 @@ internal static DeleteBatchCertificateError DeserializeDeleteBatchCertificateErr } } serializedAdditionalRawData = rawDataDictionary; - return new DeleteBatchCertificateError(code, message, values 
?? new ChangeTrackingList(), serializedAdditionalRawData); + return new BatchCertificateDeleteError(code, message, values ?? new ChangeTrackingList(), serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(DeleteBatchCertificateError)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchCertificateDeleteError)} does not support writing '{options.Format}' format."); } } - DeleteBatchCertificateError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchCertificateDeleteError IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeDeleteBatchCertificateError(document.RootElement, options); + return DeserializeBatchCertificateDeleteError(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(DeleteBatchCertificateError)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchCertificateDeleteError)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static DeleteBatchCertificateError FromResponse(Response response) + internal static BatchCertificateDeleteError FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeDeleteBatchCertificateError(document.RootElement); + return DeserializeBatchCertificateDeleteError(document.RootElement); } /// Convert into a . 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateDeleteError.cs similarity index 90% rename from sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateDeleteError.cs index 78ecee1fc422..a731bc507c15 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/DeleteBatchCertificateError.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCertificateDeleteError.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// An error encountered by the Batch service when deleting a Certificate. - public partial class DeleteBatchCertificateError + public partial class BatchCertificateDeleteError { /// /// Keeps track of any properties unknown to the library. @@ -45,18 +45,18 @@ public partial class DeleteBatchCertificateError /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - internal DeleteBatchCertificateError() + /// Initializes a new instance of . + internal BatchCertificateDeleteError() { Values = new ChangeTrackingList(); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. /// A message describing the Certificate deletion error, intended to be suitable for display in a user interface. /// A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. /// Keeps track of any properties unknown to the library. 
- internal DeleteBatchCertificateError(string code, string message, IReadOnlyList values, IDictionary serializedAdditionalRawData) + internal BatchCertificateDeleteError(string code, string message, IReadOnlyList values, IDictionary serializedAdditionalRawData) { Code = code; Message = message; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs index a206ff327f1e..db61c05082a9 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchClient.cs @@ -65,7 +65,7 @@ public BatchClient(Uri endpoint, TokenCredential credential, BatchClientOptions /// Gets information about the specified Application. /// The ID of the Application. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -80,20 +80,20 @@ public BatchClient(Uri endpoint, TokenCredential credential, BatchClientOptions /// available to Compute Nodes, use the Azure portal or the Azure Resource Manager /// API. /// - /// - public virtual async Task> GetApplicationAsync(string applicationId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetApplicationAsync(string applicationId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(applicationId, nameof(applicationId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetApplicationAsync(applicationId, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await GetApplicationAsync(applicationId, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return Response.FromValue(BatchApplication.FromResponse(response), response); } /// Gets information about the specified Application. /// The ID of the Application. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -108,13 +108,13 @@ public virtual async Task> GetApplicationAsync(string /// available to Compute Nodes, use the Azure portal or the Azure Resource Manager /// API. /// - /// - public virtual Response GetApplication(string applicationId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetApplication(string applicationId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(applicationId, nameof(applicationId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetApplication(applicationId, timeOutInSeconds, ocpdate, context); + Response response = GetApplication(applicationId, timeOutInSeconds, ocpDate, context); return Response.FromValue(BatchApplication.FromResponse(response), response); } @@ -128,14 +128,14 @@ public virtual Response GetApplication(string applicationId, i /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Application. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -145,8 +145,8 @@ public virtual Response GetApplication(string applicationId, i /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetApplicationAsync(string applicationId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + /// + public virtual async Task GetApplicationAsync(string applicationId, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { Argument.AssertNotNullOrEmpty(applicationId, nameof(applicationId)); @@ -154,7 +154,7 @@ public virtual async Task GetApplicationAsync(string applicationId, in scope.Start(); try { - using HttpMessage message = CreateGetApplicationRequest(applicationId, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateGetApplicationRequest(applicationId, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -174,14 +174,14 @@ public virtual async Task GetApplicationAsync(string applicationId, in /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Application. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -191,8 +191,8 @@ public virtual async Task GetApplicationAsync(string applicationId, in /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetApplication(string applicationId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + /// + public virtual Response GetApplication(string applicationId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { Argument.AssertNotNullOrEmpty(applicationId, nameof(applicationId)); @@ -200,7 +200,7 @@ public virtual Response GetApplication(string applicationId, int? 
timeOutInSecon scope.Start(); try { - using HttpMessage message = CreateGetApplicationRequest(applicationId, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateGetApplicationRequest(applicationId, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -213,7 +213,7 @@ public virtual Response GetApplication(string applicationId, int? timeOutInSecon /// Creates a Pool to the specified Account. /// The Pool to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -225,21 +225,21 @@ public virtual Response GetApplication(string applicationId, int? timeOutInSecon /// secret project names. This information may appear in telemetry logs accessible /// to Microsoft Support engineers. /// - /// - public virtual async Task CreatePoolAsync(BatchPoolCreateContent pool, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task CreatePoolAsync(BatchPoolCreateOptions pool, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(pool, nameof(pool)); using RequestContent content = pool.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await CreatePoolAsync(content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await CreatePoolAsync(content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return response; } /// Creates a Pool to the specified Account. /// The Pool to be created. 
/// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -251,14 +251,14 @@ public virtual async Task CreatePoolAsync(BatchPoolCreateContent pool, /// secret project names. This information may appear in telemetry logs accessible /// to Microsoft Support engineers. /// - /// - public virtual Response CreatePool(BatchPoolCreateContent pool, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response CreatePool(BatchPoolCreateOptions pool, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(pool, nameof(pool)); using RequestContent content = pool.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = CreatePool(content, timeOutInSeconds, ocpdate, context); + Response response = CreatePool(content, timeOutInSeconds, ocpDate, context); return response; } @@ -272,14 +272,14 @@ public virtual Response CreatePool(BatchPoolCreateContent pool, int? timeOutInSe /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -288,8 +288,8 @@ public virtual Response CreatePool(BatchPoolCreateContent pool, int? timeOutInSe /// is null. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task CreatePoolAsync(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task CreatePoolAsync(RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNull(content, nameof(content)); @@ -297,7 +297,7 @@ public virtual async Task CreatePoolAsync(RequestContent content, int? scope.Start(); try { - using HttpMessage message = CreateCreatePoolRequest(content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreatePoolRequest(content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -317,14 +317,14 @@ public virtual async Task CreatePoolAsync(RequestContent content, int? /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -333,8 +333,8 @@ public virtual async Task CreatePoolAsync(RequestContent content, int? /// is null. 
/// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response CreatePool(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response CreatePool(RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNull(content, nameof(content)); @@ -342,7 +342,7 @@ public virtual Response CreatePool(RequestContent content, int? timeOutInSeconds scope.Start(); try { - using HttpMessage message = CreateCreatePoolRequest(content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreatePoolRequest(content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -365,7 +365,7 @@ public virtual Response CreatePool(RequestContent content, int? timeOutInSeconds /// /// The ID of the Pool to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -376,16 +376,15 @@ public virtual Response CreatePool(RequestContent content, int? timeOutInSeconds /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DeletePoolAsync(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual async Task DeletePoolInternalAsync(string poolId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeletePool"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeletePoolInternal"); scope.Start(); try { - using HttpMessage message = CreateDeletePoolRequest(poolId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDeletePoolInternalRequest(poolId, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -408,7 +407,7 @@ public virtual async Task DeletePoolAsync(string poolId, int? timeOutI /// /// The ID of the Pool to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -419,16 +418,99 @@ public virtual async Task DeletePoolAsync(string poolId, int? timeOutI /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DeletePool(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual Response DeletePoolInternal(string poolId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeletePool"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeletePoolInternal"); scope.Start(); try { - using HttpMessage message = CreateDeletePoolRequest(poolId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDeletePoolInternalRequest(poolId, timeOutInSeconds, ocpDate, requestConditions, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Gets basic properties of a Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + internal virtual async Task PoolExistsAsync(string poolId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.PoolExists"); + scope.Start(); + try + { + using HttpMessage message = CreatePoolExistsRequest(poolId, timeOutInSeconds, ocpDate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Gets basic properties of a Pool. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Pool to get. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + internal virtual Response PoolExists(string poolId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.PoolExists"); + scope.Start(); + try + { + using HttpMessage message = CreatePoolExistsRequest(poolId, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -441,7 +523,7 @@ public virtual Response DeletePool(string poolId, int? timeOutInSeconds = null, /// Gets information about the specified Pool. /// The ID of the Pool to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -452,20 +534,20 @@ public virtual Response DeletePool(string poolId, int? timeOutInSeconds = null, /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - /// - public virtual async Task> GetPoolAsync(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetPoolAsync(string poolId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetPoolAsync(poolId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context).ConfigureAwait(false); + Response response = await GetPoolAsync(poolId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context).ConfigureAwait(false); return Response.FromValue(BatchPool.FromResponse(response), response); } /// Gets information about the specified Pool. /// The ID of the Pool to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -476,13 +558,13 @@ public virtual async Task> GetPoolAsync(string poolId, int? /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - /// - public virtual Response GetPool(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetPool(string poolId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetPool(poolId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + Response response = GetPool(poolId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context); return Response.FromValue(BatchPool.FromResponse(response), response); } @@ -496,14 +578,14 @@ public virtual Response GetPool(string poolId, int? timeOutInSeconds /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Pool to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -516,8 +598,8 @@ public virtual Response GetPool(string poolId, int? timeOutInSeconds /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetPoolAsync(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + /// + public virtual async Task GetPoolAsync(string poolId, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); @@ -525,7 +607,7 @@ public virtual async Task GetPoolAsync(string poolId, int? timeOutInSe scope.Start(); try { - using HttpMessage message = CreateGetPoolRequest(poolId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + using HttpMessage message = CreateGetPoolRequest(poolId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -545,14 +627,14 @@ public virtual async Task GetPoolAsync(string poolId, int? timeOutInSe /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Pool to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -565,8 +647,8 @@ public virtual async Task GetPoolAsync(string poolId, int? timeOutInSe /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetPool(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + /// + public virtual Response GetPool(string poolId, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); @@ -574,7 +656,7 @@ public virtual Response GetPool(string poolId, int? timeOutInSeconds, DateTimeOf scope.Start(); try { - using HttpMessage message = CreateGetPoolRequest(poolId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + using HttpMessage message = CreateGetPoolRequest(poolId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -597,7 +679,7 @@ public virtual Response GetPool(string poolId, int? timeOutInSeconds, DateTimeOf /// The ID of the Pool to get. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -608,8 +690,8 @@ public virtual Response GetPool(string poolId, int? timeOutInSeconds, DateTimeOf /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task UpdatePoolAsync(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task UpdatePoolAsync(string poolId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(content, nameof(content)); @@ -618,7 +700,7 @@ public virtual async Task UpdatePoolAsync(string poolId, RequestConten scope.Start(); try { - using HttpMessage message = CreateUpdatePoolRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateUpdatePoolRequest(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -641,7 +723,7 @@ public virtual async Task UpdatePoolAsync(string poolId, RequestConten /// The ID of the Pool to get. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -652,8 +734,8 @@ public virtual async Task UpdatePoolAsync(string poolId, RequestConten /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response UpdatePool(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response UpdatePool(string poolId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(content, nameof(content)); @@ -662,7 +744,7 @@ public virtual Response UpdatePool(string poolId, RequestContent content, int? t scope.Start(); try { - using HttpMessage message = CreateUpdatePoolRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateUpdatePoolRequest(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -685,7 +767,7 @@ public virtual Response UpdatePool(string poolId, RequestContent content, int? t /// /// The ID of the Pool on which to disable automatic scaling. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -695,8 +777,8 @@ public virtual Response UpdatePool(string poolId, RequestContent content, int? t /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DisablePoolAutoScaleAsync(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task DisablePoolAutoScaleAsync(string poolId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); @@ -704,7 +786,7 @@ public virtual async Task DisablePoolAutoScaleAsync(string poolId, int scope.Start(); try { - using HttpMessage message = CreateDisablePoolAutoScaleRequest(poolId, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateDisablePoolAutoScaleRequest(poolId, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -727,7 +809,7 @@ public virtual async Task DisablePoolAutoScaleAsync(string poolId, int /// /// The ID of the Pool on which to disable automatic scaling. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -737,8 +819,8 @@ public virtual async Task DisablePoolAutoScaleAsync(string poolId, int /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DisablePoolAutoScale(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response DisablePoolAutoScale(string poolId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); @@ -746,7 +828,7 @@ public virtual Response DisablePoolAutoScale(string poolId, int? 
timeOutInSecond scope.Start(); try { - using HttpMessage message = CreateDisablePoolAutoScaleRequest(poolId, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateDisablePoolAutoScaleRequest(poolId, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -758,16 +840,16 @@ public virtual Response DisablePoolAutoScale(string poolId, int? timeOutInSecond /// Enables automatic scaling for a Pool. /// The ID of the Pool to get. - /// The options to use for enabling automatic scaling. + /// The options to use for enabling automatic scaling. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// or is null. + /// or is null. /// is an empty string, and was expected to be non-empty. /// /// You cannot enable automatic scaling on a Pool if a resize operation is in @@ -777,30 +859,30 @@ public virtual Response DisablePoolAutoScale(string poolId, int? timeOutInSecond /// and/or a new evaluation interval. You cannot call this API for the same Pool /// more than once every 30 seconds. /// - /// - public virtual async Task EnablePoolAutoScaleAsync(string poolId, BatchPoolEnableAutoScaleContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual async Task EnablePoolAutoScaleAsync(string poolId, BatchPoolAutoScaleEnableOptions enableAutoScaleOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(enableAutoScaleOptions, nameof(enableAutoScaleOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = enableAutoScaleOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await EnablePoolAutoScaleAsync(poolId, content0, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + Response response = await EnablePoolAutoScaleAsync(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context).ConfigureAwait(false); return response; } /// Enables automatic scaling for a Pool. /// The ID of the Pool to get. - /// The options to use for enabling automatic scaling. + /// The options to use for enabling automatic scaling. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// or is null. + /// or is null. /// is an empty string, and was expected to be non-empty. /// /// You cannot enable automatic scaling on a Pool if a resize operation is in @@ -810,15 +892,15 @@ public virtual async Task EnablePoolAutoScaleAsync(string poolId, Batc /// and/or a new evaluation interval. You cannot call this API for the same Pool /// more than once every 30 seconds. /// - /// - public virtual Response EnablePoolAutoScale(string poolId, BatchPoolEnableAutoScaleContent content, int? 
timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual Response EnablePoolAutoScale(string poolId, BatchPoolAutoScaleEnableOptions enableAutoScaleOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(enableAutoScaleOptions, nameof(enableAutoScaleOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = enableAutoScaleOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = EnablePoolAutoScale(poolId, content0, timeOutInSeconds, ocpdate, requestConditions, context); + Response response = EnablePoolAutoScale(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context); return response; } @@ -832,7 +914,7 @@ public virtual Response EnablePoolAutoScale(string poolId, BatchPoolEnableAutoSc /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -840,7 +922,7 @@ public virtual Response EnablePoolAutoScale(string poolId, BatchPoolEnableAutoSc /// The ID of the Pool to get. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -851,8 +933,8 @@ public virtual Response EnablePoolAutoScale(string poolId, BatchPoolEnableAutoSc /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task EnablePoolAutoScaleAsync(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task EnablePoolAutoScaleAsync(string poolId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(content, nameof(content)); @@ -861,7 +943,7 @@ public virtual async Task EnablePoolAutoScaleAsync(string poolId, Requ scope.Start(); try { - using HttpMessage message = CreateEnablePoolAutoScaleRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateEnablePoolAutoScaleRequest(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -881,7 +963,7 @@ public virtual async Task EnablePoolAutoScaleAsync(string poolId, Requ /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -889,7 +971,7 @@ public virtual async Task EnablePoolAutoScaleAsync(string poolId, Requ /// The ID of the Pool to get. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". 
- /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -900,8 +982,8 @@ public virtual async Task EnablePoolAutoScaleAsync(string poolId, Requ /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response EnablePoolAutoScale(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response EnablePoolAutoScale(string poolId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(content, nameof(content)); @@ -910,7 +992,7 @@ public virtual Response EnablePoolAutoScale(string poolId, RequestContent conten scope.Start(); try { - using HttpMessage message = CreateEnablePoolAutoScaleRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateEnablePoolAutoScaleRequest(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -922,59 +1004,59 @@ public virtual Response EnablePoolAutoScale(string poolId, RequestContent conten /// Gets the result of evaluating an automatic scaling formula on the Pool. /// The ID of the Pool on which to evaluate the automatic scaling formula. - /// The options to use for evaluating the automatic scaling formula. + /// The options to use for evaluating the automatic scaling formula. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The cancellation token to use. - /// or is null. + /// or is null. /// is an empty string, and was expected to be non-empty. /// /// This API is primarily for validating an autoscale formula, as it simply returns /// the result without applying the formula to the Pool. The Pool must have auto /// scaling enabled in order to evaluate a formula. /// - /// - public virtual async Task> EvaluatePoolAutoScaleAsync(string poolId, BatchPoolEvaluateAutoScaleContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> EvaluatePoolAutoScaleAsync(string poolId, BatchPoolAutoScaleEvaluateOptions evaluateAutoScaleOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(evaluateAutoScaleOptions, nameof(evaluateAutoScaleOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = evaluateAutoScaleOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await EvaluatePoolAutoScaleAsync(poolId, content0, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await EvaluatePoolAutoScaleAsync(poolId, content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return Response.FromValue(AutoScaleRun.FromResponse(response), response); } /// Gets the result of evaluating an automatic scaling formula on the Pool. /// The ID of the Pool on which to evaluate the automatic scaling formula. 
- /// The options to use for evaluating the automatic scaling formula. + /// The options to use for evaluating the automatic scaling formula. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The cancellation token to use. - /// or is null. + /// or is null. /// is an empty string, and was expected to be non-empty. /// /// This API is primarily for validating an autoscale formula, as it simply returns /// the result without applying the formula to the Pool. The Pool must have auto /// scaling enabled in order to evaluate a formula. /// - /// - public virtual Response EvaluatePoolAutoScale(string poolId, BatchPoolEvaluateAutoScaleContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response EvaluatePoolAutoScale(string poolId, BatchPoolAutoScaleEvaluateOptions evaluateAutoScaleOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(evaluateAutoScaleOptions, nameof(evaluateAutoScaleOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = evaluateAutoScaleOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = EvaluatePoolAutoScale(poolId, content0, timeOutInSeconds, ocpdate, context); + Response response = EvaluatePoolAutoScale(poolId, content, timeOutInSeconds, ocpDate, context); return Response.FromValue(AutoScaleRun.FromResponse(response), response); } @@ -988,7 +1070,7 @@ public virtual Response EvaluatePoolAutoScale(string poolId, Batch /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -996,7 +1078,7 @@ public virtual Response EvaluatePoolAutoScale(string poolId, Batch /// The ID of the Pool on which to evaluate the automatic scaling formula. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1006,8 +1088,8 @@ public virtual Response EvaluatePoolAutoScale(string poolId, Batch /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task EvaluatePoolAutoScaleAsync(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) + /// + public virtual async Task EvaluatePoolAutoScaleAsync(string poolId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(content, nameof(content)); @@ -1016,7 +1098,7 @@ public virtual async Task EvaluatePoolAutoScaleAsync(string poolId, Re scope.Start(); try { - using HttpMessage message = CreateEvaluatePoolAutoScaleRequest(poolId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateEvaluatePoolAutoScaleRequest(poolId, content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -1036,7 +1118,7 @@ public virtual async Task EvaluatePoolAutoScaleAsync(string poolId, Re /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -1044,7 +1126,7 @@ public virtual async Task EvaluatePoolAutoScaleAsync(string poolId, Re /// The ID of the Pool on which to evaluate the automatic scaling formula. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1054,8 +1136,8 @@ public virtual async Task EvaluatePoolAutoScaleAsync(string poolId, Re /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. 
- /// - public virtual Response EvaluatePoolAutoScale(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response EvaluatePoolAutoScale(string poolId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(content, nameof(content)); @@ -1064,7 +1146,7 @@ public virtual Response EvaluatePoolAutoScale(string poolId, RequestContent cont scope.Start(); try { - using HttpMessage message = CreateEvaluatePoolAutoScaleRequest(poolId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateEvaluatePoolAutoScaleRequest(poolId, content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -1076,16 +1158,16 @@ public virtual Response EvaluatePoolAutoScale(string poolId, RequestContent cont /// Changes the number of Compute Nodes that are assigned to a Pool. /// The ID of the Pool to get. - /// The options to use for resizing the pool. + /// The options to use for resizing the pool. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// or is null. + /// or is null. /// is an empty string, and was expected to be non-empty. /// /// You can only resize a Pool when its allocation state is steady. 
If the Pool is @@ -1096,30 +1178,29 @@ public virtual Response EvaluatePoolAutoScale(string poolId, RequestContent cont /// Batch service chooses which Compute Nodes to remove. To remove specific Compute /// Nodes, use the Pool remove Compute Nodes API instead. /// - /// - public virtual async Task ResizePoolAsync(string poolId, BatchPoolResizeContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + internal virtual async Task ResizePoolInternalAsync(string poolId, BatchPoolResizeOptions resizeOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(resizeOptions, nameof(resizeOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = resizeOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await ResizePoolAsync(poolId, content0, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + Response response = await ResizePoolInternalAsync(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context).ConfigureAwait(false); return response; } /// Changes the number of Compute Nodes that are assigned to a Pool. /// The ID of the Pool to get. - /// The options to use for resizing the pool. + /// The options to use for resizing the pool. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// or is null. + /// or is null. /// is an empty string, and was expected to be non-empty. /// /// You can only resize a Pool when its allocation state is steady. If the Pool is @@ -1130,15 +1211,14 @@ public virtual async Task ResizePoolAsync(string poolId, BatchPoolResi /// Batch service chooses which Compute Nodes to remove. To remove specific Compute /// Nodes, use the Pool remove Compute Nodes API instead. /// - /// - public virtual Response ResizePool(string poolId, BatchPoolResizeContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + internal virtual Response ResizePoolInternal(string poolId, BatchPoolResizeOptions resizeOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(resizeOptions, nameof(resizeOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = resizeOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = ResizePool(poolId, content0, timeOutInSeconds, ocpdate, requestConditions, context); + Response response = ResizePoolInternal(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context); return response; } @@ -1152,7 +1232,7 @@ public virtual Response ResizePool(string poolId, BatchPoolResizeContent content /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -1160,7 +1240,7 @@ public virtual Response ResizePool(string poolId, BatchPoolResizeContent content /// The ID of the Pool to get. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1171,17 +1251,16 @@ public virtual Response ResizePool(string poolId, BatchPoolResizeContent content /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task ResizePoolAsync(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual async Task ResizePoolInternalAsync(string poolId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.ResizePool"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.ResizePoolInternal"); scope.Start(); try { - using HttpMessage message = CreateResizePoolRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateResizePoolInternalRequest(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -1201,7 +1280,7 @@ public virtual async Task ResizePoolAsync(string poolId, RequestConten /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -1209,7 +1288,7 @@ public virtual async Task ResizePoolAsync(string poolId, RequestConten /// The ID of the Pool to get. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1220,17 +1299,16 @@ public virtual async Task ResizePoolAsync(string poolId, RequestConten /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response ResizePool(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual Response ResizePoolInternal(string poolId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.ResizePool"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.ResizePoolInternal"); scope.Start(); try { - using HttpMessage message = CreateResizePoolRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateResizePoolInternalRequest(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -1253,7 +1331,7 @@ public virtual Response ResizePool(string poolId, RequestContent content, int? t /// /// The ID of the Pool to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1264,16 +1342,15 @@ public virtual Response ResizePool(string poolId, RequestContent content, int? t /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task StopPoolResizeAsync(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual async Task StopPoolResizeInternalAsync(string poolId, TimeSpan? 
timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.StopPoolResize"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.StopPoolResizeInternal"); scope.Start(); try { - using HttpMessage message = CreateStopPoolResizeRequest(poolId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateStopPoolResizeInternalRequest(poolId, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -1296,7 +1373,7 @@ public virtual async Task StopPoolResizeAsync(string poolId, int? time /// /// The ID of the Pool to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1307,16 +1384,15 @@ public virtual async Task StopPoolResizeAsync(string poolId, int? time /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response StopPoolResize(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual Response StopPoolResizeInternal(string poolId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.StopPoolResize"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.StopPoolResizeInternal"); scope.Start(); try { - using HttpMessage message = CreateStopPoolResizeRequest(poolId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateStopPoolResizeInternalRequest(poolId, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -1330,7 +1406,7 @@ public virtual Response StopPoolResize(string poolId, int? timeOutInSeconds = nu /// The ID of the Pool to update. /// The options to use for replacing properties on the pool. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1343,15 +1419,15 @@ public virtual Response StopPoolResize(string poolId, int? timeOutInSeconds = nu /// the Pool has a StartTask associated with it and if StartTask is not specified /// with this request, then the Batch service will remove the existing StartTask. /// - /// - public virtual async Task ReplacePoolPropertiesAsync(string poolId, BatchPoolReplaceContent pool, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task ReplacePoolPropertiesAsync(string poolId, BatchPoolReplaceOptions pool, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(pool, nameof(pool)); using RequestContent content = pool.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await ReplacePoolPropertiesAsync(poolId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await ReplacePoolPropertiesAsync(poolId, content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return response; } @@ -1359,7 +1435,7 @@ public virtual async Task ReplacePoolPropertiesAsync(string poolId, Ba /// The ID of the Pool to update. /// The options to use for replacing properties on the pool. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1372,15 +1448,15 @@ public virtual async Task ReplacePoolPropertiesAsync(string poolId, Ba /// the Pool has a StartTask associated with it and if StartTask is not specified /// with this request, then the Batch service will remove the existing StartTask. /// - /// - public virtual Response ReplacePoolProperties(string poolId, BatchPoolReplaceContent pool, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response ReplacePoolProperties(string poolId, BatchPoolReplaceOptions pool, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(pool, nameof(pool)); using RequestContent content = pool.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = ReplacePoolProperties(poolId, content, timeOutInSeconds, ocpdate, context); + Response response = ReplacePoolProperties(poolId, content, timeOutInSeconds, ocpDate, context); return response; } @@ -1394,7 +1470,7 @@ public virtual Response ReplacePoolProperties(string poolId, BatchPoolReplaceCon /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -1402,7 +1478,7 @@ public virtual Response ReplacePoolProperties(string poolId, BatchPoolReplaceCon /// The ID of the Pool to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1412,8 +1488,8 @@ public virtual Response ReplacePoolProperties(string poolId, BatchPoolReplaceCon /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task ReplacePoolPropertiesAsync(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task ReplacePoolPropertiesAsync(string poolId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(content, nameof(content)); @@ -1422,7 +1498,7 @@ public virtual async Task ReplacePoolPropertiesAsync(string poolId, Re scope.Start(); try { - using HttpMessage message = CreateReplacePoolPropertiesRequest(poolId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateReplacePoolPropertiesRequest(poolId, content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -1442,7 +1518,7 @@ public virtual async Task ReplacePoolPropertiesAsync(string poolId, Re /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -1450,7 +1526,7 @@ public virtual async Task ReplacePoolPropertiesAsync(string poolId, Re /// The ID of the Pool to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1460,8 +1536,8 @@ public virtual async Task ReplacePoolPropertiesAsync(string poolId, Re /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response ReplacePoolProperties(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response ReplacePoolProperties(string poolId, RequestContent content, TimeSpan? 
timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(content, nameof(content)); @@ -1470,7 +1546,7 @@ public virtual Response ReplacePoolProperties(string poolId, RequestContent cont scope.Start(); try { - using HttpMessage message = CreateReplacePoolPropertiesRequest(poolId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateReplacePoolPropertiesRequest(poolId, content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -1482,61 +1558,59 @@ public virtual Response ReplacePoolProperties(string poolId, RequestContent cont /// Removes Compute Nodes from the specified Pool. /// The ID of the Pool to get. - /// The options to use for removing the node. + /// The options to use for removing the node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// or is null. + /// or is null. /// is an empty string, and was expected to be non-empty. /// /// This operation can only run when the allocation state of the Pool is steady. /// When this operation runs, the allocation state changes from steady to resizing. /// Each request may remove up to 100 nodes. /// - /// - public virtual async Task RemoveNodesAsync(string poolId, BatchNodeRemoveContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + internal virtual async Task RemoveNodesInternalAsync(string poolId, BatchNodeRemoveOptions removeOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(removeOptions, nameof(removeOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = removeOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await RemoveNodesAsync(poolId, content0, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + Response response = await RemoveNodesInternalAsync(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context).ConfigureAwait(false); return response; } /// Removes Compute Nodes from the specified Pool. /// The ID of the Pool to get. - /// The options to use for removing the node. + /// The options to use for removing the node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// or is null. + /// or is null. /// is an empty string, and was expected to be non-empty. /// /// This operation can only run when the allocation state of the Pool is steady. /// When this operation runs, the allocation state changes from steady to resizing. 
/// Each request may remove up to 100 nodes. /// - /// - public virtual Response RemoveNodes(string poolId, BatchNodeRemoveContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + internal virtual Response RemoveNodesInternal(string poolId, BatchNodeRemoveOptions removeOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(removeOptions, nameof(removeOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = removeOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = RemoveNodes(poolId, content0, timeOutInSeconds, ocpdate, requestConditions, context); + Response response = RemoveNodesInternal(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context); return response; } @@ -1550,7 +1624,7 @@ public virtual Response RemoveNodes(string poolId, BatchNodeRemoveContent conten /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -1558,7 +1632,7 @@ public virtual Response RemoveNodes(string poolId, BatchNodeRemoveContent conten /// The ID of the Pool to get. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1569,17 +1643,16 @@ public virtual Response RemoveNodes(string poolId, BatchNodeRemoveContent conten /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task RemoveNodesAsync(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual async Task RemoveNodesInternalAsync(string poolId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.RemoveNodes"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.RemoveNodesInternal"); scope.Start(); try { - using HttpMessage message = CreateRemoveNodesRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateRemoveNodesInternalRequest(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -1599,7 +1672,7 @@ public virtual async Task RemoveNodesAsync(string poolId, RequestConte /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -1607,7 +1680,7 @@ public virtual async Task RemoveNodesAsync(string poolId, RequestConte /// The ID of the Pool to get. /// The content to send as the body of the request. 
/// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1618,17 +1691,16 @@ public virtual async Task RemoveNodesAsync(string poolId, RequestConte /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response RemoveNodes(string poolId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual Response RemoveNodesInternal(string poolId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.RemoveNodes"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.RemoveNodesInternal"); scope.Start(); try { - using HttpMessage message = CreateRemoveNodesRequest(poolId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateRemoveNodesInternalRequest(poolId, content, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -1651,7 +1723,7 @@ public virtual Response RemoveNodes(string poolId, RequestContent content, int? /// /// The ID of the Job to delete. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1663,16 +1735,15 @@ public virtual Response RemoveNodes(string poolId, RequestContent content, int? /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DeleteJobAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual async Task DeleteJobInternalAsync(string jobId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJob"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJobInternal"); scope.Start(); try { - using HttpMessage message = CreateDeleteJobRequest(jobId, timeOutInSeconds, ocpdate, force, requestConditions, context); + using HttpMessage message = CreateDeleteJobInternalRequest(jobId, timeOutInSeconds, ocpDate, force, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -1695,7 +1766,7 @@ public virtual async Task DeleteJobAsync(string jobId, int? timeOutInS /// /// The ID of the Job to delete. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1707,16 +1778,15 @@ public virtual async Task DeleteJobAsync(string jobId, int? timeOutInS /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DeleteJob(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual Response DeleteJobInternal(string jobId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJob"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJobInternal"); scope.Start(); try { - using HttpMessage message = CreateDeleteJobRequest(jobId, timeOutInSeconds, ocpdate, force, requestConditions, context); + using HttpMessage message = CreateDeleteJobInternalRequest(jobId, timeOutInSeconds, ocpDate, force, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -1729,7 +1799,7 @@ public virtual Response DeleteJob(string jobId, int? timeOutInSeconds = null, Da /// Gets information about the specified Job. /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1740,20 +1810,20 @@ public virtual Response DeleteJob(string jobId, int? 
timeOutInSeconds = null, Da /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - /// - public virtual async Task> GetJobAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetJobAsync(string jobId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetJobAsync(jobId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context).ConfigureAwait(false); + Response response = await GetJobAsync(jobId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context).ConfigureAwait(false); return Response.FromValue(BatchJob.FromResponse(response), response); } /// Gets information about the specified Job. /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1764,13 +1834,13 @@ public virtual async Task> GetJobAsync(string jobId, int? tim /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - /// - public virtual Response GetJob(string jobId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetJob(string jobId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetJob(jobId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + Response response = GetJob(jobId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context); return Response.FromValue(BatchJob.FromResponse(response), response); } @@ -1784,14 +1854,14 @@ public virtual Response GetJob(string jobId, int? timeOutInSeconds = n /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1804,8 +1874,8 @@ public virtual Response GetJob(string jobId, int? timeOutInSeconds = n /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetJobAsync(string jobId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + /// + public virtual async Task GetJobAsync(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); @@ -1813,7 +1883,7 @@ public virtual async Task GetJobAsync(string jobId, int? timeOutInSeco scope.Start(); try { - using HttpMessage message = CreateGetJobRequest(jobId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + using HttpMessage message = CreateGetJobRequest(jobId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -1833,14 +1903,14 @@ public virtual async Task GetJobAsync(string jobId, int? timeOutInSeco /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1853,8 +1923,8 @@ public virtual async Task GetJobAsync(string jobId, int? timeOutInSeco /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetJob(string jobId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + /// + public virtual Response GetJob(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); @@ -1862,7 +1932,7 @@ public virtual Response GetJob(string jobId, int? timeOutInSeconds, DateTimeOffs scope.Start(); try { - using HttpMessage message = CreateGetJobRequest(jobId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + using HttpMessage message = CreateGetJobRequest(jobId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -1885,7 +1955,7 @@ public virtual Response GetJob(string jobId, int? timeOutInSeconds, DateTimeOffs /// The ID of the Job whose properties you want to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1896,8 +1966,8 @@ public virtual Response GetJob(string jobId, int? timeOutInSeconds, DateTimeOffs /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task UpdateJobAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task UpdateJobAsync(string jobId, RequestContent content, TimeSpan? 
timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(content, nameof(content)); @@ -1906,7 +1976,7 @@ public virtual async Task UpdateJobAsync(string jobId, RequestContent scope.Start(); try { - using HttpMessage message = CreateUpdateJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateUpdateJobRequest(jobId, content, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -1929,7 +1999,7 @@ public virtual async Task UpdateJobAsync(string jobId, RequestContent /// The ID of the Job whose properties you want to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1940,8 +2010,8 @@ public virtual async Task UpdateJobAsync(string jobId, RequestContent /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response UpdateJob(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response UpdateJob(string jobId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(content, nameof(content)); @@ -1950,7 +2020,7 @@ public virtual Response UpdateJob(string jobId, RequestContent content, int? tim scope.Start(); try { - using HttpMessage message = CreateUpdateJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateUpdateJobRequest(jobId, content, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -1964,7 +2034,7 @@ public virtual Response UpdateJob(string jobId, RequestContent content, int? tim /// The ID of the Job whose properties you want to update. /// A job with updated properties. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -1978,15 +2048,15 @@ public virtual Response UpdateJob(string jobId, RequestContent content, int? tim /// the Job has constraints associated with it and if constraints is not specified /// with this request, then the Batch service will remove the existing constraints. /// - /// - public virtual async Task ReplaceJobAsync(string jobId, BatchJob job, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual async Task ReplaceJobAsync(string jobId, BatchJob job, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(job, nameof(job)); using RequestContent content = job.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await ReplaceJobAsync(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + Response response = await ReplaceJobAsync(jobId, content, timeOutInSeconds, ocpDate, requestConditions, context).ConfigureAwait(false); return response; } @@ -1994,7 +2064,7 @@ public virtual async Task ReplaceJobAsync(string jobId, BatchJob job, /// The ID of the Job whose properties you want to update. /// A job with updated properties. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2008,15 +2078,15 @@ public virtual async Task ReplaceJobAsync(string jobId, BatchJob job, /// the Job has constraints associated with it and if constraints is not specified /// with this request, then the Batch service will remove the existing constraints. /// - /// - public virtual Response ReplaceJob(string jobId, BatchJob job, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual Response ReplaceJob(string jobId, BatchJob job, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(job, nameof(job)); using RequestContent content = job.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = ReplaceJob(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + Response response = ReplaceJob(jobId, content, timeOutInSeconds, ocpDate, requestConditions, context); return response; } @@ -2030,7 +2100,7 @@ public virtual Response ReplaceJob(string jobId, BatchJob job, int? timeOutInSec /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -2038,7 +2108,7 @@ public virtual Response ReplaceJob(string jobId, BatchJob job, int? timeOutInSec /// The ID of the Job whose properties you want to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2049,8 +2119,8 @@ public virtual Response ReplaceJob(string jobId, BatchJob job, int? timeOutInSec /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task ReplaceJobAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task ReplaceJobAsync(string jobId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(content, nameof(content)); @@ -2059,7 +2129,7 @@ public virtual async Task ReplaceJobAsync(string jobId, RequestContent scope.Start(); try { - using HttpMessage message = CreateReplaceJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateReplaceJobRequest(jobId, content, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -2079,7 +2149,7 @@ public virtual async Task ReplaceJobAsync(string jobId, RequestContent /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -2087,7 +2157,7 @@ public virtual async Task ReplaceJobAsync(string jobId, RequestContent /// The ID of the Job whose properties you want to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2098,8 +2168,8 @@ public virtual async Task ReplaceJobAsync(string jobId, RequestContent /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. 
- /// - public virtual Response ReplaceJob(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response ReplaceJob(string jobId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(content, nameof(content)); @@ -2108,7 +2178,7 @@ public virtual Response ReplaceJob(string jobId, RequestContent content, int? ti scope.Start(); try { - using HttpMessage message = CreateReplaceJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateReplaceJobRequest(jobId, content, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -2120,16 +2190,16 @@ public virtual Response ReplaceJob(string jobId, RequestContent content, int? ti /// Disables the specified Job, preventing new Tasks from running. /// The ID of the Job to disable. - /// The options to use for disabling the Job. + /// The options to use for disabling the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// or is null. + /// or is null. /// is an empty string, and was expected to be non-empty. /// /// The Batch Service immediately moves the Job to the disabling state. 
Batch then @@ -2141,30 +2211,29 @@ public virtual Response ReplaceJob(string jobId, RequestContent content, int? ti /// disable a Job that is in any state other than active, disabling, or disabled, /// the request fails with status code 409. /// - /// - public virtual async Task DisableJobAsync(string jobId, BatchJobDisableContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + internal virtual async Task DisableJobInternalAsync(string jobId, BatchJobDisableOptions disableOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(disableOptions, nameof(disableOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = disableOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await DisableJobAsync(jobId, content0, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + Response response = await DisableJobInternalAsync(jobId, content, timeOutInSeconds, ocpDate, requestConditions, context).ConfigureAwait(false); return response; } /// Disables the specified Job, preventing new Tasks from running. /// The ID of the Job to disable. - /// The options to use for disabling the Job. + /// The options to use for disabling the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
/// /// The content to send as the request conditions of the request. /// The cancellation token to use. - /// or is null. + /// or is null. /// is an empty string, and was expected to be non-empty. /// /// The Batch Service immediately moves the Job to the disabling state. Batch then @@ -2176,15 +2245,14 @@ public virtual async Task DisableJobAsync(string jobId, BatchJobDisabl /// disable a Job that is in any state other than active, disabling, or disabled, /// the request fails with status code 409. /// - /// - public virtual Response DisableJob(string jobId, BatchJobDisableContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + internal virtual Response DisableJobInternal(string jobId, BatchJobDisableOptions disableOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(disableOptions, nameof(disableOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = disableOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = DisableJob(jobId, content0, timeOutInSeconds, ocpdate, requestConditions, context); + Response response = DisableJobInternal(jobId, content, timeOutInSeconds, ocpDate, requestConditions, context); return response; } @@ -2198,7 +2266,7 @@ public virtual Response DisableJob(string jobId, BatchJobDisableContent content, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. 
/// /// /// @@ -2206,7 +2274,7 @@ public virtual Response DisableJob(string jobId, BatchJobDisableContent content, /// The ID of the Job to disable. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2217,17 +2285,16 @@ public virtual Response DisableJob(string jobId, BatchJobDisableContent content, /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DisableJobAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual async Task DisableJobInternalAsync(string jobId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableJob"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableJobInternal"); scope.Start(); try { - using HttpMessage message = CreateDisableJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDisableJobInternalRequest(jobId, content, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -2247,7 +2314,7 @@ public virtual async Task DisableJobAsync(string jobId, RequestContent /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -2255,7 +2322,7 @@ public virtual async Task DisableJobAsync(string jobId, RequestContent /// The ID of the Job to disable. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2266,17 +2333,16 @@ public virtual async Task DisableJobAsync(string jobId, RequestContent /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DisableJob(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual Response DisableJobInternal(string jobId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(content, nameof(content)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableJob"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DisableJobInternal"); scope.Start(); try { - using HttpMessage message = CreateDisableJobRequest(jobId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDisableJobInternalRequest(jobId, content, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -2299,7 +2365,7 @@ public virtual Response DisableJob(string jobId, RequestContent content, int? ti /// /// The ID of the Job to enable. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2310,16 +2376,15 @@ public virtual Response DisableJob(string jobId, RequestContent content, int? ti /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task EnableJobAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual async Task EnableJobInternalAsync(string jobId, TimeSpan? 
timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableJob"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableJobInternal"); scope.Start(); try { - using HttpMessage message = CreateEnableJobRequest(jobId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateEnableJobInternalRequest(jobId, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -2342,7 +2407,7 @@ public virtual async Task EnableJobAsync(string jobId, int? timeOutInS /// /// The ID of the Job to enable. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2353,16 +2418,15 @@ public virtual async Task EnableJobAsync(string jobId, int? timeOutInS /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response EnableJob(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual Response EnableJobInternal(string jobId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableJob"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.EnableJobInternal"); scope.Start(); try { - using HttpMessage message = CreateEnableJobRequest(jobId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateEnableJobInternalRequest(jobId, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -2374,9 +2438,9 @@ public virtual Response EnableJob(string jobId, int? timeOutInSeconds = null, Da /// Terminates the specified Job, marking it as completed. /// The ID of the Job to terminate. - /// The options to use for terminating the Job. + /// The options to use for terminating the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2394,22 +2458,21 @@ public virtual Response EnableJob(string jobId, int? timeOutInSeconds = null, Da /// state, they will remain in the active state. Once a Job is terminated, new /// Tasks cannot be added and any remaining active Tasks will not be scheduled. /// - /// - public virtual async Task TerminateJobAsync(string jobId, BatchJobTerminateContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + internal virtual async Task TerminateJobInternalAsync(string jobId, BatchJobTerminateOptions options = null, TimeSpan? 
timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? force = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - using RequestContent content = parameters?.ToRequestContent(); + using RequestContent content = options?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await TerminateJobAsync(jobId, content, timeOutInSeconds, ocpdate, force, requestConditions, context).ConfigureAwait(false); + Response response = await TerminateJobInternalAsync(jobId, content, timeOutInSeconds, ocpDate, force, requestConditions, context).ConfigureAwait(false); return response; } /// Terminates the specified Job, marking it as completed. /// The ID of the Job to terminate. - /// The options to use for terminating the Job. + /// The options to use for terminating the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2427,14 +2490,13 @@ public virtual async Task TerminateJobAsync(string jobId, BatchJobTerm /// state, they will remain in the active state. Once a Job is terminated, new /// Tasks cannot be added and any remaining active Tasks will not be scheduled. /// - /// - public virtual Response TerminateJob(string jobId, BatchJobTerminateContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + internal virtual Response TerminateJobInternal(string jobId, BatchJobTerminateOptions options = null, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, bool? force = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - using RequestContent content = parameters?.ToRequestContent(); + using RequestContent content = options?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = TerminateJob(jobId, content, timeOutInSeconds, ocpdate, force, requestConditions, context); + Response response = TerminateJobInternal(jobId, content, timeOutInSeconds, ocpDate, force, requestConditions, context); return response; } @@ -2448,7 +2510,7 @@ public virtual Response TerminateJob(string jobId, BatchJobTerminateContent para /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -2456,7 +2518,7 @@ public virtual Response TerminateJob(string jobId, BatchJobTerminateContent para /// The ID of the Job to terminate. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2468,16 +2530,15 @@ public virtual Response TerminateJob(string jobId, BatchJobTerminateContent para /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task TerminateJobAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? 
force = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual async Task TerminateJobInternalAsync(string jobId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJob"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJobInternal"); scope.Start(); try { - using HttpMessage message = CreateTerminateJobRequest(jobId, content, timeOutInSeconds, ocpdate, force, requestConditions, context); + using HttpMessage message = CreateTerminateJobInternalRequest(jobId, content, timeOutInSeconds, ocpDate, force, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -2497,7 +2558,7 @@ public virtual async Task TerminateJobAsync(string jobId, RequestConte /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -2505,7 +2566,7 @@ public virtual async Task TerminateJobAsync(string jobId, RequestConte /// The ID of the Job to terminate. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2517,16 +2578,15 @@ public virtual async Task TerminateJobAsync(string jobId, RequestConte /// is an empty string, and was expected to be non-empty. 
/// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response TerminateJob(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual Response TerminateJobInternal(string jobId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJob"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJobInternal"); scope.Start(); try { - using HttpMessage message = CreateTerminateJobRequest(jobId, content, timeOutInSeconds, ocpdate, force, requestConditions, context); + using HttpMessage message = CreateTerminateJobInternalRequest(jobId, content, timeOutInSeconds, ocpDate, force, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -2539,7 +2599,7 @@ public virtual Response TerminateJob(string jobId, RequestContent content, int? /// Creates a Job to the specified Account. /// The Job to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2557,21 +2617,21 @@ public virtual Response TerminateJob(string jobId, RequestContent content, int? /// This information may appear in telemetry logs accessible to Microsoft Support /// engineers. /// - /// - public virtual async Task CreateJobAsync(BatchJobCreateContent job, int? 
timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task CreateJobAsync(BatchJobCreateOptions job, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(job, nameof(job)); using RequestContent content = job.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await CreateJobAsync(content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await CreateJobAsync(content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return response; } /// Creates a Job to the specified Account. /// The Job to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2589,14 +2649,14 @@ public virtual async Task CreateJobAsync(BatchJobCreateContent job, in /// This information may appear in telemetry logs accessible to Microsoft Support /// engineers. /// - /// - public virtual Response CreateJob(BatchJobCreateContent job, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response CreateJob(BatchJobCreateOptions job, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(job, nameof(job)); using RequestContent content = job.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = CreateJob(content, timeOutInSeconds, ocpdate, context); + Response response = CreateJob(content, timeOutInSeconds, ocpDate, context); return response; } @@ -2610,14 +2670,14 @@ public virtual Response CreateJob(BatchJobCreateContent job, int? timeOutInSecon /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2626,8 +2686,8 @@ public virtual Response CreateJob(BatchJobCreateContent job, int? timeOutInSecon /// is null. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task CreateJobAsync(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task CreateJobAsync(RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNull(content, nameof(content)); @@ -2635,7 +2695,7 @@ public virtual async Task CreateJobAsync(RequestContent content, int? 
scope.Start(); try { - using HttpMessage message = CreateCreateJobRequest(content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreateJobRequest(content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -2655,14 +2715,14 @@ public virtual async Task CreateJobAsync(RequestContent content, int? /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2671,8 +2731,8 @@ public virtual async Task CreateJobAsync(RequestContent content, int? /// is null. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response CreateJob(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response CreateJob(RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNull(content, nameof(content)); @@ -2680,7 +2740,7 @@ public virtual Response CreateJob(RequestContent content, int? 
timeOutInSeconds scope.Start(); try { - using HttpMessage message = CreateCreateJobRequest(content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreateJobRequest(content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -2693,7 +2753,7 @@ public virtual Response CreateJob(RequestContent content, int? timeOutInSeconds /// Gets the Task counts for the specified Job. /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2707,20 +2767,20 @@ public virtual Response CreateJob(RequestContent content, int? timeOutInSeconds /// state are counted as running. Note that the numbers returned may not always be /// up to date. If you need exact task counts, use a list query. /// - /// - public virtual async Task> GetJobTaskCountsAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetJobTaskCountsAsync(string jobId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetJobTaskCountsAsync(jobId, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await GetJobTaskCountsAsync(jobId, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return Response.FromValue(BatchTaskCountsResult.FromResponse(response), response); } /// Gets the Task counts for the specified Job. /// The ID of the Job. 
/// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2734,13 +2794,13 @@ public virtual async Task> GetJobTaskCountsAsync /// state are counted as running. Note that the numbers returned may not always be /// up to date. If you need exact task counts, use a list query. /// - /// - public virtual Response GetJobTaskCounts(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetJobTaskCounts(string jobId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetJobTaskCounts(jobId, timeOutInSeconds, ocpdate, context); + Response response = GetJobTaskCounts(jobId, timeOutInSeconds, ocpDate, context); return Response.FromValue(BatchTaskCountsResult.FromResponse(response), response); } @@ -2754,14 +2814,14 @@ public virtual Response GetJobTaskCounts(string jobId, in /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -2771,8 +2831,8 @@ public virtual Response GetJobTaskCounts(string jobId, in /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetJobTaskCountsAsync(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + /// + public virtual async Task GetJobTaskCountsAsync(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); @@ -2780,7 +2840,7 @@ public virtual async Task GetJobTaskCountsAsync(string jobId, int? tim scope.Start(); try { - using HttpMessage message = CreateGetJobTaskCountsRequest(jobId, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateGetJobTaskCountsRequest(jobId, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -2800,14 +2860,14 @@ public virtual async Task GetJobTaskCountsAsync(string jobId, int? tim /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2817,8 +2877,8 @@ public virtual async Task GetJobTaskCountsAsync(string jobId, int? tim /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetJobTaskCounts(string jobId, int? 
timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + /// + public virtual Response GetJobTaskCounts(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); @@ -2826,7 +2886,7 @@ public virtual Response GetJobTaskCounts(string jobId, int? timeOutInSeconds, Da scope.Start(); try { - using HttpMessage message = CreateGetJobTaskCountsRequest(jobId, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateGetJobTaskCountsRequest(jobId, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -2839,42 +2899,42 @@ public virtual Response GetJobTaskCounts(string jobId, int? timeOutInSeconds, Da /// Creates a Certificate to the specified Account. /// The Certificate to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The cancellation token to use. /// is null. - /// - public virtual async Task CreateCertificateAsync(BatchCertificate certificate, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task CreateCertificateAsync(BatchCertificate certificate, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(certificate, nameof(certificate)); using RequestContent content = certificate.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await CreateCertificateAsync(content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await CreateCertificateAsync(content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return response; } /// Creates a Certificate to the specified Account. /// The Certificate to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The cancellation token to use. /// is null. - /// - public virtual Response CreateCertificate(BatchCertificate certificate, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response CreateCertificate(BatchCertificate certificate, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(certificate, nameof(certificate)); using RequestContent content = certificate.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = CreateCertificate(content, timeOutInSeconds, ocpdate, context); + Response response = CreateCertificate(content, timeOutInSeconds, ocpDate, context); return response; } @@ -2888,14 +2948,14 @@ public virtual Response CreateCertificate(BatchCertificate certificate, int? tim /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2904,8 +2964,8 @@ public virtual Response CreateCertificate(BatchCertificate certificate, int? tim /// is null. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task CreateCertificateAsync(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task CreateCertificateAsync(RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNull(content, nameof(content)); @@ -2913,7 +2973,7 @@ public virtual async Task CreateCertificateAsync(RequestContent conten scope.Start(); try { - using HttpMessage message = CreateCreateCertificateRequest(content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreateCertificateRequest(content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -2933,14 +2993,14 @@ public virtual async Task CreateCertificateAsync(RequestContent conten /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. 
The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2949,8 +3009,8 @@ public virtual async Task CreateCertificateAsync(RequestContent conten /// is null. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response CreateCertificate(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response CreateCertificate(RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNull(content, nameof(content)); @@ -2958,7 +3018,7 @@ public virtual Response CreateCertificate(RequestContent content, int? timeOutIn scope.Start(); try { - using HttpMessage message = CreateCreateCertificateRequest(content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreateCertificateRequest(content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -2982,7 +3042,7 @@ public virtual Response CreateCertificate(RequestContent content, int? timeOutIn /// The algorithm used to derive the thumbprint parameter. This must be sha1. /// The thumbprint of the Certificate being deleted. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -2992,8 +3052,8 @@ public virtual Response CreateCertificate(RequestContent content, int? 
timeOutIn /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task CancelCertificateDeletionAsync(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task CancelCertificateDeletionAsync(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); @@ -3002,7 +3062,7 @@ public virtual async Task CancelCertificateDeletionAsync(string thumbp scope.Start(); try { - using HttpMessage message = CreateCancelCertificateDeletionRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCancelCertificateDeletionRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3026,7 +3086,7 @@ public virtual async Task CancelCertificateDeletionAsync(string thumbp /// The algorithm used to derive the thumbprint parameter. This must be sha1. /// The thumbprint of the Certificate being deleted. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3036,8 +3096,8 @@ public virtual async Task CancelCertificateDeletionAsync(string thumbp /// or is an empty string, and was expected to be non-empty. 
/// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response CancelCertificateDeletion(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response CancelCertificateDeletion(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); @@ -3046,7 +3106,7 @@ public virtual Response CancelCertificateDeletion(string thumbprintAlgorithm, st scope.Start(); try { - using HttpMessage message = CreateCancelCertificateDeletionRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCancelCertificateDeletionRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3070,7 +3130,7 @@ public virtual Response CancelCertificateDeletion(string thumbprintAlgorithm, st /// The algorithm used to derive the thumbprint parameter. This must be sha1. /// The thumbprint of the Certificate to be deleted. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3080,17 +3140,16 @@ public virtual Response CancelCertificateDeletion(string thumbprintAlgorithm, st /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. 
- /// - public virtual async Task DeleteCertificateAsync(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + internal virtual async Task DeleteCertificateInternalAsync(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteCertificate"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteCertificateInternal"); scope.Start(); try { - using HttpMessage message = CreateDeleteCertificateRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateDeleteCertificateInternalRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3114,7 +3173,7 @@ public virtual async Task DeleteCertificateAsync(string thumbprintAlgo /// The algorithm used to derive the thumbprint parameter. This must be sha1. /// The thumbprint of the Certificate to be deleted. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3124,17 +3183,16 @@ public virtual async Task DeleteCertificateAsync(string thumbprintAlgo /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. 
- /// - public virtual Response DeleteCertificate(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + internal virtual Response DeleteCertificateInternal(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteCertificate"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteCertificateInternal"); scope.Start(); try { - using HttpMessage message = CreateDeleteCertificateRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateDeleteCertificateInternalRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3148,7 +3206,7 @@ public virtual Response DeleteCertificate(string thumbprintAlgorithm, string thu /// The algorithm used to derive the thumbprint parameter. This must be sha1. /// The thumbprint of the Certificate to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3157,14 +3215,14 @@ public virtual Response DeleteCertificate(string thumbprintAlgorithm, string thu /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// - public virtual async Task> GetCertificateAsync(string thumbprintAlgorithm, string thumbprint, int? 
timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetCertificateAsync(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetCertificateAsync(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, select, context).ConfigureAwait(false); + Response response = await GetCertificateAsync(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, select, context).ConfigureAwait(false); return Response.FromValue(BatchCertificate.FromResponse(response), response); } @@ -3172,7 +3230,7 @@ public virtual async Task> GetCertificateAsync(string /// The algorithm used to derive the thumbprint parameter. This must be sha1. /// The thumbprint of the Certificate to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3181,14 +3239,14 @@ public virtual async Task> GetCertificateAsync(string /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// - public virtual Response GetCertificate(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetCertificate(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetCertificate(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, select, context); + Response response = GetCertificate(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, select, context); return Response.FromValue(BatchCertificate.FromResponse(response), response); } @@ -3202,7 +3260,7 @@ public virtual Response GetCertificate(string thumbprintAlgori /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -3210,7 +3268,7 @@ public virtual Response GetCertificate(string thumbprintAlgori /// The algorithm used to derive the thumbprint parameter. This must be sha1. /// The thumbprint of the Certificate to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3221,8 +3279,8 @@ public virtual Response GetCertificate(string thumbprintAlgori /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. 
- /// - public virtual async Task GetCertificateAsync(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + /// + public virtual async Task GetCertificateAsync(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); @@ -3231,7 +3289,7 @@ public virtual async Task GetCertificateAsync(string thumbprintAlgorit scope.Start(); try { - using HttpMessage message = CreateGetCertificateRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, select, context); + using HttpMessage message = CreateGetCertificateRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, select, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3251,7 +3309,7 @@ public virtual async Task GetCertificateAsync(string thumbprintAlgorit /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -3259,7 +3317,7 @@ public virtual async Task GetCertificateAsync(string thumbprintAlgorit /// The algorithm used to derive the thumbprint parameter. This must be sha1. /// The thumbprint of the Certificate to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -3270,8 +3328,8 @@ public virtual async Task GetCertificateAsync(string thumbprintAlgorit /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetCertificate(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + /// + public virtual Response GetCertificate(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(thumbprintAlgorithm, nameof(thumbprintAlgorithm)); Argument.AssertNotNullOrEmpty(thumbprint, nameof(thumbprint)); @@ -3280,7 +3338,91 @@ public virtual Response GetCertificate(string thumbprintAlgorithm, string thumbp scope.Start(); try { - using HttpMessage message = CreateGetCertificateRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpdate, select, context); + using HttpMessage message = CreateGetCertificateRequest(thumbprintAlgorithm, thumbprint, timeOutInSeconds, ocpDate, select, context); + return _pipeline.ProcessMessage(message, context); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Checks the specified Job Schedule exists. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule which you want to check. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. 
Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. + /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + internal virtual async Task JobScheduleExistsAsync(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.JobScheduleExists"); + scope.Start(); + try + { + using HttpMessage message = CreateJobScheduleExistsRequest(jobScheduleId, timeOutInSeconds, ocpDate, requestConditions, context); + return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); + } + catch (Exception e) + { + scope.Failed(e); + throw; + } + } + + // The convenience method is omitted here because it has exactly the same parameter list as the corresponding protocol method + /// + /// [Protocol Method] Checks the specified Job Schedule exists. + /// + /// + /// + /// This protocol method allows explicit creation of the request and processing of the response for advanced scenarios. + /// + /// + /// + /// + /// The ID of the Job Schedule which you want to check. + /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". + /// + /// The time the request was issued. Client libraries typically set this to the + /// current system clock time; set it explicitly if you are calling the REST API + /// directly. 
+ /// + /// The content to send as the request conditions of the request. + /// The request context, which can override default behaviors of the client pipeline on a per-call basis. + /// is null. + /// is an empty string, and was expected to be non-empty. + /// Service returned a non-success status code. + /// The response returned from the service. + internal virtual Response JobScheduleExists(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) + { + Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); + + using var scope = ClientDiagnostics.CreateScope("BatchClient.JobScheduleExists"); + scope.Start(); + try + { + using HttpMessage message = CreateJobScheduleExistsRequest(jobScheduleId, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3303,7 +3445,7 @@ public virtual Response GetCertificate(string thumbprintAlgorithm, string thumbp /// /// The ID of the Job Schedule to delete. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3315,16 +3457,15 @@ public virtual Response GetCertificate(string thumbprintAlgorithm, string thumbp /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DeleteJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? 
force = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual async Task DeleteJobScheduleInternalAsync(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJobScheduleInternal"); scope.Start(); try { - using HttpMessage message = CreateDeleteJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, force, requestConditions, context); + using HttpMessage message = CreateDeleteJobScheduleInternalRequest(jobScheduleId, timeOutInSeconds, ocpDate, force, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3347,7 +3488,7 @@ public virtual async Task DeleteJobScheduleAsync(string jobScheduleId, /// /// The ID of the Job Schedule to delete. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3359,16 +3500,15 @@ public virtual async Task DeleteJobScheduleAsync(string jobScheduleId, /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DeleteJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? 
force = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual Response DeleteJobScheduleInternal(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeleteJobScheduleInternal"); scope.Start(); try { - using HttpMessage message = CreateDeleteJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, force, requestConditions, context); + using HttpMessage message = CreateDeleteJobScheduleInternalRequest(jobScheduleId, timeOutInSeconds, ocpDate, force, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3381,7 +3521,7 @@ public virtual Response DeleteJobSchedule(string jobScheduleId, int? timeOutInSe /// Gets information about the specified Job Schedule. /// The ID of the Job Schedule to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3392,20 +3532,20 @@ public virtual Response DeleteJobSchedule(string jobScheduleId, int? timeOutInSe /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - /// - public virtual async Task> GetJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetJobScheduleAsync(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetJobScheduleAsync(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context).ConfigureAwait(false); + Response response = await GetJobScheduleAsync(jobScheduleId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context).ConfigureAwait(false); return Response.FromValue(BatchJobSchedule.FromResponse(response), response); } /// Gets information about the specified Job Schedule. /// The ID of the Job Schedule to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3416,13 +3556,13 @@ public virtual async Task> GetJobScheduleAsync(string /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - /// - public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetJobSchedule(string jobScheduleId, TimeSpan? 
timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetJobSchedule(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + Response response = GetJobSchedule(jobScheduleId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context); return Response.FromValue(BatchJobSchedule.FromResponse(response), response); } @@ -3436,14 +3576,14 @@ public virtual Response GetJobSchedule(string jobScheduleId, i /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job Schedule to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3456,8 +3596,8 @@ public virtual Response GetJobSchedule(string jobScheduleId, i /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + /// + public virtual async Task GetJobScheduleAsync(string jobScheduleId, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); @@ -3465,7 +3605,7 @@ public virtual async Task GetJobScheduleAsync(string jobScheduleId, in scope.Start(); try { - using HttpMessage message = CreateGetJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + using HttpMessage message = CreateGetJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3485,14 +3625,14 @@ public virtual async Task GetJobScheduleAsync(string jobScheduleId, in /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job Schedule to get. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3505,8 +3645,8 @@ public virtual async Task GetJobScheduleAsync(string jobScheduleId, in /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + /// + public virtual Response GetJobSchedule(string jobScheduleId, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); @@ -3514,7 +3654,7 @@ public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSecon scope.Start(); try { - using HttpMessage message = CreateGetJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + using HttpMessage message = CreateGetJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3537,7 +3677,7 @@ public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSecon /// The ID of the Job Schedule to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3548,8 +3688,8 @@ public virtual Response GetJobSchedule(string jobScheduleId, int? timeOutInSecon /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task UpdateJobScheduleAsync(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task UpdateJobScheduleAsync(string jobScheduleId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); Argument.AssertNotNull(content, nameof(content)); @@ -3558,7 +3698,7 @@ public virtual async Task UpdateJobScheduleAsync(string jobScheduleId, scope.Start(); try { - using HttpMessage message = CreateUpdateJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateUpdateJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3581,7 +3721,7 @@ public virtual async Task UpdateJobScheduleAsync(string jobScheduleId, /// The ID of the Job Schedule to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3592,8 +3732,8 @@ public virtual async Task UpdateJobScheduleAsync(string jobScheduleId, /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response UpdateJobSchedule(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response UpdateJobSchedule(string jobScheduleId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); Argument.AssertNotNull(content, nameof(content)); @@ -3602,7 +3742,7 @@ public virtual Response UpdateJobSchedule(string jobScheduleId, RequestContent c scope.Start(); try { - using HttpMessage message = CreateUpdateJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateUpdateJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3616,7 +3756,7 @@ public virtual Response UpdateJobSchedule(string jobScheduleId, RequestContent c /// The ID of the Job Schedule to update. /// A Job Schedule with updated properties. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3632,15 +3772,15 @@ public virtual Response UpdateJobSchedule(string jobScheduleId, RequestContent c /// impact Jobs created by the schedule after the update has taken place; currently /// running Jobs are unaffected. /// - /// - public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId, BatchJobSchedule jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId, BatchJobSchedule jobSchedule, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); using RequestContent content = jobSchedule.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await ReplaceJobScheduleAsync(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + Response response = await ReplaceJobScheduleAsync(jobScheduleId, content, timeOutInSeconds, ocpDate, requestConditions, context).ConfigureAwait(false); return response; } @@ -3648,7 +3788,7 @@ public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId /// The ID of the Job Schedule to update. /// A Job Schedule with updated properties. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3664,15 +3804,15 @@ public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId /// impact Jobs created by the schedule after the update has taken place; currently /// running Jobs are unaffected. /// - /// - public virtual Response ReplaceJobSchedule(string jobScheduleId, BatchJobSchedule jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual Response ReplaceJobSchedule(string jobScheduleId, BatchJobSchedule jobSchedule, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); using RequestContent content = jobSchedule.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = ReplaceJobSchedule(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + Response response = ReplaceJobSchedule(jobScheduleId, content, timeOutInSeconds, ocpDate, requestConditions, context); return response; } @@ -3686,7 +3826,7 @@ public virtual Response ReplaceJobSchedule(string jobScheduleId, BatchJobSchedul /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -3694,7 +3834,7 @@ public virtual Response ReplaceJobSchedule(string jobScheduleId, BatchJobSchedul /// The ID of the Job Schedule to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3705,8 +3845,8 @@ public virtual Response ReplaceJobSchedule(string jobScheduleId, BatchJobSchedul /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); Argument.AssertNotNull(content, nameof(content)); @@ -3715,7 +3855,7 @@ public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId scope.Start(); try { - using HttpMessage message = CreateReplaceJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateReplaceJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3735,7 +3875,7 @@ public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -3743,7 +3883,7 @@ public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId /// The ID of the Job Schedule to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3754,8 +3894,8 @@ public virtual async Task ReplaceJobScheduleAsync(string jobScheduleId /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. 
/// The response returned from the service. - /// - public virtual Response ReplaceJobSchedule(string jobScheduleId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response ReplaceJobSchedule(string jobScheduleId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); Argument.AssertNotNull(content, nameof(content)); @@ -3764,7 +3904,7 @@ public virtual Response ReplaceJobSchedule(string jobScheduleId, RequestContent scope.Start(); try { - using HttpMessage message = CreateReplaceJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateReplaceJobScheduleRequest(jobScheduleId, content, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3787,7 +3927,7 @@ public virtual Response ReplaceJobSchedule(string jobScheduleId, RequestContent /// /// The ID of the Job Schedule to disable. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3798,8 +3938,8 @@ public virtual Response ReplaceJobSchedule(string jobScheduleId, RequestContent /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DisableJobScheduleAsync(string jobScheduleId, int? 
timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task DisableJobScheduleAsync(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); @@ -3807,7 +3947,7 @@ public virtual async Task DisableJobScheduleAsync(string jobScheduleId scope.Start(); try { - using HttpMessage message = CreateDisableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDisableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3830,7 +3970,7 @@ public virtual async Task DisableJobScheduleAsync(string jobScheduleId /// /// The ID of the Job Schedule to disable. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3841,8 +3981,8 @@ public virtual async Task DisableJobScheduleAsync(string jobScheduleId /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DisableJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response DisableJobSchedule(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); @@ -3850,7 +3990,7 @@ public virtual Response DisableJobSchedule(string jobScheduleId, int? timeOutInS scope.Start(); try { - using HttpMessage message = CreateDisableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDisableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3873,7 +4013,7 @@ public virtual Response DisableJobSchedule(string jobScheduleId, int? timeOutInS /// /// The ID of the Job Schedule to enable. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3884,8 +4024,8 @@ public virtual Response DisableJobSchedule(string jobScheduleId, int? timeOutInS /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task EnableJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task EnableJobScheduleAsync(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); @@ -3893,7 +4033,7 @@ public virtual async Task EnableJobScheduleAsync(string jobScheduleId, scope.Start(); try { - using HttpMessage message = CreateEnableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateEnableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -3916,7 +4056,7 @@ public virtual async Task EnableJobScheduleAsync(string jobScheduleId, /// /// The ID of the Job Schedule to enable. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3927,8 +4067,8 @@ public virtual async Task EnableJobScheduleAsync(string jobScheduleId, /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response EnableJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response EnableJobSchedule(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); @@ -3936,7 +4076,7 @@ public virtual Response EnableJobSchedule(string jobScheduleId, int? 
timeOutInSe scope.Start(); try { - using HttpMessage message = CreateEnableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateEnableJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -3959,7 +4099,7 @@ public virtual Response EnableJobSchedule(string jobScheduleId, int? timeOutInSe /// /// The ID of the Job Schedule to terminates. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -3971,16 +4111,15 @@ public virtual Response EnableJobSchedule(string jobScheduleId, int? timeOutInSe /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task TerminateJobScheduleAsync(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual async Task TerminateJobScheduleInternalAsync(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? 
force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJobScheduleInternal"); scope.Start(); try { - using HttpMessage message = CreateTerminateJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, force, requestConditions, context); + using HttpMessage message = CreateTerminateJobScheduleInternalRequest(jobScheduleId, timeOutInSeconds, ocpDate, force, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4003,7 +4142,7 @@ public virtual async Task TerminateJobScheduleAsync(string jobSchedule /// /// The ID of the Job Schedule to terminates. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4015,16 +4154,15 @@ public virtual async Task TerminateJobScheduleAsync(string jobSchedule /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response TerminateJobSchedule(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? force = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual Response TerminateJobScheduleInternal(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? 
force = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJobSchedule"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.TerminateJobScheduleInternal"); scope.Start(); try { - using HttpMessage message = CreateTerminateJobScheduleRequest(jobScheduleId, timeOutInSeconds, ocpdate, force, requestConditions, context); + using HttpMessage message = CreateTerminateJobScheduleInternalRequest(jobScheduleId, timeOutInSeconds, ocpDate, force, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -4037,42 +4175,42 @@ public virtual Response TerminateJobSchedule(string jobScheduleId, int? timeOutI /// Creates a Job Schedule to the specified Account. /// The Job Schedule to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The cancellation token to use. /// is null. - /// - public virtual async Task CreateJobScheduleAsync(BatchJobScheduleCreateContent jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task CreateJobScheduleAsync(BatchJobScheduleCreateOptions jobSchedule, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); using RequestContent content = jobSchedule.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await CreateJobScheduleAsync(content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await CreateJobScheduleAsync(content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return response; } /// Creates a Job Schedule to the specified Account. /// The Job Schedule to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The cancellation token to use. /// is null. - /// - public virtual Response CreateJobSchedule(BatchJobScheduleCreateContent jobSchedule, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response CreateJobSchedule(BatchJobScheduleCreateOptions jobSchedule, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNull(jobSchedule, nameof(jobSchedule)); using RequestContent content = jobSchedule.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = CreateJobSchedule(content, timeOutInSeconds, ocpdate, context); + Response response = CreateJobSchedule(content, timeOutInSeconds, ocpDate, context); return response; } @@ -4086,14 +4224,14 @@ public virtual Response CreateJobSchedule(BatchJobScheduleCreateContent jobSched /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4102,8 +4240,8 @@ public virtual Response CreateJobSchedule(BatchJobScheduleCreateContent jobSched /// is null. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task CreateJobScheduleAsync(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task CreateJobScheduleAsync(RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) { Argument.AssertNotNull(content, nameof(content)); @@ -4111,7 +4249,7 @@ public virtual async Task CreateJobScheduleAsync(RequestContent conten scope.Start(); try { - using HttpMessage message = CreateCreateJobScheduleRequest(content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreateJobScheduleRequest(content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4131,14 +4269,14 @@ public virtual async Task CreateJobScheduleAsync(RequestContent conten /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4147,8 +4285,8 @@ public virtual async Task CreateJobScheduleAsync(RequestContent conten /// is null. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response CreateJobSchedule(RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response CreateJobSchedule(RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNull(content, nameof(content)); @@ -4156,7 +4294,7 @@ public virtual Response CreateJobSchedule(RequestContent content, int? 
timeOutIn scope.Start(); try { - using HttpMessage message = CreateCreateJobScheduleRequest(content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreateJobScheduleRequest(content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -4170,7 +4308,7 @@ public virtual Response CreateJobSchedule(RequestContent content, int? timeOutIn /// The ID of the Job to which the Task is to be created. /// The Task to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4183,15 +4321,15 @@ public virtual Response CreateJobSchedule(RequestContent content, int? timeOutIn /// Task has not completed within 180 days of being added it will be terminated by /// the Batch service and left in whatever state it was in at that time. /// - /// - public virtual async Task CreateTaskAsync(string jobId, BatchTaskCreateContent task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task CreateTaskAsync(string jobId, BatchTaskCreateOptions task, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(task, nameof(task)); using RequestContent content = task.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await CreateTaskAsync(jobId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await CreateTaskAsync(jobId, content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return response; } @@ -4199,7 +4337,7 @@ public virtual async Task CreateTaskAsync(string jobId, BatchTaskCreat /// The ID of the Job to which the Task is to be created. /// The Task to be created. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4212,15 +4350,15 @@ public virtual async Task CreateTaskAsync(string jobId, BatchTaskCreat /// Task has not completed within 180 days of being added it will be terminated by /// the Batch service and left in whatever state it was in at that time. /// - /// - public virtual Response CreateTask(string jobId, BatchTaskCreateContent task, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response CreateTask(string jobId, BatchTaskCreateOptions task, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(task, nameof(task)); using RequestContent content = task.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = CreateTask(jobId, content, timeOutInSeconds, ocpdate, context); + Response response = CreateTask(jobId, content, timeOutInSeconds, ocpDate, context); return response; } @@ -4234,7 +4372,7 @@ public virtual Response CreateTask(string jobId, BatchTaskCreateContent task, in /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -4242,7 +4380,7 @@ public virtual Response CreateTask(string jobId, BatchTaskCreateContent task, in /// The ID of the Job to which the Task is to be created. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4252,8 +4390,8 @@ public virtual Response CreateTask(string jobId, BatchTaskCreateContent task, in /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task CreateTaskAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task CreateTaskAsync(string jobId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(content, nameof(content)); @@ -4262,7 +4400,7 @@ public virtual async Task CreateTaskAsync(string jobId, RequestContent scope.Start(); try { - using HttpMessage message = CreateCreateTaskRequest(jobId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreateTaskRequest(jobId, content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4282,7 +4420,7 @@ public virtual async Task CreateTaskAsync(string jobId, RequestContent /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -4290,7 +4428,7 @@ public virtual async Task CreateTaskAsync(string jobId, RequestContent /// The ID of the Job to which the Task is to be created. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4300,8 +4438,8 @@ public virtual async Task CreateTaskAsync(string jobId, RequestContent /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response CreateTask(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response CreateTask(string jobId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(content, nameof(content)); @@ -4310,7 +4448,7 @@ public virtual Response CreateTask(string jobId, RequestContent content, int? ti scope.Start(); try { - using HttpMessage message = CreateCreateTaskRequest(jobId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreateTaskRequest(jobId, content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -4324,7 +4462,7 @@ public virtual Response CreateTask(string jobId, RequestContent content, int? ti /// The ID of the Job to which the Task collection is to be added. /// The Tasks to be added. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4348,23 +4486,23 @@ public virtual Response CreateTask(string jobId, RequestContent content, int? ti /// completed within 180 days of being added it will be terminated by the Batch /// service and left in whatever state it was in at that time. /// - /// - public virtual async Task> CreateTaskCollectionAsync(string jobId, BatchTaskGroup taskCollection, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> CreateTaskCollectionAsync(string jobId, BatchTaskGroup taskCollection, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(taskCollection, nameof(taskCollection)); using RequestContent content = taskCollection.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await CreateTaskCollectionAsync(jobId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); - return Response.FromValue(BatchTaskAddCollectionResult.FromResponse(response), response); + Response response = await CreateTaskCollectionAsync(jobId, content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); + return Response.FromValue(BatchCreateTaskCollectionResult.FromResponse(response), response); } /// Adds a collection of Tasks to the specified Job. /// The ID of the Job to which the Task collection is to be added. /// The Tasks to be added. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4388,16 +4526,16 @@ public virtual async Task> CreateTaskColl /// completed within 180 days of being added it will be terminated by the Batch /// service and left in whatever state it was in at that time. /// - /// - public virtual Response CreateTaskCollection(string jobId, BatchTaskGroup taskCollection, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response CreateTaskCollection(string jobId, BatchTaskGroup taskCollection, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(taskCollection, nameof(taskCollection)); using RequestContent content = taskCollection.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = CreateTaskCollection(jobId, content, timeOutInSeconds, ocpdate, context); - return Response.FromValue(BatchTaskAddCollectionResult.FromResponse(response), response); + Response response = CreateTaskCollection(jobId, content, timeOutInSeconds, ocpDate, context); + return Response.FromValue(BatchCreateTaskCollectionResult.FromResponse(response), response); } /// @@ -4410,7 +4548,7 @@ public virtual Response CreateTaskCollection(strin /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -4418,7 +4556,7 @@ public virtual Response CreateTaskCollection(strin /// The ID of the Job to which the Task collection is to be added. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4428,8 +4566,8 @@ public virtual Response CreateTaskCollection(strin /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task CreateTaskCollectionAsync(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) + /// + public virtual async Task CreateTaskCollectionAsync(string jobId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(content, nameof(content)); @@ -4438,7 +4576,7 @@ public virtual async Task CreateTaskCollectionAsync(string jobId, Requ scope.Start(); try { - using HttpMessage message = CreateCreateTaskCollectionRequest(jobId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreateTaskCollectionRequest(jobId, content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4458,7 +4596,7 @@ public virtual async Task CreateTaskCollectionAsync(string jobId, Requ /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -4466,7 +4604,7 @@ public virtual async Task CreateTaskCollectionAsync(string jobId, Requ /// The ID of the Job to which the Task collection is to be added. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4476,8 +4614,8 @@ public virtual async Task CreateTaskCollectionAsync(string jobId, Requ /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. 
- /// - public virtual Response CreateTaskCollection(string jobId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response CreateTaskCollection(string jobId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNull(content, nameof(content)); @@ -4486,7 +4624,7 @@ public virtual Response CreateTaskCollection(string jobId, RequestContent conten scope.Start(); try { - using HttpMessage message = CreateCreateTaskCollectionRequest(jobId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreateTaskCollectionRequest(jobId, content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -4510,7 +4648,7 @@ public virtual Response CreateTaskCollection(string jobId, RequestContent conten /// The ID of the Job from which to delete the Task. /// The ID of the Task to delete. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4521,8 +4659,8 @@ public virtual Response CreateTaskCollection(string jobId, RequestContent conten /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DeleteTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task DeleteTaskAsync(string jobId, string taskId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -4531,7 +4669,7 @@ public virtual async Task DeleteTaskAsync(string jobId, string taskId, scope.Start(); try { - using HttpMessage message = CreateDeleteTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDeleteTaskRequest(jobId, taskId, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4555,7 +4693,7 @@ public virtual async Task DeleteTaskAsync(string jobId, string taskId, /// The ID of the Job from which to delete the Task. /// The ID of the Task to delete. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4566,8 +4704,8 @@ public virtual async Task DeleteTaskAsync(string jobId, string taskId, /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DeleteTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response DeleteTask(string jobId, string taskId, TimeSpan? 
timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -4576,7 +4714,7 @@ public virtual Response DeleteTask(string jobId, string taskId, int? timeOutInSe scope.Start(); try { - using HttpMessage message = CreateDeleteTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateDeleteTaskRequest(jobId, taskId, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -4590,7 +4728,7 @@ public virtual Response DeleteTask(string jobId, string taskId, int? timeOutInSe /// The ID of the Job that contains the Task. /// The ID of the Task to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4606,14 +4744,14 @@ public virtual Response DeleteTask(string jobId, string taskId, int? timeOutInSe /// nodeInfo refer to the primary Task. Use the list subtasks API to retrieve /// information about subtasks. /// - /// - public virtual async Task> GetTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetTaskAsync(string jobId, string taskId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetTaskAsync(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context).ConfigureAwait(false); + Response response = await GetTaskAsync(jobId, taskId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context).ConfigureAwait(false); return Response.FromValue(BatchTask.FromResponse(response), response); } @@ -4621,7 +4759,7 @@ public virtual async Task> GetTaskAsync(string jobId, string /// The ID of the Job that contains the Task. /// The ID of the Task to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4637,14 +4775,14 @@ public virtual async Task> GetTaskAsync(string jobId, string /// nodeInfo refer to the primary Task. Use the list subtasks API to retrieve /// information about subtasks. /// - /// - public virtual Response GetTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetTask(string jobId, string taskId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, IEnumerable select = null, IEnumerable expand = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetTask(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + Response response = GetTask(jobId, taskId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context); return Response.FromValue(BatchTask.FromResponse(response), response); } @@ -4658,7 +4796,7 @@ public virtual Response GetTask(string jobId, string taskId, int? tim /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -4666,7 +4804,7 @@ public virtual Response GetTask(string jobId, string taskId, int? tim /// The ID of the Job that contains the Task. /// The ID of the Task to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4679,8 +4817,8 @@ public virtual Response GetTask(string jobId, string taskId, int? tim /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetTaskAsync(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + /// + public virtual async Task GetTaskAsync(string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -4689,7 +4827,7 @@ public virtual async Task GetTaskAsync(string jobId, string taskId, in scope.Start(); try { - using HttpMessage message = CreateGetTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + using HttpMessage message = CreateGetTaskRequest(jobId, taskId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4709,7 +4847,7 @@ public virtual async Task GetTaskAsync(string jobId, string taskId, in /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -4717,7 +4855,7 @@ public virtual async Task GetTaskAsync(string jobId, string taskId, in /// The ID of the Job that contains the Task. /// The ID of the Task to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4730,8 +4868,8 @@ public virtual async Task GetTaskAsync(string jobId, string taskId, in /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. 
/// The response returned from the service. - /// - public virtual Response GetTask(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + /// + public virtual Response GetTask(string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -4740,7 +4878,7 @@ public virtual Response GetTask(string jobId, string taskId, int? timeOutInSecon scope.Start(); try { - using HttpMessage message = CreateGetTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, expand, requestConditions, context); + using HttpMessage message = CreateGetTaskRequest(jobId, taskId, timeOutInSeconds, ocpDate, select, expand, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -4755,7 +4893,7 @@ public virtual Response GetTask(string jobId, string taskId, int? timeOutInSecon /// The ID of the Task to update. /// The Task to update. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4764,8 +4902,8 @@ public virtual Response GetTask(string jobId, string taskId, int? timeOutInSecon /// The cancellation token to use. /// , or is null. /// or is an empty string, and was expected to be non-empty. - /// - public virtual async Task ReplaceTaskAsync(string jobId, string taskId, BatchTask task, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual async Task ReplaceTaskAsync(string jobId, string taskId, BatchTask task, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -4773,7 +4911,7 @@ public virtual async Task ReplaceTaskAsync(string jobId, string taskId using RequestContent content = task.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await ReplaceTaskAsync(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context).ConfigureAwait(false); + Response response = await ReplaceTaskAsync(jobId, taskId, content, timeOutInSeconds, ocpDate, requestConditions, context).ConfigureAwait(false); return response; } @@ -4782,7 +4920,7 @@ public virtual async Task ReplaceTaskAsync(string jobId, string taskId /// The ID of the Task to update. /// The Task to update. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4791,8 +4929,8 @@ public virtual async Task ReplaceTaskAsync(string jobId, string taskId /// The cancellation token to use. /// , or is null. /// or is an empty string, and was expected to be non-empty. - /// - public virtual Response ReplaceTask(string jobId, string taskId, BatchTask task, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual Response ReplaceTask(string jobId, string taskId, BatchTask task, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -4800,7 +4938,7 @@ public virtual Response ReplaceTask(string jobId, string taskId, BatchTask task, using RequestContent content = task.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = ReplaceTask(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context); + Response response = ReplaceTask(jobId, taskId, content, timeOutInSeconds, ocpDate, requestConditions, context); return response; } @@ -4814,7 +4952,7 @@ public virtual Response ReplaceTask(string jobId, string taskId, BatchTask task, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -4823,7 +4961,7 @@ public virtual Response ReplaceTask(string jobId, string taskId, BatchTask task, /// The ID of the Task to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4834,8 +4972,8 @@ public virtual Response ReplaceTask(string jobId, string taskId, BatchTask task, /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. 
/// The response returned from the service. - /// - public virtual async Task ReplaceTaskAsync(string jobId, string taskId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task ReplaceTaskAsync(string jobId, string taskId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -4845,7 +4983,7 @@ public virtual async Task ReplaceTaskAsync(string jobId, string taskId scope.Start(); try { - using HttpMessage message = CreateReplaceTaskRequest(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateReplaceTaskRequest(jobId, taskId, content, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4865,7 +5003,7 @@ public virtual async Task ReplaceTaskAsync(string jobId, string taskId /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -4874,7 +5012,7 @@ public virtual async Task ReplaceTaskAsync(string jobId, string taskId /// The ID of the Task to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -4885,8 +5023,8 @@ public virtual async Task ReplaceTaskAsync(string jobId, string taskId /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response ReplaceTask(string jobId, string taskId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response ReplaceTask(string jobId, string taskId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -4896,7 +5034,7 @@ public virtual Response ReplaceTask(string jobId, string taskId, RequestContent scope.Start(); try { - using HttpMessage message = CreateReplaceTaskRequest(jobId, taskId, content, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateReplaceTaskRequest(jobId, taskId, content, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -4920,7 +5058,7 @@ public virtual Response ReplaceTask(string jobId, string taskId, RequestContent /// The ID of the Job containing the Task. /// The ID of the Task to terminate. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -4931,8 +5069,8 @@ public virtual Response ReplaceTask(string jobId, string taskId, RequestContent /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task TerminateTaskAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task TerminateTaskAsync(string jobId, string taskId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -4941,7 +5079,7 @@ public virtual async Task TerminateTaskAsync(string jobId, string task scope.Start(); try { - using HttpMessage message = CreateTerminateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateTerminateTaskRequest(jobId, taskId, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -4965,7 +5103,7 @@ public virtual async Task TerminateTaskAsync(string jobId, string task /// The ID of the Job containing the Task. /// The ID of the Task to terminate. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -4976,8 +5114,8 @@ public virtual async Task TerminateTaskAsync(string jobId, string task /// or is an empty string, and was expected to be non-empty. 
/// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response TerminateTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response TerminateTask(string jobId, string taskId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -4986,7 +5124,7 @@ public virtual Response TerminateTask(string jobId, string taskId, int? timeOutI scope.Start(); try { - using HttpMessage message = CreateTerminateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateTerminateTaskRequest(jobId, taskId, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -5011,7 +5149,7 @@ public virtual Response TerminateTask(string jobId, string taskId, int? timeOutI /// The ID of the Job containing the Task. /// The ID of the Task to reactivate. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5022,8 +5160,8 @@ public virtual Response TerminateTask(string jobId, string taskId, int? timeOutI /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task ReactivateTaskAsync(string jobId, string taskId, int? 
timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual async Task ReactivateTaskAsync(string jobId, string taskId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -5032,7 +5170,7 @@ public virtual async Task ReactivateTaskAsync(string jobId, string tas scope.Start(); try { - using HttpMessage message = CreateReactivateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateReactivateTaskRequest(jobId, taskId, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -5057,7 +5195,7 @@ public virtual async Task ReactivateTaskAsync(string jobId, string tas /// The ID of the Job containing the Task. /// The ID of the Task to reactivate. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5068,8 +5206,8 @@ public virtual async Task ReactivateTaskAsync(string jobId, string tas /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response ReactivateTask(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + /// + public virtual Response ReactivateTask(string jobId, string taskId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -5078,7 +5216,7 @@ public virtual Response ReactivateTask(string jobId, string taskId, int? timeOut scope.Start(); try { - using HttpMessage message = CreateReactivateTaskRequest(jobId, taskId, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateReactivateTaskRequest(jobId, taskId, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -5103,7 +5241,7 @@ public virtual Response ReactivateTask(string jobId, string taskId, int? timeOut /// The ID of the Task whose file you want to retrieve. /// The path to the Task file that you want to get the content of. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5119,8 +5257,8 @@ public virtual Response ReactivateTask(string jobId, string taskId, int? timeOut /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DeleteTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? 
recursive = null, RequestContext context = null) + /// + public virtual async Task DeleteTaskFileAsync(string jobId, string taskId, string filePath, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? recursive = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -5130,7 +5268,7 @@ public virtual async Task DeleteTaskFileAsync(string jobId, string tas scope.Start(); try { - using HttpMessage message = CreateDeleteTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, recursive, context); + using HttpMessage message = CreateDeleteTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpDate, recursive, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -5155,7 +5293,7 @@ public virtual async Task DeleteTaskFileAsync(string jobId, string tas /// The ID of the Task whose file you want to retrieve. /// The path to the Task file that you want to get the content of. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5171,8 +5309,8 @@ public virtual async Task DeleteTaskFileAsync(string jobId, string tas /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DeleteTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? 
recursive = null, RequestContext context = null) + /// + public virtual Response DeleteTaskFile(string jobId, string taskId, string filePath, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? recursive = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -5182,7 +5320,7 @@ public virtual Response DeleteTaskFile(string jobId, string taskId, string fileP scope.Start(); try { - using HttpMessage message = CreateDeleteTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, recursive, context); + using HttpMessage message = CreateDeleteTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpDate, recursive, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -5197,7 +5335,7 @@ public virtual Response DeleteTaskFile(string jobId, string taskId, string fileP /// The ID of the Task whose file you want to retrieve. /// The path to the Task file that you want to get the content of. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5210,15 +5348,15 @@ public virtual Response DeleteTaskFile(string jobId, string taskId, string fileP /// The cancellation token to use. /// , or is null. /// , or is an empty string, and was expected to be non-empty. - /// - public virtual async Task> GetTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetTaskFileAsync(string jobId, string taskId, string filePath, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetTaskFileAsync(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context).ConfigureAwait(false); + Response response = await GetTaskFileAsync(jobId, taskId, filePath, timeOutInSeconds, ocpDate, ocpRange, requestConditions, context).ConfigureAwait(false); return Response.FromValue(response.Content, response); } @@ -5227,7 +5365,7 @@ public virtual async Task> GetTaskFileAsync(string jobId, s /// The ID of the Task whose file you want to retrieve. /// The path to the Task file that you want to get the content of. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5240,15 +5378,15 @@ public virtual async Task> GetTaskFileAsync(string jobId, s /// The cancellation token to use. /// , or is null. /// , or is an empty string, and was expected to be non-empty. - /// - public virtual Response GetTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetTaskFile(string jobId, string taskId, string filePath, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetTaskFile(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + Response response = GetTaskFile(jobId, taskId, filePath, timeOutInSeconds, ocpDate, ocpRange, requestConditions, context); return Response.FromValue(response.Content, response); } @@ -5262,7 +5400,7 @@ public virtual Response GetTaskFile(string jobId, string taskId, str /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -5271,7 +5409,7 @@ public virtual Response GetTaskFile(string jobId, string taskId, str /// The ID of the Task whose file you want to retrieve. /// The path to the Task file that you want to get the content of. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5286,8 +5424,8 @@ public virtual Response GetTaskFile(string jobId, string taskId, str /// , or is an empty string, and was expected to be non-empty. 
/// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetTaskFileAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + /// + public virtual async Task GetTaskFileAsync(string jobId, string taskId, string filePath, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, string ocpRange, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -5306,7 +5444,7 @@ public virtual async Task GetTaskFileAsync(string jobId, string taskId scope.Start(); try { - using HttpMessage message = CreateGetTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + using HttpMessage message = CreateGetTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpDate, ocpRange, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -5326,7 +5464,7 @@ public virtual async Task GetTaskFileAsync(string jobId, string taskId /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -5335,7 +5473,7 @@ public virtual async Task GetTaskFileAsync(string jobId, string taskId /// The ID of the Task whose file you want to retrieve. /// The path to the Task file that you want to get the content of. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5350,8 +5488,8 @@ public virtual async Task GetTaskFileAsync(string jobId, string taskId /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetTaskFile(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + /// + public virtual Response GetTaskFile(string jobId, string taskId, string filePath, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, string ocpRange, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -5370,7 +5508,7 @@ public virtual Response GetTaskFile(string jobId, string taskId, string filePath scope.Start(); try { - using HttpMessage message = CreateGetTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + using HttpMessage message = CreateGetTaskFileRequest(jobId, taskId, filePath, timeOutInSeconds, ocpDate, ocpRange, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -5395,7 +5533,7 @@ public virtual Response GetTaskFile(string jobId, string taskId, string filePath /// The ID of the Task whose file you want to retrieve. /// The path to the Task file that you want to get the content of. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5406,7 +5544,7 @@ public virtual Response GetTaskFile(string jobId, string taskId, string filePath /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - internal virtual async Task GetTaskFilePropertiesInternalAsync(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual async Task GetTaskFilePropertiesInternalAsync(string jobId, string taskId, string filePath, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -5425,7 +5563,7 @@ internal virtual async Task GetTaskFilePropertiesInternalAsync(string scope.Start(); try { - using HttpMessage message = CreateGetTaskFilePropertiesInternalRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateGetTaskFilePropertiesInternalRequest(jobId, taskId, filePath, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -5450,7 +5588,7 @@ internal virtual async Task GetTaskFilePropertiesInternalAsync(string /// The ID of the Task whose file you want to retrieve. /// The path to the Task file that you want to get the content of. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5461,7 +5599,7 @@ internal virtual async Task GetTaskFilePropertiesInternalAsync(string /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - internal virtual Response GetTaskFilePropertiesInternal(string jobId, string taskId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual Response GetTaskFilePropertiesInternal(string jobId, string taskId, string filePath, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); @@ -5480,7 +5618,7 @@ internal virtual Response GetTaskFilePropertiesInternal(string jobId, string tas scope.Start(); try { - using HttpMessage message = CreateGetTaskFilePropertiesInternalRequest(jobId, taskId, filePath, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateGetTaskFilePropertiesInternalRequest(jobId, taskId, filePath, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -5495,7 +5633,7 @@ internal virtual Response GetTaskFilePropertiesInternal(string jobId, string tas /// The ID of the machine on which you want to create a user Account. /// The options to use for creating the user. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5507,8 +5645,8 @@ internal virtual Response GetTaskFilePropertiesInternal(string jobId, string tas /// You can add a user Account to a Compute Node only when it is in the idle or /// running state. /// - /// - public virtual async Task CreateNodeUserAsync(string poolId, string nodeId, BatchNodeUserCreateContent user, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task CreateNodeUserAsync(string poolId, string nodeId, BatchNodeUserCreateOptions user, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -5516,7 +5654,7 @@ public virtual async Task CreateNodeUserAsync(string poolId, string no using RequestContent content = user.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await CreateNodeUserAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await CreateNodeUserAsync(poolId, nodeId, content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return response; } @@ -5525,7 +5663,7 @@ public virtual async Task CreateNodeUserAsync(string poolId, string no /// The ID of the machine on which you want to create a user Account. /// The options to use for creating the user. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -5537,8 +5675,8 @@ public virtual async Task CreateNodeUserAsync(string poolId, string no /// You can add a user Account to a Compute Node only when it is in the idle or /// running state. /// - /// - public virtual Response CreateNodeUser(string poolId, string nodeId, BatchNodeUserCreateContent user, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response CreateNodeUser(string poolId, string nodeId, BatchNodeUserCreateOptions user, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -5546,7 +5684,7 @@ public virtual Response CreateNodeUser(string poolId, string nodeId, BatchNodeUs using RequestContent content = user.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = CreateNodeUser(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + Response response = CreateNodeUser(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return response; } @@ -5560,7 +5698,7 @@ public virtual Response CreateNodeUser(string poolId, string nodeId, BatchNodeUs /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -5569,7 +5707,7 @@ public virtual Response CreateNodeUser(string poolId, string nodeId, BatchNodeUs /// The ID of the machine on which you want to create a user Account. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5579,8 +5717,8 @@ public virtual Response CreateNodeUser(string poolId, string nodeId, BatchNodeUs /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task CreateNodeUserAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task CreateNodeUserAsync(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -5590,7 +5728,7 @@ public virtual async Task CreateNodeUserAsync(string poolId, string no scope.Start(); try { - using HttpMessage message = CreateCreateNodeUserRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreateNodeUserRequest(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -5610,7 +5748,7 @@ public virtual async Task CreateNodeUserAsync(string poolId, string no /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -5619,7 +5757,7 @@ public virtual async Task CreateNodeUserAsync(string poolId, string no /// The ID of the machine on which you want to create a user Account. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5629,8 +5767,8 @@ public virtual async Task CreateNodeUserAsync(string poolId, string no /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response CreateNodeUser(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response CreateNodeUser(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -5640,7 +5778,7 @@ public virtual Response CreateNodeUser(string poolId, string nodeId, RequestCont scope.Start(); try { - using HttpMessage message = CreateCreateNodeUserRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateCreateNodeUserRequest(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -5665,7 +5803,7 @@ public virtual Response CreateNodeUser(string poolId, string nodeId, RequestCont /// The ID of the machine on which you want to delete a user Account. /// The name of the user Account to delete. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5675,8 +5813,8 @@ public virtual Response CreateNodeUser(string poolId, string nodeId, RequestCont /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DeleteNodeUserAsync(string poolId, string nodeId, string userName, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task DeleteNodeUserAsync(string poolId, string nodeId, string userName, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -5686,7 +5824,7 @@ public virtual async Task DeleteNodeUserAsync(string poolId, string no scope.Start(); try { - using HttpMessage message = CreateDeleteNodeUserRequest(poolId, nodeId, userName, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateDeleteNodeUserRequest(poolId, nodeId, userName, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -5711,7 +5849,7 @@ public virtual async Task DeleteNodeUserAsync(string poolId, string no /// The ID of the machine on which you want to delete a user Account. /// The name of the user Account to delete. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -5721,8 +5859,8 @@ public virtual async Task DeleteNodeUserAsync(string poolId, string no /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DeleteNodeUser(string poolId, string nodeId, string userName, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response DeleteNodeUser(string poolId, string nodeId, string userName, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -5732,7 +5870,7 @@ public virtual Response DeleteNodeUser(string poolId, string nodeId, string user scope.Start(); try { - using HttpMessage message = CreateDeleteNodeUserRequest(poolId, nodeId, userName, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateDeleteNodeUserRequest(poolId, nodeId, userName, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -5746,15 +5884,15 @@ public virtual Response DeleteNodeUser(string poolId, string nodeId, string user /// The ID of the Pool that contains the Compute Node. /// The ID of the machine on which you want to update a user Account. /// The name of the user Account to update. - /// The options to use for updating the user. + /// The options to use for updating the user. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The cancellation token to use. - /// , , or is null. + /// , , or is null. 
/// , or is an empty string, and was expected to be non-empty. /// /// This operation replaces of all the updatable properties of the Account. For @@ -5762,17 +5900,17 @@ public virtual Response DeleteNodeUser(string poolId, string nodeId, string user /// replaced with the default value, not left unmodified. You can update a user /// Account on a Compute Node only when it is in the idle or running state. /// - /// - public virtual async Task ReplaceNodeUserAsync(string poolId, string nodeId, string userName, BatchNodeUserUpdateContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task ReplaceNodeUserAsync(string poolId, string nodeId, string userName, BatchNodeUserUpdateOptions updateOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); Argument.AssertNotNullOrEmpty(userName, nameof(userName)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(updateOptions, nameof(updateOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = updateOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await ReplaceNodeUserAsync(poolId, nodeId, userName, content0, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await ReplaceNodeUserAsync(poolId, nodeId, userName, content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return response; } @@ -5780,15 +5918,15 @@ public virtual async Task ReplaceNodeUserAsync(string poolId, string n /// The ID of the Pool that contains the Compute Node. /// The ID of the machine on which you want to update a user Account. /// The name of the user Account to update. 
- /// The options to use for updating the user. + /// The options to use for updating the user. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The cancellation token to use. - /// , , or is null. + /// , , or is null. /// , or is an empty string, and was expected to be non-empty. /// /// This operation replaces of all the updatable properties of the Account. For @@ -5796,17 +5934,17 @@ public virtual async Task ReplaceNodeUserAsync(string poolId, string n /// replaced with the default value, not left unmodified. You can update a user /// Account on a Compute Node only when it is in the idle or running state. /// - /// - public virtual Response ReplaceNodeUser(string poolId, string nodeId, string userName, BatchNodeUserUpdateContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response ReplaceNodeUser(string poolId, string nodeId, string userName, BatchNodeUserUpdateOptions updateOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); Argument.AssertNotNullOrEmpty(userName, nameof(userName)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(updateOptions, nameof(updateOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = updateOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = ReplaceNodeUser(poolId, nodeId, userName, content0, timeOutInSeconds, ocpdate, context); + Response response = ReplaceNodeUser(poolId, nodeId, userName, content, timeOutInSeconds, ocpDate, context); return response; } @@ -5820,7 +5958,7 @@ public virtual Response ReplaceNodeUser(string poolId, string nodeId, string use /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -5830,7 +5968,7 @@ public virtual Response ReplaceNodeUser(string poolId, string nodeId, string use /// The name of the user Account to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5840,8 +5978,8 @@ public virtual Response ReplaceNodeUser(string poolId, string nodeId, string use /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. 
- /// - public virtual async Task ReplaceNodeUserAsync(string poolId, string nodeId, string userName, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task ReplaceNodeUserAsync(string poolId, string nodeId, string userName, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -5852,7 +5990,7 @@ public virtual async Task ReplaceNodeUserAsync(string poolId, string n scope.Start(); try { - using HttpMessage message = CreateReplaceNodeUserRequest(poolId, nodeId, userName, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateReplaceNodeUserRequest(poolId, nodeId, userName, content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -5872,7 +6010,7 @@ public virtual async Task ReplaceNodeUserAsync(string poolId, string n /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -5882,7 +6020,7 @@ public virtual async Task ReplaceNodeUserAsync(string poolId, string n /// The name of the user Account to update. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -5892,8 +6030,8 @@ public virtual async Task ReplaceNodeUserAsync(string poolId, string n /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response ReplaceNodeUser(string poolId, string nodeId, string userName, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response ReplaceNodeUser(string poolId, string nodeId, string userName, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -5904,7 +6042,7 @@ public virtual Response ReplaceNodeUser(string poolId, string nodeId, string use scope.Start(); try { - using HttpMessage message = CreateReplaceNodeUserRequest(poolId, nodeId, userName, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateReplaceNodeUserRequest(poolId, nodeId, userName, content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -5918,7 +6056,7 @@ public virtual Response ReplaceNodeUser(string poolId, string nodeId, string use /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5927,14 +6065,14 @@ public virtual Response ReplaceNodeUser(string poolId, string nodeId, string use /// The cancellation token to use. /// or is null. 
/// or is an empty string, and was expected to be non-empty. - /// - public virtual async Task> GetNodeAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetNodeAsync(string poolId, string nodeId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetNodeAsync(poolId, nodeId, timeOutInSeconds, ocpdate, select, context).ConfigureAwait(false); + Response response = await GetNodeAsync(poolId, nodeId, timeOutInSeconds, ocpDate, select, context).ConfigureAwait(false); return Response.FromValue(BatchNode.FromResponse(response), response); } @@ -5942,7 +6080,7 @@ public virtual async Task> GetNodeAsync(string poolId, strin /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5951,14 +6089,14 @@ public virtual async Task> GetNodeAsync(string poolId, strin /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// - public virtual Response GetNode(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetNode(string poolId, string nodeId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetNode(poolId, nodeId, timeOutInSeconds, ocpdate, select, context); + Response response = GetNode(poolId, nodeId, timeOutInSeconds, ocpDate, select, context); return Response.FromValue(BatchNode.FromResponse(response), response); } @@ -5972,7 +6110,7 @@ public virtual Response GetNode(string poolId, string nodeId, int? ti /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -5980,7 +6118,7 @@ public virtual Response GetNode(string poolId, string nodeId, int? ti /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -5991,8 +6129,8 @@ public virtual Response GetNode(string poolId, string nodeId, int? ti /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetNodeAsync(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable select, RequestContext context) + /// + public virtual async Task GetNodeAsync(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -6001,7 +6139,7 @@ public virtual async Task GetNodeAsync(string poolId, string nodeId, i scope.Start(); try { - using HttpMessage message = CreateGetNodeRequest(poolId, nodeId, timeOutInSeconds, ocpdate, select, context); + using HttpMessage message = CreateGetNodeRequest(poolId, nodeId, timeOutInSeconds, ocpDate, select, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -6021,7 +6159,7 @@ public virtual async Task GetNodeAsync(string poolId, string nodeId, i /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -6029,7 +6167,7 @@ public virtual async Task GetNodeAsync(string poolId, string nodeId, i /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6040,8 +6178,8 @@ public virtual async Task GetNodeAsync(string poolId, string nodeId, i /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetNode(string poolId, string nodeId, int? 
timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + /// + public virtual Response GetNode(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -6050,7 +6188,7 @@ public virtual Response GetNode(string poolId, string nodeId, int? timeOutInSeco scope.Start(); try { - using HttpMessage message = CreateGetNodeRequest(poolId, nodeId, timeOutInSeconds, ocpdate, select, context); + using HttpMessage message = CreateGetNodeRequest(poolId, nodeId, timeOutInSeconds, ocpDate, select, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -6063,9 +6201,9 @@ public virtual Response GetNode(string poolId, string nodeId, int? timeOutInSeco /// Restarts the specified Compute Node. /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to restart. - /// The options to use for rebooting the Compute Node. + /// The options to use for rebooting the Compute Node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6074,24 +6212,23 @@ public virtual Response GetNode(string poolId, string nodeId, int? timeOutInSeco /// or is null. /// or is an empty string, and was expected to be non-empty. /// You can restart a Compute Node only if it is in an idle or running state. - /// - public virtual async Task RebootNodeAsync(string poolId, string nodeId, BatchNodeRebootContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, CancellationToken cancellationToken = default) + internal virtual async Task RebootNodeInternalAsync(string poolId, string nodeId, BatchNodeRebootOptions options = null, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using RequestContent content = parameters?.ToRequestContent(); + using RequestContent content = options?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await RebootNodeAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await RebootNodeInternalAsync(poolId, nodeId, content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return response; } /// Restarts the specified Compute Node. /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to restart. - /// The options to use for rebooting the Compute Node. + /// The options to use for rebooting the Compute Node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6100,15 +6237,14 @@ public virtual async Task RebootNodeAsync(string poolId, string nodeId /// or is null. /// or is an empty string, and was expected to be non-empty. /// You can restart a Compute Node only if it is in an idle or running state. - /// - public virtual Response RebootNode(string poolId, string nodeId, BatchNodeRebootContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, CancellationToken cancellationToken = default) + internal virtual Response RebootNodeInternal(string poolId, string nodeId, BatchNodeRebootOptions options = null, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using RequestContent content = parameters?.ToRequestContent(); + using RequestContent content = options?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = RebootNode(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + Response response = RebootNodeInternal(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return response; } @@ -6122,7 +6258,7 @@ public virtual Response RebootNode(string poolId, string nodeId, BatchNodeReboot /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -6131,7 +6267,7 @@ public virtual Response RebootNode(string poolId, string nodeId, BatchNodeReboot /// The ID of the Compute Node that you want to restart. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6141,17 +6277,16 @@ public virtual Response RebootNode(string poolId, string nodeId, BatchNodeReboot /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. 
- /// - public virtual async Task RebootNodeAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + internal virtual async Task RebootNodeInternalAsync(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.RebootNode"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.RebootNodeInternal"); scope.Start(); try { - using HttpMessage message = CreateRebootNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateRebootNodeInternalRequest(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -6171,7 +6306,7 @@ public virtual async Task RebootNodeAsync(string poolId, string nodeId /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -6180,7 +6315,7 @@ public virtual async Task RebootNodeAsync(string poolId, string nodeId /// The ID of the Compute Node that you want to restart. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -6190,17 +6325,16 @@ public virtual async Task RebootNodeAsync(string poolId, string nodeId /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response RebootNode(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + internal virtual Response RebootNodeInternal(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.RebootNode"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.RebootNodeInternal"); scope.Start(); try { - using HttpMessage message = CreateRebootNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateRebootNodeInternalRequest(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -6224,7 +6358,7 @@ public virtual Response RebootNode(string poolId, string nodeId, RequestContent /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to restart. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6234,17 +6368,16 @@ public virtual Response RebootNode(string poolId, string nodeId, RequestContent /// or is an empty string, and was expected to be non-empty. 
/// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task StartNodeAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + internal virtual async Task StartNodeInternalAsync(string poolId, string nodeId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.StartNode"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.StartNodeInternal"); scope.Start(); try { - using HttpMessage message = CreateStartNodeRequest(poolId, nodeId, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateStartNodeInternalRequest(poolId, nodeId, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -6268,7 +6401,7 @@ public virtual async Task StartNodeAsync(string poolId, string nodeId, /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to restart. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6278,17 +6411,16 @@ public virtual async Task StartNodeAsync(string poolId, string nodeId, /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response StartNode(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) + internal virtual Response StartNodeInternal(string poolId, string nodeId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.StartNode"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.StartNodeInternal"); scope.Start(); try { - using HttpMessage message = CreateStartNodeRequest(poolId, nodeId, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateStartNodeInternalRequest(poolId, nodeId, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -6301,9 +6433,9 @@ public virtual Response StartNode(string poolId, string nodeId, int? timeOutInSe /// Reinstalls the operating system on the specified Compute Node. /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to restart. - /// The options to use for reimaging the Compute Node. + /// The options to use for reimaging the Compute Node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6316,24 +6448,23 @@ public virtual Response StartNode(string poolId, string nodeId, int? timeOutInSe /// idle or running state. This API can be invoked only on Pools created with the /// cloud service configuration property. /// - /// - public virtual async Task ReimageNodeAsync(string poolId, string nodeId, BatchNodeReimageContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, CancellationToken cancellationToken = default) + internal virtual async Task ReimageNodeInternalAsync(string poolId, string nodeId, BatchNodeReimageOptions options = null, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using RequestContent content = parameters?.ToRequestContent(); + using RequestContent content = options?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await ReimageNodeAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await ReimageNodeInternalAsync(poolId, nodeId, content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return response; } /// Reinstalls the operating system on the specified Compute Node. /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to restart. - /// The options to use for reimaging the Compute Node. + /// The options to use for reimaging the Compute Node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6346,15 +6477,14 @@ public virtual async Task ReimageNodeAsync(string poolId, string nodeI /// idle or running state. This API can be invoked only on Pools created with the /// cloud service configuration property. /// - /// - public virtual Response ReimageNode(string poolId, string nodeId, BatchNodeReimageContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, CancellationToken cancellationToken = default) + internal virtual Response ReimageNodeInternal(string poolId, string nodeId, BatchNodeReimageOptions options = null, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using RequestContent content = parameters?.ToRequestContent(); + using RequestContent content = options?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = ReimageNode(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + Response response = ReimageNodeInternal(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return response; } @@ -6368,7 +6498,7 @@ public virtual Response ReimageNode(string poolId, string nodeId, BatchNodeReima /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -6377,7 +6507,7 @@ public virtual Response ReimageNode(string poolId, string nodeId, BatchNodeReima /// The ID of the Compute Node that you want to restart. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6387,17 +6517,16 @@ public virtual Response ReimageNode(string poolId, string nodeId, BatchNodeReima /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. 
- /// - public virtual async Task ReimageNodeAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + internal virtual async Task ReimageNodeInternalAsync(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.ReimageNode"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReimageNodeInternal"); scope.Start(); try { - using HttpMessage message = CreateReimageNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateReimageNodeInternalRequest(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -6417,7 +6546,7 @@ public virtual async Task ReimageNodeAsync(string poolId, string nodeI /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -6426,7 +6555,7 @@ public virtual async Task ReimageNodeAsync(string poolId, string nodeI /// The ID of the Compute Node that you want to restart. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -6436,17 +6565,16 @@ public virtual async Task ReimageNodeAsync(string poolId, string nodeI /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response ReimageNode(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + internal virtual Response ReimageNodeInternal(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.ReimageNode"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.ReimageNodeInternal"); scope.Start(); try { - using HttpMessage message = CreateReimageNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateReimageNodeInternalRequest(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -6459,9 +6587,9 @@ public virtual Response ReimageNode(string poolId, string nodeId, RequestContent /// Deallocates the specified Compute Node. /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to restart. - /// The options to use for deallocating the Compute Node. + /// The options to use for deallocating the Compute Node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -6470,24 +6598,23 @@ public virtual Response ReimageNode(string poolId, string nodeId, RequestContent /// or is null. /// or is an empty string, and was expected to be non-empty. /// You can deallocate a Compute Node only if it is in an idle or running state. - /// - public virtual async Task DeallocateNodeAsync(string poolId, string nodeId, BatchNodeDeallocateContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + internal virtual async Task DeallocateNodeInternalAsync(string poolId, string nodeId, BatchNodeDeallocateOptions options = null, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using RequestContent content = parameters?.ToRequestContent(); + using RequestContent content = options?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await DeallocateNodeAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await DeallocateNodeInternalAsync(poolId, nodeId, content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return response; } /// Deallocates the specified Compute Node. /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node that you want to restart. - /// The options to use for deallocating the Compute Node. + /// The options to use for deallocating the Compute Node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -6496,15 +6623,14 @@ public virtual async Task DeallocateNodeAsync(string poolId, string no /// or is null. /// or is an empty string, and was expected to be non-empty. /// You can deallocate a Compute Node only if it is in an idle or running state. - /// - public virtual Response DeallocateNode(string poolId, string nodeId, BatchNodeDeallocateContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + internal virtual Response DeallocateNodeInternal(string poolId, string nodeId, BatchNodeDeallocateOptions options = null, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using RequestContent content = parameters?.ToRequestContent(); + using RequestContent content = options?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = DeallocateNode(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + Response response = DeallocateNodeInternal(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return response; } @@ -6518,7 +6644,7 @@ public virtual Response DeallocateNode(string poolId, string nodeId, BatchNodeDe /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -6527,7 +6653,7 @@ public virtual Response DeallocateNode(string poolId, string nodeId, BatchNodeDe /// The ID of the Compute Node that you want to restart. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6537,17 +6663,16 @@ public virtual Response DeallocateNode(string poolId, string nodeId, BatchNodeDe /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DeallocateNodeAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + internal virtual async Task DeallocateNodeInternalAsync(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeallocateNode"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeallocateNodeInternal"); scope.Start(); try { - using HttpMessage message = CreateDeallocateNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateDeallocateNodeInternalRequest(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -6567,7 +6692,7 @@ public virtual async Task DeallocateNodeAsync(string poolId, string no /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -6576,7 +6701,7 @@ public virtual async Task DeallocateNodeAsync(string poolId, string no /// The ID of the Compute Node that you want to restart. /// The content to send as the body of the request. 
/// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6586,17 +6711,16 @@ public virtual async Task DeallocateNodeAsync(string poolId, string no /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DeallocateNode(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + internal virtual Response DeallocateNodeInternal(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using var scope = ClientDiagnostics.CreateScope("BatchClient.DeallocateNode"); + using var scope = ClientDiagnostics.CreateScope("BatchClient.DeallocateNodeInternal"); scope.Start(); try { - using HttpMessage message = CreateDeallocateNodeRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateDeallocateNodeInternalRequest(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -6609,9 +6733,9 @@ public virtual Response DeallocateNode(string poolId, string nodeId, RequestCont /// Disables Task scheduling on the specified Compute Node. /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node on which you want to disable Task scheduling. - /// The options to use for disabling scheduling on the Compute Node. 
+ /// The options to use for disabling scheduling on the Compute Node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6623,24 +6747,24 @@ public virtual Response DeallocateNode(string poolId, string nodeId, RequestCont /// You can disable Task scheduling on a Compute Node only if its current /// scheduling state is enabled. /// - /// - public virtual async Task DisableNodeSchedulingAsync(string poolId, string nodeId, BatchNodeDisableSchedulingContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task DisableNodeSchedulingAsync(string poolId, string nodeId, BatchNodeDisableSchedulingOptions options = null, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using RequestContent content = parameters?.ToRequestContent(); + using RequestContent content = options?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await DisableNodeSchedulingAsync(poolId, nodeId, content, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await DisableNodeSchedulingAsync(poolId, nodeId, content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return response; } /// Disables Task scheduling on the specified Compute Node. /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node on which you want to disable Task scheduling. 
- /// The options to use for disabling scheduling on the Compute Node. + /// The options to use for disabling scheduling on the Compute Node. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6652,15 +6776,15 @@ public virtual async Task DisableNodeSchedulingAsync(string poolId, st /// You can disable Task scheduling on a Compute Node only if its current /// scheduling state is enabled. /// - /// - public virtual Response DisableNodeScheduling(string poolId, string nodeId, BatchNodeDisableSchedulingContent parameters = null, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response DisableNodeScheduling(string poolId, string nodeId, BatchNodeDisableSchedulingOptions options = null, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - using RequestContent content = parameters?.ToRequestContent(); + using RequestContent content = options?.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = DisableNodeScheduling(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + Response response = DisableNodeScheduling(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return response; } @@ -6674,7 +6798,7 @@ public virtual Response DisableNodeScheduling(string poolId, string nodeId, Batc /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. 
+ /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -6683,7 +6807,7 @@ public virtual Response DisableNodeScheduling(string poolId, string nodeId, Batc /// The ID of the Compute Node on which you want to disable Task scheduling. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6693,8 +6817,8 @@ public virtual Response DisableNodeScheduling(string poolId, string nodeId, Batc /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DisableNodeSchedulingAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task DisableNodeSchedulingAsync(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -6703,7 +6827,7 @@ public virtual async Task DisableNodeSchedulingAsync(string poolId, st scope.Start(); try { - using HttpMessage message = CreateDisableNodeSchedulingRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateDisableNodeSchedulingRequest(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -6723,7 +6847,7 @@ public virtual async Task DisableNodeSchedulingAsync(string poolId, st /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -6732,7 +6856,7 @@ public virtual async Task DisableNodeSchedulingAsync(string poolId, st /// The ID of the Compute Node on which you want to disable Task scheduling. /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6742,8 +6866,8 @@ public virtual async Task DisableNodeSchedulingAsync(string poolId, st /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DisableNodeScheduling(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, RequestContext context = null) + /// + public virtual Response DisableNodeScheduling(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -6752,7 +6876,7 @@ public virtual Response DisableNodeScheduling(string poolId, string nodeId, Requ scope.Start(); try { - using HttpMessage message = CreateDisableNodeSchedulingRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateDisableNodeSchedulingRequest(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -6776,7 +6900,7 @@ public virtual Response DisableNodeScheduling(string poolId, string nodeId, Requ /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node on which you want to enable Task scheduling. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6786,8 +6910,8 @@ public virtual Response DisableNodeScheduling(string poolId, string nodeId, Requ /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task EnableNodeSchedulingAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task EnableNodeSchedulingAsync(string poolId, string nodeId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -6796,7 +6920,7 @@ public virtual async Task EnableNodeSchedulingAsync(string poolId, str scope.Start(); try { - using HttpMessage message = CreateEnableNodeSchedulingRequest(poolId, nodeId, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateEnableNodeSchedulingRequest(poolId, nodeId, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -6820,7 +6944,7 @@ public virtual async Task EnableNodeSchedulingAsync(string poolId, str /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node on which you want to enable Task scheduling. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6830,8 +6954,8 @@ public virtual async Task EnableNodeSchedulingAsync(string poolId, str /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response EnableNodeScheduling(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response EnableNodeScheduling(string poolId, string nodeId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -6840,7 +6964,7 @@ public virtual Response EnableNodeScheduling(string poolId, string nodeId, int? scope.Start(); try { - using HttpMessage message = CreateEnableNodeSchedulingRequest(poolId, nodeId, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateEnableNodeSchedulingRequest(poolId, nodeId, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -6854,7 +6978,7 @@ public virtual Response EnableNodeScheduling(string poolId, string nodeId, int? /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node for which to obtain the remote login settings. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6866,14 +6990,14 @@ public virtual Response EnableNodeScheduling(string poolId, string nodeId, int? /// Before you can remotely login to a Compute Node using the remote login settings, /// you must create a user Account on the Compute Node. /// - /// - public virtual async Task> GetNodeRemoteLoginSettingsAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetNodeRemoteLoginSettingsAsync(string poolId, string nodeId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetNodeRemoteLoginSettingsAsync(poolId, nodeId, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await GetNodeRemoteLoginSettingsAsync(poolId, nodeId, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return Response.FromValue(BatchNodeRemoteLoginSettings.FromResponse(response), response); } @@ -6881,7 +7005,7 @@ public virtual async Task> GetNodeRemoteL /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node for which to obtain the remote login settings. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6893,14 +7017,14 @@ public virtual async Task> GetNodeRemoteL /// Before you can remotely login to a Compute Node using the remote login settings, /// you must create a user Account on the Compute Node. /// - /// - public virtual Response GetNodeRemoteLoginSettings(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetNodeRemoteLoginSettings(string poolId, string nodeId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? 
ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetNodeRemoteLoginSettings(poolId, nodeId, timeOutInSeconds, ocpdate, context); + Response response = GetNodeRemoteLoginSettings(poolId, nodeId, timeOutInSeconds, ocpDate, context); return Response.FromValue(BatchNodeRemoteLoginSettings.FromResponse(response), response); } @@ -6914,7 +7038,7 @@ public virtual Response GetNodeRemoteLoginSettings /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -6922,7 +7046,7 @@ public virtual Response GetNodeRemoteLoginSettings /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node for which to obtain the remote login settings. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6932,8 +7056,8 @@ public virtual Response GetNodeRemoteLoginSettings /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetNodeRemoteLoginSettingsAsync(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + /// + public virtual async Task GetNodeRemoteLoginSettingsAsync(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -6942,7 +7066,7 @@ public virtual async Task GetNodeRemoteLoginSettingsAsync(string poolI scope.Start(); try { - using HttpMessage message = CreateGetNodeRemoteLoginSettingsRequest(poolId, nodeId, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateGetNodeRemoteLoginSettingsRequest(poolId, nodeId, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -6962,7 +7086,7 @@ public virtual async Task GetNodeRemoteLoginSettingsAsync(string poolI /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -6970,7 +7094,7 @@ public virtual async Task GetNodeRemoteLoginSettingsAsync(string poolI /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node for which to obtain the remote login settings. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -6980,8 +7104,8 @@ public virtual async Task GetNodeRemoteLoginSettingsAsync(string poolI /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetNodeRemoteLoginSettings(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + /// + public virtual Response GetNodeRemoteLoginSettings(string poolId, string nodeId, TimeSpan? 
timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -6990,7 +7114,7 @@ public virtual Response GetNodeRemoteLoginSettings(string poolId, string nodeId, scope.Start(); try { - using HttpMessage message = CreateGetNodeRemoteLoginSettingsRequest(poolId, nodeId, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateGetNodeRemoteLoginSettingsRequest(poolId, nodeId, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -7009,15 +7133,15 @@ public virtual Response GetNodeRemoteLoginSettings(string poolId, string nodeId, /// The ID of the Compute Node for which you want to get the Remote Desktop /// Protocol file. /// - /// The Azure Batch service log files upload options. + /// The Azure Batch service log files upload options. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The cancellation token to use. - /// , or is null. + /// , or is null. /// or is an empty string, and was expected to be non-empty. /// /// This is for gathering Azure Batch service log files in an automated fashion @@ -7025,16 +7149,16 @@ public virtual Response GetNodeRemoteLoginSettings(string poolId, string nodeId, /// Azure support. The Azure Batch service log files should be shared with Azure /// support to aid in debugging issues with the Batch service. /// - /// - public virtual async Task> UploadNodeLogsAsync(string poolId, string nodeId, UploadBatchServiceLogsContent content, int? timeOutInSeconds = null, DateTimeOffset? 
ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> UploadNodeLogsAsync(string poolId, string nodeId, UploadBatchServiceLogsOptions uploadOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(uploadOptions, nameof(uploadOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = uploadOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await UploadNodeLogsAsync(poolId, nodeId, content0, timeOutInSeconds, ocpdate, context).ConfigureAwait(false); + Response response = await UploadNodeLogsAsync(poolId, nodeId, content, timeOutInSeconds, ocpDate, context).ConfigureAwait(false); return Response.FromValue(UploadBatchServiceLogsResult.FromResponse(response), response); } @@ -7047,15 +7171,15 @@ public virtual async Task> UploadNodeLogs /// The ID of the Compute Node for which you want to get the Remote Desktop /// Protocol file. /// - /// The Azure Batch service log files upload options. + /// The Azure Batch service log files upload options. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. /// /// The cancellation token to use. - /// , or is null. + /// , or is null. /// or is an empty string, and was expected to be non-empty. 
/// /// This is for gathering Azure Batch service log files in an automated fashion @@ -7063,16 +7187,16 @@ public virtual async Task> UploadNodeLogs /// Azure support. The Azure Batch service log files should be shared with Azure /// support to aid in debugging issues with the Batch service. /// - /// - public virtual Response UploadNodeLogs(string poolId, string nodeId, UploadBatchServiceLogsContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, CancellationToken cancellationToken = default) + /// + public virtual Response UploadNodeLogs(string poolId, string nodeId, UploadBatchServiceLogsOptions uploadOptions, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - Argument.AssertNotNull(content, nameof(content)); + Argument.AssertNotNull(uploadOptions, nameof(uploadOptions)); - using RequestContent content0 = content.ToRequestContent(); + using RequestContent content = uploadOptions.ToRequestContent(); RequestContext context = FromCancellationToken(cancellationToken); - Response response = UploadNodeLogs(poolId, nodeId, content0, timeOutInSeconds, ocpdate, context); + Response response = UploadNodeLogs(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return Response.FromValue(UploadBatchServiceLogsResult.FromResponse(response), response); } @@ -7087,7 +7211,7 @@ public virtual Response UploadNodeLogs(string pool /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -7099,7 +7223,7 @@ public virtual Response UploadNodeLogs(string pool /// /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7109,8 +7233,8 @@ public virtual Response UploadNodeLogs(string pool /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task UploadNodeLogsAsync(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual async Task UploadNodeLogsAsync(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -7120,7 +7244,7 @@ public virtual async Task UploadNodeLogsAsync(string poolId, string no scope.Start(); try { - using HttpMessage message = CreateUploadNodeLogsRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateUploadNodeLogsRequest(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -7141,7 +7265,7 @@ public virtual async Task UploadNodeLogsAsync(string poolId, string no /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -7153,7 +7277,7 @@ public virtual async Task UploadNodeLogsAsync(string poolId, string no /// /// The content to send as the body of the request. /// The maximum time that the server can spend processing the request, in seconds. 
The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7163,8 +7287,8 @@ public virtual async Task UploadNodeLogsAsync(string poolId, string no /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response UploadNodeLogs(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestContext context = null) + /// + public virtual Response UploadNodeLogs(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -7174,7 +7298,7 @@ public virtual Response UploadNodeLogs(string poolId, string nodeId, RequestCont scope.Start(); try { - using HttpMessage message = CreateUploadNodeLogsRequest(poolId, nodeId, content, timeOutInSeconds, ocpdate, context); + using HttpMessage message = CreateUploadNodeLogsRequest(poolId, nodeId, content, timeOutInSeconds, ocpDate, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -7189,7 +7313,7 @@ public virtual Response UploadNodeLogs(string poolId, string nodeId, RequestCont /// The ID of the Compute Node that contains the extensions. /// The name of the Compute Node Extension that you want to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7198,15 +7322,15 @@ public virtual Response UploadNodeLogs(string poolId, string nodeId, RequestCont /// The cancellation token to use. /// , or is null. /// , or is an empty string, and was expected to be non-empty. - /// - public virtual async Task> GetNodeExtensionAsync(string poolId, string nodeId, string extensionName, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetNodeExtensionAsync(string poolId, string nodeId, string extensionName, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); Argument.AssertNotNullOrEmpty(extensionName, nameof(extensionName)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetNodeExtensionAsync(poolId, nodeId, extensionName, timeOutInSeconds, ocpdate, select, context).ConfigureAwait(false); + Response response = await GetNodeExtensionAsync(poolId, nodeId, extensionName, timeOutInSeconds, ocpDate, select, context).ConfigureAwait(false); return Response.FromValue(BatchNodeVMExtension.FromResponse(response), response); } @@ -7215,7 +7339,7 @@ public virtual async Task> GetNodeExtensionAsync( /// The ID of the Compute Node that contains the extensions. /// The name of the Compute Node Extension that you want to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7224,15 +7348,15 @@ public virtual async Task> GetNodeExtensionAsync( /// The cancellation token to use. /// , or is null. /// , or is an empty string, and was expected to be non-empty. - /// - public virtual Response GetNodeExtension(string poolId, string nodeId, string extensionName, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetNodeExtension(string poolId, string nodeId, string extensionName, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); Argument.AssertNotNullOrEmpty(extensionName, nameof(extensionName)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetNodeExtension(poolId, nodeId, extensionName, timeOutInSeconds, ocpdate, select, context); + Response response = GetNodeExtension(poolId, nodeId, extensionName, timeOutInSeconds, ocpDate, select, context); return Response.FromValue(BatchNodeVMExtension.FromResponse(response), response); } @@ -7246,7 +7370,7 @@ public virtual Response GetNodeExtension(string poolId, st /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -7255,7 +7379,7 @@ public virtual Response GetNodeExtension(string poolId, st /// The ID of the Compute Node that contains the extensions. /// The name of the Compute Node Extension that you want to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7266,8 +7390,8 @@ public virtual Response GetNodeExtension(string poolId, st /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetNodeExtensionAsync(string poolId, string nodeId, string extensionName, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + /// + public virtual async Task GetNodeExtensionAsync(string poolId, string nodeId, string extensionName, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -7277,7 +7401,7 @@ public virtual async Task GetNodeExtensionAsync(string poolId, string scope.Start(); try { - using HttpMessage message = CreateGetNodeExtensionRequest(poolId, nodeId, extensionName, timeOutInSeconds, ocpdate, select, context); + using HttpMessage message = CreateGetNodeExtensionRequest(poolId, nodeId, extensionName, timeOutInSeconds, ocpDate, select, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -7297,7 +7421,7 @@ public virtual async Task GetNodeExtensionAsync(string poolId, string /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -7306,7 +7430,7 @@ public virtual async Task GetNodeExtensionAsync(string poolId, string /// The ID of the Compute Node that contains the extensions. 
/// The name of the Compute Node Extension that you want to get information about. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7317,8 +7441,8 @@ public virtual async Task GetNodeExtensionAsync(string poolId, string /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetNodeExtension(string poolId, string nodeId, string extensionName, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + /// + public virtual Response GetNodeExtension(string poolId, string nodeId, string extensionName, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -7328,7 +7452,7 @@ public virtual Response GetNodeExtension(string poolId, string nodeId, string ex scope.Start(); try { - using HttpMessage message = CreateGetNodeExtensionRequest(poolId, nodeId, extensionName, timeOutInSeconds, ocpdate, select, context); + using HttpMessage message = CreateGetNodeExtensionRequest(poolId, nodeId, extensionName, timeOutInSeconds, ocpDate, select, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -7353,7 +7477,7 @@ public virtual Response GetNodeExtension(string poolId, string nodeId, string ex /// The ID of the Compute Node. /// The path to the file or directory. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7369,8 +7493,8 @@ public virtual Response GetNodeExtension(string poolId, string nodeId, string ex /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task DeleteNodeFileAsync(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? recursive = null, RequestContext context = null) + /// + public virtual async Task DeleteNodeFileAsync(string poolId, string nodeId, string filePath, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? recursive = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -7380,7 +7504,7 @@ public virtual async Task DeleteNodeFileAsync(string poolId, string no scope.Start(); try { - using HttpMessage message = CreateDeleteNodeFileRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, recursive, context); + using HttpMessage message = CreateDeleteNodeFileRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpDate, recursive, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -7405,7 +7529,7 @@ public virtual async Task DeleteNodeFileAsync(string poolId, string no /// The ID of the Compute Node. /// The path to the file or directory. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7421,8 +7545,8 @@ public virtual async Task DeleteNodeFileAsync(string poolId, string no /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response DeleteNodeFile(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, bool? recursive = null, RequestContext context = null) + /// + public virtual Response DeleteNodeFile(string poolId, string nodeId, string filePath, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, bool? recursive = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -7432,7 +7556,7 @@ public virtual Response DeleteNodeFile(string poolId, string nodeId, string file scope.Start(); try { - using HttpMessage message = CreateDeleteNodeFileRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, recursive, context); + using HttpMessage message = CreateDeleteNodeFileRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpDate, recursive, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -7447,7 +7571,7 @@ public virtual Response DeleteNodeFile(string poolId, string nodeId, string file /// The ID of the Compute Node. /// The path to the file or directory. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -7460,15 +7584,15 @@ public virtual Response DeleteNodeFile(string poolId, string nodeId, string file /// The cancellation token to use. /// , or is null. /// , or is an empty string, and was expected to be non-empty. - /// - public virtual async Task> GetNodeFileAsync(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual async Task> GetNodeFileAsync(string poolId, string nodeId, string filePath, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = await GetNodeFileAsync(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context).ConfigureAwait(false); + Response response = await GetNodeFileAsync(poolId, nodeId, filePath, timeOutInSeconds, ocpDate, ocpRange, requestConditions, context).ConfigureAwait(false); return Response.FromValue(response.Content, response); } @@ -7477,7 +7601,7 @@ public virtual async Task> GetNodeFileAsync(string poolId, /// The ID of the Compute Node. /// The path to the file or directory. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -7490,15 +7614,15 @@ public virtual async Task> GetNodeFileAsync(string poolId, /// The cancellation token to use. /// , or is null. /// , or is an empty string, and was expected to be non-empty. - /// - public virtual Response GetNodeFile(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) + /// + public virtual Response GetNodeFile(string poolId, string nodeId, string filePath, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, string ocpRange = null, RequestConditions requestConditions = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); Argument.AssertNotNullOrEmpty(filePath, nameof(filePath)); RequestContext context = FromCancellationToken(cancellationToken); - Response response = GetNodeFile(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + Response response = GetNodeFile(poolId, nodeId, filePath, timeOutInSeconds, ocpDate, ocpRange, requestConditions, context); return Response.FromValue(response.Content, response); } @@ -7512,7 +7636,7 @@ public virtual Response GetNodeFile(string poolId, string nodeId, st /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -7521,7 +7645,7 @@ public virtual Response GetNodeFile(string poolId, string nodeId, st /// The ID of the Compute Node. /// The path to the file or directory. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7536,8 +7660,8 @@ public virtual Response GetNodeFile(string poolId, string nodeId, st /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual async Task GetNodeFileAsync(string poolId, string nodeId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + /// + public virtual async Task GetNodeFileAsync(string poolId, string nodeId, string filePath, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, string ocpRange, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -7556,7 +7680,7 @@ public virtual async Task GetNodeFileAsync(string poolId, string nodeI scope.Start(); try { - using HttpMessage message = CreateGetNodeFileRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + using HttpMessage message = CreateGetNodeFileRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpDate, ocpRange, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -7576,7 +7700,7 @@ public virtual async Task GetNodeFileAsync(string poolId, string nodeI /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -7585,7 +7709,7 @@ public virtual async Task GetNodeFileAsync(string poolId, string nodeI /// The ID of the Compute Node. /// The path to the file or directory. /// The maximum time that the server can spend processing the request, in seconds. 
The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7600,8 +7724,8 @@ public virtual async Task GetNodeFileAsync(string poolId, string nodeI /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - /// - public virtual Response GetNodeFile(string poolId, string nodeId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + /// + public virtual Response GetNodeFile(string poolId, string nodeId, string filePath, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, string ocpRange, RequestConditions requestConditions, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -7620,7 +7744,7 @@ public virtual Response GetNodeFile(string poolId, string nodeId, string filePat scope.Start(); try { - using HttpMessage message = CreateGetNodeFileRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, ocpRange, requestConditions, context); + using HttpMessage message = CreateGetNodeFileRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpDate, ocpRange, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -7645,7 +7769,7 @@ public virtual Response GetNodeFile(string poolId, string nodeId, string filePat /// The ID of the Compute Node. /// The path to the file or directory. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7656,7 +7780,7 @@ public virtual Response GetNodeFile(string poolId, string nodeId, string filePat /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - internal virtual async Task GetNodeFilePropertiesInternalAsync(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual async Task GetNodeFilePropertiesInternalAsync(string poolId, string nodeId, string filePath, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -7675,7 +7799,7 @@ internal virtual async Task GetNodeFilePropertiesInternalAsync(string scope.Start(); try { - using HttpMessage message = CreateGetNodeFilePropertiesInternalRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateGetNodeFilePropertiesInternalRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpDate, requestConditions, context); return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false); } catch (Exception e) @@ -7700,7 +7824,7 @@ internal virtual async Task GetNodeFilePropertiesInternalAsync(string /// The ID of the Compute Node. /// The path to the file or directory. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7711,7 +7835,7 @@ internal virtual async Task GetNodeFilePropertiesInternalAsync(string /// , or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The response returned from the service. - internal virtual Response GetNodeFilePropertiesInternal(string poolId, string nodeId, string filePath, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, RequestConditions requestConditions = null, RequestContext context = null) + internal virtual Response GetNodeFilePropertiesInternal(string poolId, string nodeId, string filePath, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, RequestConditions requestConditions = null, RequestContext context = null) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); @@ -7730,7 +7854,7 @@ internal virtual Response GetNodeFilePropertiesInternal(string poolId, string no scope.Start(); try { - using HttpMessage message = CreateGetNodeFilePropertiesInternalRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpdate, requestConditions, context); + using HttpMessage message = CreateGetNodeFilePropertiesInternalRequest(poolId, nodeId, filePath, timeOutInSeconds, ocpDate, requestConditions, context); return _pipeline.ProcessMessage(message, context); } catch (Exception e) @@ -7742,7 +7866,7 @@ internal virtual Response GetNodeFilePropertiesInternal(string poolId, string no /// Lists all of the applications available in the specified Account. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7759,18 +7883,18 @@ internal virtual Response GetNodeFilePropertiesInternal(string poolId, string no /// available to Compute Nodes, use the Azure portal or the Azure Resource Manager /// API. /// - /// - public virtual AsyncPageable GetApplicationsAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetApplicationsAsync(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetApplicationsRequest(timeOutInSeconds, ocpdate, maxresults, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetApplicationsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetApplicationsRequest(timeOutInSeconds, ocpDate, maxresults, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetApplicationsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchApplication.DeserializeBatchApplication(e), ClientDiagnostics, _pipeline, "BatchClient.GetApplications", "value", "odata.nextLink", context); } /// Lists all of the applications available in the specified Account. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7787,12 +7911,12 @@ public virtual AsyncPageable GetApplicationsAsync(int? timeOut /// available to Compute Nodes, use the Azure portal or the Azure Resource Manager /// API. /// - /// - public virtual Pageable GetApplications(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetApplications(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetApplicationsRequest(timeOutInSeconds, ocpdate, maxresults, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetApplicationsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetApplicationsRequest(timeOutInSeconds, ocpDate, maxresults, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetApplicationsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchApplication.DeserializeBatchApplication(e), ClientDiagnostics, _pipeline, "BatchClient.GetApplications", "value", "odata.nextLink", context); } @@ -7806,13 +7930,13 @@ public virtual Pageable GetApplications(int? timeOutInSeconds /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. 
/// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7824,11 +7948,11 @@ public virtual Pageable GetApplications(int? timeOutInSeconds /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetApplicationsAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, RequestContext context) + /// + public virtual AsyncPageable GetApplicationsAsync(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetApplicationsRequest(timeOutInSeconds, ocpdate, maxresults, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetApplicationsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetApplicationsRequest(timeOutInSeconds, ocpDate, maxresults, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetApplicationsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetApplications", "value", "odata.nextLink", context); } @@ -7842,13 +7966,13 @@ public virtual AsyncPageable GetApplicationsAsync(int? 
timeOutInSeco /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7860,11 +7984,11 @@ public virtual AsyncPageable GetApplicationsAsync(int? timeOutInSeco /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetApplications(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, RequestContext context) + /// + public virtual Pageable GetApplications(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetApplicationsRequest(timeOutInSeconds, ocpdate, maxresults, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetApplicationsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetApplicationsRequest(timeOutInSeconds, ocpDate, maxresults, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetApplicationsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetApplications", "value", "odata.nextLink", context); } @@ -7873,7 +7997,7 @@ public virtual Pageable GetApplications(int? timeOutInSeconds, DateT /// for the specified Account. /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7905,12 +8029,12 @@ public virtual Pageable GetApplications(int? timeOutInSeconds, DateT /// times of the last aggregation interval currently available; that is, only the /// last aggregation interval is returned. /// - /// - public virtual AsyncPageable GetPoolUsageMetricsAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, DateTimeOffset? starttime = null, DateTimeOffset? endtime = null, string filter = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetPoolUsageMetricsAsync(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, DateTimeOffset? starttime = null, DateTimeOffset? endtime = null, string filter = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolUsageMetricsRequest(timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); - HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetPoolUsageMetricsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolUsageMetricsRequest(timeOutInSeconds, ocpDate, maxresults, starttime, endtime, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolUsageMetricsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, starttime, endtime, filter, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchPoolUsageMetrics.DeserializeBatchPoolUsageMetrics(e), ClientDiagnostics, _pipeline, "BatchClient.GetPoolUsageMetrics", "value", "odata.nextLink", context); } @@ -7919,7 +8043,7 @@ public virtual AsyncPageable GetPoolUsageMetricsAsync(int /// for the specified Account. /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -7951,12 +8075,12 @@ public virtual AsyncPageable GetPoolUsageMetricsAsync(int /// times of the last aggregation interval currently available; that is, only the /// last aggregation interval is returned. /// - /// - public virtual Pageable GetPoolUsageMetrics(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, DateTimeOffset? starttime = null, DateTimeOffset? endtime = null, string filter = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetPoolUsageMetrics(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, DateTimeOffset? starttime = null, DateTimeOffset? 
endtime = null, string filter = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolUsageMetricsRequest(timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolUsageMetricsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolUsageMetricsRequest(timeOutInSeconds, ocpDate, maxresults, starttime, endtime, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolUsageMetricsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, starttime, endtime, filter, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchPoolUsageMetrics.DeserializeBatchPoolUsageMetrics(e), ClientDiagnostics, _pipeline, "BatchClient.GetPoolUsageMetrics", "value", "odata.nextLink", context); } @@ -7971,13 +8095,13 @@ public virtual Pageable GetPoolUsageMetrics(int? timeOutI /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8003,11 +8127,11 @@ public virtual Pageable GetPoolUsageMetrics(int? 
timeOutI /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetPoolUsageMetricsAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, DateTimeOffset? starttime, DateTimeOffset? endtime, string filter, RequestContext context) + /// + public virtual AsyncPageable GetPoolUsageMetricsAsync(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, DateTimeOffset? starttime, DateTimeOffset? endtime, string filter, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolUsageMetricsRequest(timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolUsageMetricsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolUsageMetricsRequest(timeOutInSeconds, ocpDate, maxresults, starttime, endtime, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolUsageMetricsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, starttime, endtime, filter, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPoolUsageMetrics", "value", "odata.nextLink", context); } @@ -8022,13 +8146,13 @@ public virtual AsyncPageable GetPoolUsageMetricsAsync(int? timeOutIn /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. 
/// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8054,17 +8178,17 @@ public virtual AsyncPageable GetPoolUsageMetricsAsync(int? timeOutIn /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetPoolUsageMetrics(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, DateTimeOffset? starttime, DateTimeOffset? endtime, string filter, RequestContext context) + /// + public virtual Pageable GetPoolUsageMetrics(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, DateTimeOffset? starttime, DateTimeOffset? endtime, string filter, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolUsageMetricsRequest(timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolUsageMetricsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, starttime, endtime, filter, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolUsageMetricsRequest(timeOutInSeconds, ocpDate, maxresults, starttime, endtime, filter, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetPoolUsageMetricsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, starttime, endtime, filter, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPoolUsageMetrics", "value", "odata.nextLink", context); } /// Lists all of the Pools which be mounted. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8080,18 +8204,18 @@ public virtual Pageable GetPoolUsageMetrics(int? timeOutInSeconds, D /// An OData $select clause. /// An OData $expand clause. /// The cancellation token to use. - /// - public virtual AsyncPageable GetPoolsAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetPoolsAsync(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetPoolsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolsRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchPool.DeserializeBatchPool(e), ClientDiagnostics, _pipeline, "BatchClient.GetPools", "value", "odata.nextLink", context); } /// Lists all of the Pools which be mounted. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8107,12 +8231,12 @@ public virtual AsyncPageable GetPoolsAsync(int? timeOutInSeconds = nu /// An OData $select clause. /// An OData $expand clause. /// The cancellation token to use. - /// - public virtual Pageable GetPools(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetPools(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? 
pageSizeHint) => CreateGetPoolsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolsRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchPool.DeserializeBatchPool(e), ClientDiagnostics, _pipeline, "BatchClient.GetPools", "value", "odata.nextLink", context); } @@ -8126,13 +8250,13 @@ public virtual Pageable GetPools(int? timeOutInSeconds = null, DateTi /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8150,11 +8274,11 @@ public virtual Pageable GetPools(int? timeOutInSeconds = null, DateTi /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetPoolsAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + /// + public virtual AsyncPageable GetPoolsAsync(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolsRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPools", "value", "odata.nextLink", context); } @@ -8168,13 +8292,13 @@ public virtual AsyncPageable GetPoolsAsync(int? timeOutInSeconds, Da /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8192,17 +8316,17 @@ public virtual AsyncPageable GetPoolsAsync(int? 
timeOutInSeconds, Da /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetPools(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + /// + public virtual Pageable GetPools(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolsRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPools", "value", "odata.nextLink", context); } /// Lists all Virtual Machine Images supported by the Azure Batch service. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8216,18 +8340,18 @@ public virtual Pageable GetPools(int? timeOutInSeconds, DateTimeOffs /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. /// /// The cancellation token to use. - /// - public virtual AsyncPageable GetSupportedImagesAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetSupportedImagesAsync(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSupportedImagesRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSupportedImagesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSupportedImagesRequest(timeOutInSeconds, ocpDate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSupportedImagesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchSupportedImage.DeserializeBatchSupportedImage(e), ClientDiagnostics, _pipeline, "BatchClient.GetSupportedImages", "value", "odata.nextLink", context); } /// Lists all Virtual Machine Images supported by the Azure Batch service. /// The maximum time that the server can spend processing the request, in seconds. 
The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8241,12 +8365,12 @@ public virtual AsyncPageable GetSupportedImagesAsync(int? t /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. /// /// The cancellation token to use. - /// - public virtual Pageable GetSupportedImages(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetSupportedImages(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSupportedImagesRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSupportedImagesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSupportedImagesRequest(timeOutInSeconds, ocpDate, maxresults, filter, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetSupportedImagesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchSupportedImage.DeserializeBatchSupportedImage(e), ClientDiagnostics, _pipeline, "BatchClient.GetSupportedImages", "value", "odata.nextLink", context); } @@ -8260,13 +8384,13 @@ public virtual Pageable GetSupportedImages(int? timeOutInSe /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8282,11 +8406,11 @@ public virtual Pageable GetSupportedImages(int? timeOutInSe /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetSupportedImagesAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + /// + public virtual AsyncPageable GetSupportedImagesAsync(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSupportedImagesRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); - HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetSupportedImagesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSupportedImagesRequest(timeOutInSeconds, ocpDate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSupportedImagesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetSupportedImages", "value", "odata.nextLink", context); } @@ -8300,13 +8424,13 @@ public virtual AsyncPageable GetSupportedImagesAsync(int? timeOutInS /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8322,11 +8446,11 @@ public virtual AsyncPageable GetSupportedImagesAsync(int? timeOutInS /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetSupportedImages(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + /// + public virtual Pageable GetSupportedImages(TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, int? maxresults, string filter, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSupportedImagesRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSupportedImagesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSupportedImagesRequest(timeOutInSeconds, ocpDate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSupportedImagesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetSupportedImages", "value", "odata.nextLink", context); } @@ -8336,7 +8460,7 @@ public virtual Pageable GetSupportedImages(int? timeOutInSeconds, Da /// use a list query. /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8350,12 +8474,12 @@ public virtual Pageable GetSupportedImages(int? timeOutInSeconds, Da /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. /// /// The cancellation token to use. - /// - public virtual AsyncPageable GetPoolNodeCountsAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetPoolNodeCountsAsync(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? 
maxresults = null, string filter = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolNodeCountsRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolNodeCountsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolNodeCountsRequest(timeOutInSeconds, ocpDate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolNodeCountsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchPoolNodeCounts.DeserializeBatchPoolNodeCounts(e), ClientDiagnostics, _pipeline, "BatchClient.GetPoolNodeCounts", "value", "odata.nextLink", context); } @@ -8365,7 +8489,7 @@ public virtual AsyncPageable GetPoolNodeCountsAsync(int? ti /// use a list query. /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8379,12 +8503,12 @@ public virtual AsyncPageable GetPoolNodeCountsAsync(int? ti /// https://learn.microsoft.com/rest/api/batchservice/odata-filters-in-batch#list-support-images. /// /// The cancellation token to use. - /// - public virtual Pageable GetPoolNodeCounts(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? 
maxresults = null, string filter = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetPoolNodeCounts(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolNodeCountsRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolNodeCountsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolNodeCountsRequest(timeOutInSeconds, ocpDate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolNodeCountsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchPoolNodeCounts.DeserializeBatchPoolNodeCounts(e), ClientDiagnostics, _pipeline, "BatchClient.GetPoolNodeCounts", "value", "odata.nextLink", context); } @@ -8400,13 +8524,13 @@ public virtual Pageable GetPoolNodeCounts(int? timeOutInSec /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -8422,11 +8546,11 @@ public virtual Pageable GetPoolNodeCounts(int? timeOutInSec /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetPoolNodeCountsAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + /// + public virtual AsyncPageable GetPoolNodeCountsAsync(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolNodeCountsRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolNodeCountsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolNodeCountsRequest(timeOutInSeconds, ocpDate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolNodeCountsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPoolNodeCounts", "value", "odata.nextLink", context); } @@ -8442,13 +8566,13 @@ public virtual AsyncPageable GetPoolNodeCountsAsync(int? timeOutInSe /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8464,17 +8588,17 @@ public virtual AsyncPageable GetPoolNodeCountsAsync(int? timeOutInSe /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetPoolNodeCounts(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + /// + public virtual Pageable GetPoolNodeCounts(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolNodeCountsRequest(timeOutInSeconds, ocpdate, maxresults, filter, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolNodeCountsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetPoolNodeCountsRequest(timeOutInSeconds, ocpDate, maxresults, filter, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetPoolNodeCountsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetPoolNodeCounts", "value", "odata.nextLink", context); } /// Lists all of the Jobs in the specified Account. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8490,18 +8614,18 @@ public virtual Pageable GetPoolNodeCounts(int? timeOutInSeconds, Dat /// An OData $select clause. /// An OData $expand clause. /// The cancellation token to use. - /// - public virtual AsyncPageable GetJobsAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetJobsAsync(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetJobsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchJob.DeserializeBatchJob(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobs", "value", "odata.nextLink", context); } /// Lists all of the Jobs in the specified Account. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8517,12 +8641,12 @@ public virtual AsyncPageable GetJobsAsync(int? timeOutInSeconds = null /// An OData $select clause. /// An OData $expand clause. /// The cancellation token to use. - /// - public virtual Pageable GetJobs(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetJobs(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? 
pageSizeHint) => CreateGetJobsRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchJob.DeserializeBatchJob(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobs", "value", "odata.nextLink", context); } @@ -8536,13 +8660,13 @@ public virtual Pageable GetJobs(int? timeOutInSeconds = null, DateTime /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8560,11 +8684,11 @@ public virtual Pageable GetJobs(int? timeOutInSeconds = null, DateTime /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetJobsAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + /// + public virtual AsyncPageable GetJobsAsync(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { - HttpMessage FirstPageRequest(int? 
pageSizeHint) => CreateGetJobsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobs", "value", "odata.nextLink", context); } @@ -8578,13 +8702,13 @@ public virtual AsyncPageable GetJobsAsync(int? timeOutInSeconds, Dat /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8602,18 +8726,18 @@ public virtual AsyncPageable GetJobsAsync(int? timeOutInSeconds, Dat /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetJobs(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + /// + public virtual Pageable GetJobs(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobs", "value", "odata.nextLink", context); } /// Lists the Jobs that have been created under the specified Job Schedule. /// The ID of the Job Schedule from which you want to get a list of Jobs. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8631,21 +8755,21 @@ public virtual Pageable GetJobs(int? timeOutInSeconds, DateTimeOffse /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - /// - public virtual AsyncPageable GetJobsFromSchedulesAsync(string jobScheduleId, int? 
timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetJobsFromSchedulesAsync(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsFromSchedulesRequest(jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsFromSchedulesNextPageRequest(nextLink, jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsFromSchedulesRequest(jobScheduleId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsFromSchedulesNextPageRequest(nextLink, jobScheduleId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchJob.DeserializeBatchJob(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobsFromSchedules", "value", "odata.nextLink", context); } /// Lists the Jobs that have been created under the specified Job Schedule. /// The ID of the Job Schedule from which you want to get a list of Jobs. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8663,14 +8787,14 @@ public virtual AsyncPageable GetJobsFromSchedulesAsync(string jobSched /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - /// - public virtual Pageable GetJobsFromSchedules(string jobScheduleId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetJobsFromSchedules(string jobScheduleId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsFromSchedulesRequest(jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsFromSchedulesNextPageRequest(nextLink, jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsFromSchedulesRequest(jobScheduleId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetJobsFromSchedulesNextPageRequest(nextLink, jobScheduleId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchJob.DeserializeBatchJob(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobsFromSchedules", "value", "odata.nextLink", context); } @@ -8684,14 +8808,14 @@ public virtual Pageable GetJobsFromSchedules(string jobScheduleId, int /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job Schedule from which you want to get a list of Jobs. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8711,13 +8835,13 @@ public virtual Pageable GetJobsFromSchedules(string jobScheduleId, int /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetJobsFromSchedulesAsync(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + /// + public virtual AsyncPageable GetJobsFromSchedulesAsync(string jobScheduleId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? 
maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsFromSchedulesRequest(jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsFromSchedulesNextPageRequest(nextLink, jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsFromSchedulesRequest(jobScheduleId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsFromSchedulesNextPageRequest(nextLink, jobScheduleId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobsFromSchedules", "value", "odata.nextLink", context); } @@ -8731,14 +8855,14 @@ public virtual AsyncPageable GetJobsFromSchedulesAsync(string jobSch /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job Schedule from which you want to get a list of Jobs. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -8758,13 +8882,13 @@ public virtual AsyncPageable GetJobsFromSchedulesAsync(string jobSch /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetJobsFromSchedules(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + /// + public virtual Pageable GetJobsFromSchedules(string jobScheduleId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { Argument.AssertNotNullOrEmpty(jobScheduleId, nameof(jobScheduleId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsFromSchedulesRequest(jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsFromSchedulesNextPageRequest(nextLink, jobScheduleId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobsFromSchedulesRequest(jobScheduleId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobsFromSchedulesNextPageRequest(nextLink, jobScheduleId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobsFromSchedules", "value", "odata.nextLink", context); } @@ -8774,7 +8898,7 @@ public virtual Pageable GetJobsFromSchedules(string jobScheduleId, i /// /// The ID of the Job. 
/// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8799,14 +8923,14 @@ public virtual Pageable GetJobsFromSchedules(string jobScheduleId, i /// service returns HTTP status code 409 (Conflict) with an error code of /// JobPreparationTaskNotSpecified. /// - /// - public virtual AsyncPageable GetJobPreparationAndReleaseTaskStatusesAsync(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetJobPreparationAndReleaseTaskStatusesAsync(string jobId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchJobPreparationAndReleaseTaskStatus.DeserializeBatchJobPreparationAndReleaseTaskStatus(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); } @@ -8816,7 +8940,7 @@ public virtual AsyncPageable GetJobPrep /// /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8841,14 +8965,14 @@ public virtual AsyncPageable GetJobPrep /// service returns HTTP status code 409 (Conflict) with an error code of /// JobPreparationTaskNotSpecified. /// - /// - public virtual Pageable GetJobPreparationAndReleaseTaskStatuses(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetJobPreparationAndReleaseTaskStatuses(string jobId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? 
pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchJobPreparationAndReleaseTaskStatus.DeserializeBatchJobPreparationAndReleaseTaskStatus(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); } @@ -8863,14 +8987,14 @@ public virtual Pageable GetJobPreparati /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8889,13 +9013,13 @@ public virtual Pageable GetJobPreparati /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. 
Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetJobPreparationAndReleaseTaskStatusesAsync(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + /// + public virtual AsyncPageable GetJobPreparationAndReleaseTaskStatusesAsync(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); } @@ -8910,14 +9034,14 @@ public virtual AsyncPageable GetJobPreparationAndReleaseTaskStatuses /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job. 
/// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8936,19 +9060,19 @@ public virtual AsyncPageable GetJobPreparationAndReleaseTaskStatuses /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetJobPreparationAndReleaseTaskStatuses(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + /// + public virtual Pageable GetJobPreparationAndReleaseTaskStatuses(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobPreparationAndReleaseTaskStatusesRequest(jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobPreparationAndReleaseTaskStatuses", "value", "odata.nextLink", context); } /// Lists all of the Certificates that have been added to the specified Account. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8963,18 +9087,18 @@ public virtual Pageable GetJobPreparationAndReleaseTaskStatuses(stri /// /// An OData $select clause. /// The cancellation token to use. - /// - public virtual AsyncPageable GetCertificatesAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetCertificatesAsync(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchCertificate.DeserializeBatchCertificate(e), ClientDiagnostics, _pipeline, "BatchClient.GetCertificates", "value", "odata.nextLink", context); } /// Lists all of the Certificates that have been added to the specified Account. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -8989,12 +9113,12 @@ public virtual AsyncPageable GetCertificatesAsync(int? timeOut /// /// An OData $select clause. /// The cancellation token to use. - /// - public virtual Pageable GetCertificates(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetCertificates(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? 
pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchCertificate.DeserializeBatchCertificate(e), ClientDiagnostics, _pipeline, "BatchClient.GetCertificates", "value", "odata.nextLink", context); } @@ -9008,13 +9132,13 @@ public virtual Pageable GetCertificates(int? timeOutInSeconds /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9031,11 +9155,11 @@ public virtual Pageable GetCertificates(int? timeOutInSeconds /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetCertificatesAsync(int? timeOutInSeconds, DateTimeOffset? 
ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + /// + public virtual AsyncPageable GetCertificatesAsync(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetCertificates", "value", "odata.nextLink", context); } @@ -9049,13 +9173,13 @@ public virtual AsyncPageable GetCertificatesAsync(int? timeOutInSeco /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9072,17 +9196,17 @@ public virtual AsyncPageable GetCertificatesAsync(int? 
timeOutInSeco /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetCertificates(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + /// + public virtual Pageable GetCertificates(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetCertificatesRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetCertificatesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetCertificates", "value", "odata.nextLink", context); } /// Lists all of the Job Schedules in the specified Account. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -9098,18 +9222,18 @@ public virtual Pageable GetCertificates(int? timeOutInSeconds, DateT /// An OData $select clause. /// An OData $expand clause. /// The cancellation token to use. - /// - public virtual AsyncPageable GetJobSchedulesAsync(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetJobSchedulesAsync(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobSchedulesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobSchedulesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobSchedulesRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobSchedulesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchJobSchedule.DeserializeBatchJobSchedule(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobSchedules", "value", "odata.nextLink", context); } /// Lists all of the Job Schedules in the specified Account. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9125,12 +9249,12 @@ public virtual AsyncPageable GetJobSchedulesAsync(int? timeOut /// An OData $select clause. /// An OData $expand clause. /// The cancellation token to use. - /// - public virtual Pageable GetJobSchedules(int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetJobSchedules(TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) { RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobSchedulesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobSchedulesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobSchedulesRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetJobSchedulesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchJobSchedule.DeserializeBatchJobSchedule(e), ClientDiagnostics, _pipeline, "BatchClient.GetJobSchedules", "value", "odata.nextLink", context); } @@ -9144,13 +9268,13 @@ public virtual Pageable GetJobSchedules(int? timeOutInSeconds /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9168,11 +9292,11 @@ public virtual Pageable GetJobSchedules(int? timeOutInSeconds /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetJobSchedulesAsync(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + /// + public virtual AsyncPageable GetJobSchedulesAsync(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { - HttpMessage FirstPageRequest(int? 
pageSizeHint) => CreateGetJobSchedulesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobSchedulesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobSchedulesRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobSchedulesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobSchedules", "value", "odata.nextLink", context); } @@ -9186,13 +9310,13 @@ public virtual AsyncPageable GetJobSchedulesAsync(int? timeOutInSeco /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9210,18 +9334,18 @@ public virtual AsyncPageable GetJobSchedulesAsync(int? timeOutInSeco /// The request context, which can override default behaviors of the client pipeline on a per-call basis. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetJobSchedules(int? 
timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + /// + public virtual Pageable GetJobSchedules(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobSchedulesRequest(timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobSchedulesNextPageRequest(nextLink, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetJobSchedulesRequest(timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetJobSchedulesNextPageRequest(nextLink, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetJobSchedules", "value", "odata.nextLink", context); } /// Lists all of the Tasks that are associated with the specified Job. /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9244,21 +9368,21 @@ public virtual Pageable GetJobSchedules(int? timeOutInSeconds, DateT /// nodeInfo refer to the primary Task. Use the list subtasks API to retrieve /// information about subtasks. /// - /// - public virtual AsyncPageable GetTasksAsync(string jobId, int? 
timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetTasksAsync(string jobId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTasksRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTasksNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTasksRequest(jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTasksNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchTask.DeserializeBatchTask(e), ClientDiagnostics, _pipeline, "BatchClient.GetTasks", "value", "odata.nextLink", context); } /// Lists all of the Tasks that are associated with the specified Job. /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9281,14 +9405,14 @@ public virtual AsyncPageable GetTasksAsync(string jobId, int? timeOut /// nodeInfo refer to the primary Task. Use the list subtasks API to retrieve /// information about subtasks. /// - /// - public virtual Pageable GetTasks(string jobId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetTasks(string jobId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, IEnumerable expand = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTasksRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTasksNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTasksRequest(jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetTasksNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchTask.DeserializeBatchTask(e), ClientDiagnostics, _pipeline, "BatchClient.GetTasks", "value", "odata.nextLink", context); } @@ -9302,14 +9426,14 @@ public virtual Pageable GetTasks(string jobId, int? timeOutInSeconds /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9329,13 +9453,13 @@ public virtual Pageable GetTasks(string jobId, int? timeOutInSeconds /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetTasksAsync(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + /// + public virtual AsyncPageable GetTasksAsync(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - HttpMessage FirstPageRequest(int? 
pageSizeHint) => CreateGetTasksRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTasksNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTasksRequest(jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTasksNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetTasks", "value", "odata.nextLink", context); } @@ -9349,14 +9473,14 @@ public virtual AsyncPageable GetTasksAsync(string jobId, int? timeOu /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Job. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9376,13 +9500,13 @@ public virtual AsyncPageable GetTasksAsync(string jobId, int? timeOu /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetTasks(string jobId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + /// + public virtual Pageable GetTasks(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTasksRequest(jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTasksNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpdate, maxresults, filter, select, expand, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTasksRequest(jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTasksNextPageRequest(nextLink, jobId, timeOutInSeconds, ocpDate, maxresults, filter, select, expand, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetTasks", "value", "odata.nextLink", context); } @@ -9393,7 +9517,7 @@ public virtual Pageable GetTasks(string jobId, int? timeOutInSeconds /// The ID of the Job. /// The ID of the Task. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9403,15 +9527,15 @@ public virtual Pageable GetTasks(string jobId, int? timeOutInSeconds /// or is null. /// or is an empty string, and was expected to be non-empty. 
/// If the Task is not a multi-instance Task then this returns an empty collection. - /// - public virtual AsyncPageable GetSubTasksAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetSubTasksAsync(string jobId, string taskId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSubTasksRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSubTasksNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSubTasksRequest(jobId, taskId, timeOutInSeconds, ocpDate, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSubTasksNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpDate, select, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchSubtask.DeserializeBatchSubtask(e), ClientDiagnostics, _pipeline, "BatchClient.GetSubTasks", "value", "odata.nextLink", context); } @@ -9422,7 +9546,7 @@ public virtual AsyncPageable GetSubTasksAsync(string jobId, string /// The ID of the Job. /// The ID of the Task. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. 
Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9432,15 +9556,15 @@ public virtual AsyncPageable GetSubTasksAsync(string jobId, string /// or is null. /// or is an empty string, and was expected to be non-empty. /// If the Task is not a multi-instance Task then this returns an empty collection. - /// - public virtual Pageable GetSubTasks(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetSubTasks(string jobId, string taskId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSubTasksRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSubTasksNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSubTasksRequest(jobId, taskId, timeOutInSeconds, ocpDate, select, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetSubTasksNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpDate, select, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchSubtask.DeserializeBatchSubtask(e), ClientDiagnostics, _pipeline, "BatchClient.GetSubTasks", "value", "odata.nextLink", context); } @@ -9455,7 +9579,7 @@ public virtual Pageable GetSubTasks(string jobId, string taskId, i /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -9463,7 +9587,7 @@ public virtual Pageable GetSubTasks(string jobId, string taskId, i /// The ID of the Job. /// The ID of the Task. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9474,14 +9598,14 @@ public virtual Pageable GetSubTasks(string jobId, string taskId, i /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetSubTasksAsync(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + /// + public virtual AsyncPageable GetSubTasksAsync(string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - HttpMessage FirstPageRequest(int? 
pageSizeHint) => CreateGetSubTasksRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSubTasksNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSubTasksRequest(jobId, taskId, timeOutInSeconds, ocpDate, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSubTasksNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpDate, select, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetSubTasks", "value", "odata.nextLink", context); } @@ -9496,7 +9620,7 @@ public virtual AsyncPageable GetSubTasksAsync(string jobId, string t /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -9504,7 +9628,7 @@ public virtual AsyncPageable GetSubTasksAsync(string jobId, string t /// The ID of the Job. /// The ID of the Task. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9515,14 +9639,14 @@ public virtual AsyncPageable GetSubTasksAsync(string jobId, string t /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. 
- /// - public virtual Pageable GetSubTasks(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + /// + public virtual Pageable GetSubTasks(string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSubTasksRequest(jobId, taskId, timeOutInSeconds, ocpdate, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSubTasksNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetSubTasksRequest(jobId, taskId, timeOutInSeconds, ocpDate, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetSubTasksNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpDate, select, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetSubTasks", "value", "odata.nextLink", context); } @@ -9530,7 +9654,7 @@ public virtual Pageable GetSubTasks(string jobId, string taskId, int /// The ID of the Job that contains the Task. /// The ID of the Task whose files you want to list. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9550,15 +9674,15 @@ public virtual Pageable GetSubTasks(string jobId, string taskId, int /// The cancellation token to use. /// or is null. 
/// or is an empty string, and was expected to be non-empty. - /// - public virtual AsyncPageable GetTaskFilesAsync(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, bool? recursive = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetTaskFilesAsync(string jobId, string taskId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, bool? recursive = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTaskFilesRequest(jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTaskFilesNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTaskFilesRequest(jobId, taskId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTaskFilesNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchNodeFile.DeserializeBatchNodeFile(e), ClientDiagnostics, _pipeline, "BatchClient.GetTaskFiles", "value", "odata.nextLink", context); } @@ -9566,7 +9690,7 @@ public virtual AsyncPageable GetTaskFilesAsync(string jobId, stri /// The ID of the Job that contains the Task. /// The ID of the Task whose files you want to list. 
/// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9586,15 +9710,15 @@ public virtual AsyncPageable GetTaskFilesAsync(string jobId, stri /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// - public virtual Pageable GetTaskFiles(string jobId, string taskId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, bool? recursive = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetTaskFiles(string jobId, string taskId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, bool? recursive = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTaskFilesRequest(jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTaskFilesNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTaskFilesRequest(jobId, taskId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetTaskFilesNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchNodeFile.DeserializeBatchNodeFile(e), ClientDiagnostics, _pipeline, "BatchClient.GetTaskFiles", "value", "odata.nextLink", context); } @@ -9608,7 +9732,7 @@ public virtual Pageable GetTaskFiles(string jobId, string taskId, /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -9616,7 +9740,7 @@ public virtual Pageable GetTaskFiles(string jobId, string taskId, /// The ID of the Job that contains the Task. /// The ID of the Task whose files you want to list. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9638,14 +9762,14 @@ public virtual Pageable GetTaskFiles(string jobId, string taskId, /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetTaskFilesAsync(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + /// + public virtual AsyncPageable GetTaskFilesAsync(string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, bool? 
recursive, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTaskFilesRequest(jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTaskFilesNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTaskFilesRequest(jobId, taskId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTaskFilesNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetTaskFiles", "value", "odata.nextLink", context); } @@ -9659,7 +9783,7 @@ public virtual AsyncPageable GetTaskFilesAsync(string jobId, string /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -9667,7 +9791,7 @@ public virtual AsyncPageable GetTaskFilesAsync(string jobId, string /// The ID of the Job that contains the Task. /// The ID of the Task whose files you want to list. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -9689,21 +9813,21 @@ public virtual AsyncPageable GetTaskFilesAsync(string jobId, string /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetTaskFiles(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + /// + public virtual Pageable GetTaskFiles(string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, bool? recursive, RequestContext context) { Argument.AssertNotNullOrEmpty(jobId, nameof(jobId)); Argument.AssertNotNullOrEmpty(taskId, nameof(taskId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTaskFilesRequest(jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTaskFilesNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetTaskFilesRequest(jobId, taskId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetTaskFilesNextPageRequest(nextLink, jobId, taskId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetTaskFiles", "value", "odata.nextLink", context); } /// Lists the Compute Nodes in the specified Pool. /// The ID of the Pool from which you want to list Compute Nodes. /// The maximum time that the server can spend processing the request, in seconds. 
The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9720,21 +9844,21 @@ public virtual Pageable GetTaskFiles(string jobId, string taskId, in /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - /// - public virtual AsyncPageable GetNodesAsync(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetNodesAsync(string poolId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodesRequest(poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodesNextPageRequest(nextLink, poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodesRequest(poolId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetNodesNextPageRequest(nextLink, poolId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchNode.DeserializeBatchNode(e), ClientDiagnostics, _pipeline, "BatchClient.GetNodes", "value", "odata.nextLink", context); } /// Lists the Compute Nodes in the specified Pool. /// The ID of the Pool from which you want to list Compute Nodes. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9751,14 +9875,14 @@ public virtual AsyncPageable GetNodesAsync(string poolId, int? timeOu /// The cancellation token to use. /// is null. /// is an empty string, and was expected to be non-empty. - /// - public virtual Pageable GetNodes(string poolId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetNodes(string poolId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodesRequest(poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetNodesNextPageRequest(nextLink, poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodesRequest(poolId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodesNextPageRequest(nextLink, poolId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchNode.DeserializeBatchNode(e), ClientDiagnostics, _pipeline, "BatchClient.GetNodes", "value", "odata.nextLink", context); } @@ -9772,14 +9896,14 @@ public virtual Pageable GetNodes(string poolId, int? timeOutInSeconds /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Pool from which you want to list Compute Nodes. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9798,13 +9922,13 @@ public virtual Pageable GetNodes(string poolId, int? timeOutInSeconds /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetNodesAsync(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, string filter, IEnumerable select, RequestContext context) + /// + public virtual AsyncPageable GetNodesAsync(string poolId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodesRequest(poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodesNextPageRequest(nextLink, poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodesRequest(poolId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodesNextPageRequest(nextLink, poolId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetNodes", "value", "odata.nextLink", context); } @@ -9818,14 +9942,14 @@ public virtual AsyncPageable GetNodesAsync(string poolId, int? timeO /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// /// /// The ID of the Pool from which you want to list Compute Nodes. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. 
@@ -9844,13 +9968,13 @@ public virtual AsyncPageable GetNodesAsync(string poolId, int? timeO /// is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetNodes(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + /// + public virtual Pageable GetNodes(string poolId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodesRequest(poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodesNextPageRequest(nextLink, poolId, timeOutInSeconds, ocpdate, maxresults, filter, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodesRequest(poolId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodesNextPageRequest(nextLink, poolId, timeOutInSeconds, ocpDate, maxresults, filter, select, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetNodes", "value", "odata.nextLink", context); } @@ -9858,7 +9982,7 @@ public virtual Pageable GetNodes(string poolId, int? timeOutInSecond /// The ID of the Pool that contains Compute Node. /// The ID of the Compute Node that you want to list extensions. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. 
If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9871,15 +9995,15 @@ public virtual Pageable GetNodes(string poolId, int? timeOutInSecond /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// - public virtual AsyncPageable GetNodeExtensionsAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetNodeExtensionsAsync(string poolId, string nodeId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeExtensionsRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeExtensionsNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeExtensionsRequest(poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, select, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetNodeExtensionsNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, select, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchNodeVMExtension.DeserializeBatchNodeVMExtension(e), ClientDiagnostics, _pipeline, "BatchClient.GetNodeExtensions", "value", "odata.nextLink", context); } @@ -9887,7 +10011,7 @@ public virtual AsyncPageable GetNodeExtensionsAsync(string /// The ID of the Pool that contains Compute Node. /// The ID of the Compute Node that you want to list extensions. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9900,15 +10024,15 @@ public virtual AsyncPageable GetNodeExtensionsAsync(string /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// - public virtual Pageable GetNodeExtensions(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, IEnumerable select = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetNodeExtensions(string poolId, string nodeId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, IEnumerable select = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? 
pageSizeHint) => CreateGetNodeExtensionsRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeExtensionsNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeExtensionsRequest(poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeExtensionsNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, select, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchNodeVMExtension.DeserializeBatchNodeVMExtension(e), ClientDiagnostics, _pipeline, "BatchClient.GetNodeExtensions", "value", "odata.nextLink", context); } @@ -9922,7 +10046,7 @@ public virtual Pageable GetNodeExtensions(string poolId, s /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -9930,7 +10054,7 @@ public virtual Pageable GetNodeExtensions(string poolId, s /// The ID of the Pool that contains Compute Node. /// The ID of the Compute Node that you want to list extensions. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9945,14 +10069,14 @@ public virtual Pageable GetNodeExtensions(string poolId, s /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. 
Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetNodeExtensionsAsync(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, IEnumerable select, RequestContext context) + /// + public virtual AsyncPageable GetNodeExtensionsAsync(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeExtensionsRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeExtensionsNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeExtensionsRequest(poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, select, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeExtensionsNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, select, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetNodeExtensions", "value", "odata.nextLink", context); } @@ -9966,7 +10090,7 @@ public virtual AsyncPageable GetNodeExtensionsAsync(string poolId, s /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -9974,7 +10098,7 @@ public virtual AsyncPageable GetNodeExtensionsAsync(string poolId, s /// The ID of the Pool that contains Compute Node. 
/// The ID of the Compute Node that you want to list extensions. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -9989,14 +10113,14 @@ public virtual AsyncPageable GetNodeExtensionsAsync(string poolId, s /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetNodeExtensions(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, IEnumerable select, RequestContext context) + /// + public virtual Pageable GetNodeExtensions(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, IEnumerable select, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeExtensionsRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeExtensionsNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, select, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeExtensionsRequest(poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, select, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetNodeExtensionsNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, select, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetNodeExtensions", "value", "odata.nextLink", context); } @@ -10004,7 +10128,7 @@ public virtual Pageable GetNodeExtensions(string poolId, string node /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node whose files you want to list. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -10021,15 +10145,15 @@ public virtual Pageable GetNodeExtensions(string poolId, string node /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// - public virtual AsyncPageable GetNodeFilesAsync(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, bool? recursive = null, CancellationToken cancellationToken = default) + /// + public virtual AsyncPageable GetNodeFilesAsync(string poolId, string nodeId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, bool? recursive = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? 
pageSizeHint) => CreateGetNodeFilesRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeFilesNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeFilesRequest(poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeFilesNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BatchNodeFile.DeserializeBatchNodeFile(e), ClientDiagnostics, _pipeline, "BatchClient.GetNodeFiles", "value", "odata.nextLink", context); } @@ -10037,7 +10161,7 @@ public virtual AsyncPageable GetNodeFilesAsync(string poolId, str /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node whose files you want to list. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -10054,15 +10178,15 @@ public virtual AsyncPageable GetNodeFilesAsync(string poolId, str /// The cancellation token to use. /// or is null. /// or is an empty string, and was expected to be non-empty. - /// - public virtual Pageable GetNodeFiles(string poolId, string nodeId, int? timeOutInSeconds = null, DateTimeOffset? ocpdate = null, int? maxresults = null, string filter = null, bool? 
recursive = null, CancellationToken cancellationToken = default) + /// + public virtual Pageable GetNodeFiles(string poolId, string nodeId, TimeSpan? timeOutInSeconds = null, DateTimeOffset? ocpDate = null, int? maxresults = null, string filter = null, bool? recursive = null, CancellationToken cancellationToken = default) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); RequestContext context = cancellationToken.CanBeCanceled ? new RequestContext { CancellationToken = cancellationToken } : null; - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeFilesRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeFilesNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeFilesRequest(poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeFilesNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BatchNodeFile.DeserializeBatchNodeFile(e), ClientDiagnostics, _pipeline, "BatchClient.GetNodeFiles", "value", "odata.nextLink", context); } @@ -10076,7 +10200,7 @@ public virtual Pageable GetNodeFiles(string poolId, string nodeId /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -10084,7 +10208,7 @@ public virtual Pageable GetNodeFiles(string poolId, string nodeId /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node whose files you want to list. 
/// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -10103,14 +10227,14 @@ public virtual Pageable GetNodeFiles(string poolId, string nodeId /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual AsyncPageable GetNodeFilesAsync(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + /// + public virtual AsyncPageable GetNodeFilesAsync(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, bool? recursive, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeFilesRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeFilesNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeFilesRequest(poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? 
pageSizeHint, string nextLink) => CreateGetNodeFilesNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); return GeneratorPageableHelpers.CreateAsyncPageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetNodeFiles", "value", "odata.nextLink", context); } @@ -10124,7 +10248,7 @@ public virtual AsyncPageable GetNodeFilesAsync(string poolId, string /// /// /// - /// Please try the simpler convenience overload with strongly typed models first. + /// Please try the simpler convenience overload with strongly typed models first. /// /// /// @@ -10132,7 +10256,7 @@ public virtual AsyncPageable GetNodeFilesAsync(string poolId, string /// The ID of the Pool that contains the Compute Node. /// The ID of the Compute Node whose files you want to list. /// The maximum time that the server can spend processing the request, in seconds. The default is 30 seconds. If the value is larger than 30, the default will be used instead.". - /// + /// /// The time the request was issued. Client libraries typically set this to the /// current system clock time; set it explicitly if you are calling the REST API /// directly. @@ -10151,18 +10275,18 @@ public virtual AsyncPageable GetNodeFilesAsync(string poolId, string /// or is an empty string, and was expected to be non-empty. /// Service returned a non-success status code. /// The from the service containing a list of objects. Details of the body schema for each item in the collection are in the Remarks section below. - /// - public virtual Pageable GetNodeFiles(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + /// + public virtual Pageable GetNodeFiles(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, bool? 
recursive, RequestContext context) { Argument.AssertNotNullOrEmpty(poolId, nameof(poolId)); Argument.AssertNotNullOrEmpty(nodeId, nameof(nodeId)); - HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeFilesRequest(poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); - HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeFilesNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpdate, maxresults, filter, recursive, context); + HttpMessage FirstPageRequest(int? pageSizeHint) => CreateGetNodeFilesRequest(poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); + HttpMessage NextPageRequest(int? pageSizeHint, string nextLink) => CreateGetNodeFilesNextPageRequest(nextLink, poolId, nodeId, timeOutInSeconds, ocpDate, maxresults, filter, recursive, context); return GeneratorPageableHelpers.CreatePageable(FirstPageRequest, NextPageRequest, e => BinaryData.FromString(e.GetRawText()), ClientDiagnostics, _pipeline, "BatchClient.GetNodeFiles", "value", "odata.nextLink", context); } - internal HttpMessage CreateGetApplicationsRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, RequestContext context) + internal HttpMessage CreateGetApplicationsRequest(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10173,7 +10297,7 @@ internal HttpMessage CreateGetApplicationsRequest(int? timeOutInSeconds, DateTim uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -10183,14 +10307,14 @@ internal HttpMessage CreateGetApplicationsRequest(int? 
timeOutInSeconds, DateTim request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetApplicationRequest(string applicationId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateGetApplicationRequest(string applicationId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10202,20 +10326,20 @@ internal HttpMessage CreateGetApplicationRequest(string applicationId, int? time uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetPoolUsageMetricsRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, DateTimeOffset? starttime, DateTimeOffset? endtime, string filter, RequestContext context) + internal HttpMessage CreateGetPoolUsageMetricsRequest(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, DateTimeOffset? starttime, DateTimeOffset? 
endtime, string filter, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10226,7 +10350,7 @@ internal HttpMessage CreateGetPoolUsageMetricsRequest(int? timeOutInSeconds, Dat uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -10248,14 +10372,14 @@ internal HttpMessage CreateGetPoolUsageMetricsRequest(int? timeOutInSeconds, Dat request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateCreatePoolRequest(RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateCreatePoolRequest(RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier201); var request = message.Request; @@ -10266,22 +10390,22 @@ internal HttpMessage CreateCreatePoolRequest(RequestContent content, int? 
timeOu uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateGetPoolsRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + internal HttpMessage CreateGetPoolsRequest(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10292,7 +10416,7 @@ internal HttpMessage CreateGetPoolsRequest(int? timeOutInSeconds, DateTimeOffset uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -10314,14 +10438,14 @@ internal HttpMessage CreateGetPoolsRequest(int? 
timeOutInSeconds, DateTimeOffset request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateDeletePoolRequest(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateDeletePoolInternalRequest(string poolId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -10333,15 +10457,15 @@ internal HttpMessage CreateDeletePoolRequest(string poolId, int? timeOutInSecond uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10350,7 +10474,7 @@ internal HttpMessage CreateDeletePoolRequest(string poolId, int? timeOutInSecond return message; } - internal HttpMessage CreatePoolExistsRequest(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreatePoolExistsRequest(string poolId, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200404); var request = message.Request; @@ -10362,15 +10486,15 @@ internal HttpMessage CreatePoolExistsRequest(string poolId, int? timeOutInSecond uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10379,7 +10503,7 @@ internal HttpMessage CreatePoolExistsRequest(string poolId, int? timeOutInSecond return message; } - internal HttpMessage CreateGetPoolRequest(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateGetPoolRequest(string poolId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10391,7 +10515,7 @@ internal HttpMessage CreateGetPoolRequest(string poolId, int? 
timeOutInSeconds, uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) { @@ -10405,9 +10529,9 @@ internal HttpMessage CreateGetPoolRequest(string poolId, int? timeOutInSeconds, request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10416,7 +10540,7 @@ internal HttpMessage CreateGetPoolRequest(string poolId, int? timeOutInSeconds, return message; } - internal HttpMessage CreateUpdatePoolRequest(string poolId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateUpdatePoolRequest(string poolId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10428,15 +10552,15 @@ internal HttpMessage CreateUpdatePoolRequest(string poolId, RequestContent conte uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10447,7 +10571,7 @@ internal HttpMessage CreateUpdatePoolRequest(string poolId, RequestContent conte return message; } - internal HttpMessage CreateDisablePoolAutoScaleRequest(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateDisablePoolAutoScaleRequest(string poolId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10460,20 +10584,20 @@ internal HttpMessage CreateDisablePoolAutoScaleRequest(string poolId, int? 
timeO uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateEnablePoolAutoScaleRequest(string poolId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateEnablePoolAutoScaleRequest(string poolId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10486,15 +10610,15 @@ internal HttpMessage CreateEnablePoolAutoScaleRequest(string poolId, RequestCont uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10505,7 +10629,7 @@ internal HttpMessage CreateEnablePoolAutoScaleRequest(string poolId, RequestCont return message; } - internal HttpMessage 
CreateEvaluatePoolAutoScaleRequest(string poolId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateEvaluatePoolAutoScaleRequest(string poolId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10518,22 +10642,22 @@ internal HttpMessage CreateEvaluatePoolAutoScaleRequest(string poolId, RequestCo uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateResizePoolRequest(string poolId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateResizePoolInternalRequest(string poolId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -10546,15 +10670,15 @@ internal HttpMessage CreateResizePoolRequest(string poolId, RequestContent conte uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10565,7 +10689,7 @@ internal HttpMessage CreateResizePoolRequest(string poolId, RequestContent conte return message; } - internal HttpMessage CreateStopPoolResizeRequest(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateStopPoolResizeInternalRequest(string poolId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -10578,15 +10702,15 @@ internal HttpMessage CreateStopPoolResizeRequest(string poolId, int? 
timeOutInSe uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10595,7 +10719,7 @@ internal HttpMessage CreateStopPoolResizeRequest(string poolId, int? timeOutInSe return message; } - internal HttpMessage CreateReplacePoolPropertiesRequest(string poolId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateReplacePoolPropertiesRequest(string poolId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier204); var request = message.Request; @@ -10608,22 +10732,22 @@ internal HttpMessage CreateReplacePoolPropertiesRequest(string poolId, RequestCo uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateRemoveNodesRequest(string poolId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateRemoveNodesInternalRequest(string poolId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -10636,15 +10760,15 @@ internal HttpMessage CreateRemoveNodesRequest(string poolId, RequestContent cont uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10655,7 +10779,7 @@ internal HttpMessage CreateRemoveNodesRequest(string poolId, RequestContent cont return message; } - internal HttpMessage CreateGetSupportedImagesRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + internal HttpMessage CreateGetSupportedImagesRequest(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10666,7 +10790,7 @@ internal HttpMessage CreateGetSupportedImagesRequest(int? timeOutInSeconds, Date uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -10680,14 +10804,14 @@ internal HttpMessage CreateGetSupportedImagesRequest(int? 
timeOutInSeconds, Date request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetPoolNodeCountsRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + internal HttpMessage CreateGetPoolNodeCountsRequest(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10698,7 +10822,7 @@ internal HttpMessage CreateGetPoolNodeCountsRequest(int? timeOutInSeconds, DateT uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -10712,14 +10836,14 @@ internal HttpMessage CreateGetPoolNodeCountsRequest(int? timeOutInSeconds, DateT request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateDeleteJobRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, bool? force, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateDeleteJobInternalRequest(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, bool? 
force, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -10731,7 +10855,7 @@ internal HttpMessage CreateDeleteJobRequest(string jobId, int? timeOutInSeconds, uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (force != null) { @@ -10741,9 +10865,9 @@ internal HttpMessage CreateDeleteJobRequest(string jobId, int? timeOutInSeconds, request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10752,7 +10876,7 @@ internal HttpMessage CreateDeleteJobRequest(string jobId, int? timeOutInSeconds, return message; } - internal HttpMessage CreateGetJobRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateGetJobRequest(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10764,7 +10888,7 @@ internal HttpMessage CreateGetJobRequest(string jobId, int? 
timeOutInSeconds, Da uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) { @@ -10778,9 +10902,9 @@ internal HttpMessage CreateGetJobRequest(string jobId, int? timeOutInSeconds, Da request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10789,7 +10913,7 @@ internal HttpMessage CreateGetJobRequest(string jobId, int? timeOutInSeconds, Da return message; } - internal HttpMessage CreateUpdateJobRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateUpdateJobRequest(string jobId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10801,15 +10925,15 @@ internal HttpMessage CreateUpdateJobRequest(string jobId, RequestContent content uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10820,7 +10944,7 @@ internal HttpMessage CreateUpdateJobRequest(string jobId, RequestContent content return message; } - internal HttpMessage CreateReplaceJobRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateReplaceJobRequest(string jobId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10832,15 +10956,15 @@ internal HttpMessage CreateReplaceJobRequest(string jobId, RequestContent conten uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10851,7 +10975,7 @@ internal HttpMessage CreateReplaceJobRequest(string jobId, RequestContent conten return message; } - internal HttpMessage CreateDisableJobRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateDisableJobInternalRequest(string jobId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -10864,15 +10988,15 @@ internal HttpMessage CreateDisableJobRequest(string jobId, RequestContent conten uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10883,7 +11007,7 @@ internal HttpMessage CreateDisableJobRequest(string jobId, RequestContent conten return message; } - internal HttpMessage CreateEnableJobRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateEnableJobInternalRequest(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -10896,15 +11020,15 @@ internal HttpMessage CreateEnableJobRequest(string jobId, int? 
timeOutInSeconds, uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10913,7 +11037,7 @@ internal HttpMessage CreateEnableJobRequest(string jobId, int? timeOutInSeconds, return message; } - internal HttpMessage CreateTerminateJobRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, bool? force, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateTerminateJobInternalRequest(string jobId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, bool? 
force, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -10926,7 +11050,7 @@ internal HttpMessage CreateTerminateJobRequest(string jobId, RequestContent cont uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (force != null) { @@ -10936,9 +11060,9 @@ internal HttpMessage CreateTerminateJobRequest(string jobId, RequestContent cont request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -10949,7 +11073,7 @@ internal HttpMessage CreateTerminateJobRequest(string jobId, RequestContent cont return message; } - internal HttpMessage CreateCreateJobRequest(RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateCreateJobRequest(RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier201); var request = message.Request; @@ -10960,22 +11084,22 @@ internal HttpMessage CreateCreateJobRequest(RequestContent content, int? 
timeOut uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateGetJobsRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + internal HttpMessage CreateGetJobsRequest(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -10986,7 +11110,7 @@ internal HttpMessage CreateGetJobsRequest(int? timeOutInSeconds, DateTimeOffset? uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -11008,14 +11132,14 @@ internal HttpMessage CreateGetJobsRequest(int? timeOutInSeconds, DateTimeOffset? 
request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetJobsFromSchedulesRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + internal HttpMessage CreateGetJobsFromSchedulesRequest(string jobScheduleId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11028,7 +11152,7 @@ internal HttpMessage CreateGetJobsFromSchedulesRequest(string jobScheduleId, int uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -11050,14 +11174,14 @@ internal HttpMessage CreateGetJobsFromSchedulesRequest(string jobScheduleId, int request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetJobPreparationAndReleaseTaskStatusesRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, string filter, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetJobPreparationAndReleaseTaskStatusesRequest(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11070,7 +11194,7 @@ internal HttpMessage CreateGetJobPreparationAndReleaseTaskStatusesRequest(string uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -11088,14 +11212,14 @@ internal HttpMessage CreateGetJobPreparationAndReleaseTaskStatusesRequest(string request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetJobTaskCountsRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateGetJobTaskCountsRequest(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11108,20 +11232,20 @@ internal HttpMessage CreateGetJobTaskCountsRequest(string jobId, int? 
timeOutInS uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateCreateCertificateRequest(RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateCreateCertificateRequest(RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier201); var request = message.Request; @@ -11132,22 +11256,22 @@ internal HttpMessage CreateCreateCertificateRequest(RequestContent content, int? uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateGetCertificatesRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, string filter, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetCertificatesRequest(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11158,7 +11282,7 @@ internal HttpMessage CreateGetCertificatesRequest(int? timeOutInSeconds, DateTim uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -11176,14 +11300,14 @@ internal HttpMessage CreateGetCertificatesRequest(int? timeOutInSeconds, DateTim request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateCancelCertificateDeletionRequest(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateCancelCertificateDeletionRequest(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier204); var request = message.Request; @@ -11198,20 +11322,20 @@ internal HttpMessage CreateCancelCertificateDeletionRequest(string thumbprintAlg uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateDeleteCertificateRequest(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateDeleteCertificateInternalRequest(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -11226,20 +11350,20 @@ internal HttpMessage CreateDeleteCertificateRequest(string thumbprintAlgorithm, uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetCertificateRequest(string thumbprintAlgorithm, string thumbprint, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetCertificateRequest(string thumbprintAlgorithm, string thumbprint, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11254,7 +11378,7 @@ internal HttpMessage CreateGetCertificateRequest(string thumbprintAlgorithm, str uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) { @@ -11264,14 +11388,14 @@ internal HttpMessage CreateGetCertificateRequest(string thumbprintAlgorithm, str request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateJobScheduleExistsRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateJobScheduleExistsRequest(string jobScheduleId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200404); var request = message.Request; @@ -11283,15 +11407,15 @@ internal HttpMessage CreateJobScheduleExistsRequest(string jobScheduleId, int? 
t uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11300,7 +11424,7 @@ internal HttpMessage CreateJobScheduleExistsRequest(string jobScheduleId, int? t return message; } - internal HttpMessage CreateDeleteJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, bool? force, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateDeleteJobScheduleInternalRequest(string jobScheduleId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, bool? force, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -11312,7 +11436,7 @@ internal HttpMessage CreateDeleteJobScheduleRequest(string jobScheduleId, int? t uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (force != null) { @@ -11322,9 +11446,9 @@ internal HttpMessage CreateDeleteJobScheduleRequest(string jobScheduleId, int? 
t request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11333,7 +11457,7 @@ internal HttpMessage CreateDeleteJobScheduleRequest(string jobScheduleId, int? t return message; } - internal HttpMessage CreateGetJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateGetJobScheduleRequest(string jobScheduleId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11345,7 +11469,7 @@ internal HttpMessage CreateGetJobScheduleRequest(string jobScheduleId, int? time uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) { @@ -11359,9 +11483,9 @@ internal HttpMessage CreateGetJobScheduleRequest(string jobScheduleId, int? 
time request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11370,7 +11494,7 @@ internal HttpMessage CreateGetJobScheduleRequest(string jobScheduleId, int? time return message; } - internal HttpMessage CreateUpdateJobScheduleRequest(string jobScheduleId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateUpdateJobScheduleRequest(string jobScheduleId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11382,15 +11506,15 @@ internal HttpMessage CreateUpdateJobScheduleRequest(string jobScheduleId, Reques uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11401,7 +11525,7 @@ internal HttpMessage CreateUpdateJobScheduleRequest(string jobScheduleId, Reques return message; } - internal HttpMessage CreateReplaceJobScheduleRequest(string jobScheduleId, RequestContent content, int? 
timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateReplaceJobScheduleRequest(string jobScheduleId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11413,15 +11537,15 @@ internal HttpMessage CreateReplaceJobScheduleRequest(string jobScheduleId, Reque uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11432,7 +11556,7 @@ internal HttpMessage CreateReplaceJobScheduleRequest(string jobScheduleId, Reque return message; } - internal HttpMessage CreateDisableJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateDisableJobScheduleRequest(string jobScheduleId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier204); var request = message.Request; @@ -11445,15 +11569,15 @@ internal HttpMessage CreateDisableJobScheduleRequest(string jobScheduleId, int? 
uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11462,7 +11586,7 @@ internal HttpMessage CreateDisableJobScheduleRequest(string jobScheduleId, int? return message; } - internal HttpMessage CreateEnableJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateEnableJobScheduleRequest(string jobScheduleId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier204); var request = message.Request; @@ -11475,15 +11599,15 @@ internal HttpMessage CreateEnableJobScheduleRequest(string jobScheduleId, int? 
t uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11492,7 +11616,7 @@ internal HttpMessage CreateEnableJobScheduleRequest(string jobScheduleId, int? t return message; } - internal HttpMessage CreateTerminateJobScheduleRequest(string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, bool? force, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateTerminateJobScheduleInternalRequest(string jobScheduleId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, bool? 
force, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -11505,7 +11629,7 @@ internal HttpMessage CreateTerminateJobScheduleRequest(string jobScheduleId, int uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (force != null) { @@ -11515,9 +11639,9 @@ internal HttpMessage CreateTerminateJobScheduleRequest(string jobScheduleId, int request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11526,7 +11650,7 @@ internal HttpMessage CreateTerminateJobScheduleRequest(string jobScheduleId, int return message; } - internal HttpMessage CreateCreateJobScheduleRequest(RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateCreateJobScheduleRequest(RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier201); var request = message.Request; @@ -11537,22 +11661,22 @@ internal HttpMessage CreateCreateJobScheduleRequest(RequestContent content, int? 
uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateGetJobSchedulesRequest(int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + internal HttpMessage CreateGetJobSchedulesRequest(TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11563,7 +11687,7 @@ internal HttpMessage CreateGetJobSchedulesRequest(int? timeOutInSeconds, DateTim uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -11585,14 +11709,14 @@ internal HttpMessage CreateGetJobSchedulesRequest(int? 
timeOutInSeconds, DateTim request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateCreateTaskRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateCreateTaskRequest(string jobId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier201); var request = message.Request; @@ -11605,22 +11729,22 @@ internal HttpMessage CreateCreateTaskRequest(string jobId, RequestContent conten uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateGetTasksRequest(string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + internal HttpMessage CreateGetTasksRequest(string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? 
maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11633,7 +11757,7 @@ internal HttpMessage CreateGetTasksRequest(string jobId, int? timeOutInSeconds, uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -11655,14 +11779,14 @@ internal HttpMessage CreateGetTasksRequest(string jobId, int? timeOutInSeconds, request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateCreateTaskCollectionRequest(string jobId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateCreateTaskCollectionRequest(string jobId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11675,22 +11799,22 @@ internal HttpMessage CreateCreateTaskCollectionRequest(string jobId, RequestCont uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateDeleteTaskRequest(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateDeleteTaskRequest(string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11704,15 +11828,15 @@ internal HttpMessage CreateDeleteTaskRequest(string jobId, string taskId, int? 
t uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11721,7 +11845,7 @@ internal HttpMessage CreateDeleteTaskRequest(string jobId, string taskId, int? t return message; } - internal HttpMessage CreateGetTaskRequest(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateGetTaskRequest(string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, IEnumerable expand, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11735,7 +11859,7 @@ internal HttpMessage CreateGetTaskRequest(string jobId, string taskId, int? time uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) { @@ -11749,9 +11873,9 @@ internal HttpMessage CreateGetTaskRequest(string jobId, string taskId, int? 
time request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11760,7 +11884,7 @@ internal HttpMessage CreateGetTaskRequest(string jobId, string taskId, int? time return message; } - internal HttpMessage CreateReplaceTaskRequest(string jobId, string taskId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateReplaceTaskRequest(string jobId, string taskId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11774,15 +11898,15 @@ internal HttpMessage CreateReplaceTaskRequest(string jobId, string taskId, Reque uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11793,7 +11917,7 @@ internal HttpMessage CreateReplaceTaskRequest(string jobId, string taskId, Reque return message; } - internal HttpMessage CreateGetSubTasksRequest(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetSubTasksRequest(string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11808,7 +11932,7 @@ internal HttpMessage CreateGetSubTasksRequest(string jobId, string taskId, int? uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) { @@ -11818,14 +11942,14 @@ internal HttpMessage CreateGetSubTasksRequest(string jobId, string taskId, int? request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateTerminateTaskRequest(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateTerminateTaskRequest(string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier204); var request = message.Request; @@ -11840,15 +11964,15 @@ internal HttpMessage CreateTerminateTaskRequest(string jobId, string taskId, int uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11857,7 +11981,7 @@ internal HttpMessage CreateTerminateTaskRequest(string jobId, string taskId, int return message; } - internal HttpMessage CreateReactivateTaskRequest(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateReactivateTaskRequest(string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier204); var request = message.Request; @@ -11872,15 +11996,15 @@ internal HttpMessage CreateReactivateTaskRequest(string jobId, string taskId, in uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11889,7 +12013,7 @@ internal HttpMessage CreateReactivateTaskRequest(string jobId, string taskId, in return message; } - internal HttpMessage CreateDeleteTaskFileRequest(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, bool? recursive, RequestContext context) + internal HttpMessage CreateDeleteTaskFileRequest(string jobId, string taskId, string filePath, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, bool? 
recursive, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11905,7 +12029,7 @@ internal HttpMessage CreateDeleteTaskFileRequest(string jobId, string taskId, st uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (recursive != null) { @@ -11915,14 +12039,14 @@ internal HttpMessage CreateDeleteTaskFileRequest(string jobId, string taskId, st request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetTaskFileRequest(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateGetTaskFileRequest(string jobId, string taskId, string filePath, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, string ocpRange, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11938,15 +12062,15 @@ internal HttpMessage CreateGetTaskFileRequest(string jobId, string taskId, strin uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/octet-stream"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (ocpRange != null) { @@ -11959,7 +12083,7 @@ internal HttpMessage CreateGetTaskFileRequest(string jobId, string taskId, strin return message; } - internal HttpMessage CreateGetTaskFilePropertiesInternalRequest(string jobId, string taskId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateGetTaskFilePropertiesInternalRequest(string jobId, string taskId, string filePath, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -11975,15 +12099,15 @@ internal HttpMessage CreateGetTaskFilePropertiesInternalRequest(string jobId, st uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -11992,7 +12116,7 @@ internal HttpMessage CreateGetTaskFilePropertiesInternalRequest(string jobId, st return message; } - internal HttpMessage CreateGetTaskFilesRequest(string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + internal HttpMessage CreateGetTaskFilesRequest(string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, bool? recursive, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12007,7 +12131,7 @@ internal HttpMessage CreateGetTaskFilesRequest(string jobId, string taskId, int? uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -12025,14 +12149,14 @@ internal HttpMessage CreateGetTaskFilesRequest(string jobId, string taskId, int? 
request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateCreateNodeUserRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateCreateNodeUserRequest(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier201); var request = message.Request; @@ -12047,22 +12171,22 @@ internal HttpMessage CreateCreateNodeUserRequest(string poolId, string nodeId, R uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateDeleteNodeUserRequest(string poolId, string nodeId, string userName, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateDeleteNodeUserRequest(string poolId, string nodeId, string userName, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12078,20 +12202,20 @@ internal HttpMessage CreateDeleteNodeUserRequest(string poolId, string nodeId, s uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateReplaceNodeUserRequest(string poolId, string nodeId, string userName, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateReplaceNodeUserRequest(string poolId, string nodeId, string userName, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12107,22 +12231,22 @@ internal HttpMessage CreateReplaceNodeUserRequest(string poolId, string nodeId, uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateGetNodeRequest(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetNodeRequest(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12136,7 +12260,7 @@ internal HttpMessage CreateGetNodeRequest(string poolId, string nodeId, int? tim uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) { @@ -12146,14 +12270,14 @@ internal HttpMessage CreateGetNodeRequest(string poolId, string nodeId, int? 
tim request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateRebootNodeRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateRebootNodeInternalRequest(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -12168,22 +12292,22 @@ internal HttpMessage CreateRebootNodeRequest(string poolId, string nodeId, Reque uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateStartNodeRequest(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateStartNodeInternalRequest(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -12198,20 +12322,20 @@ internal HttpMessage CreateStartNodeRequest(string poolId, string nodeId, int? t uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateReimageNodeRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateReimageNodeInternalRequest(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -12226,22 +12350,22 @@ internal HttpMessage CreateReimageNodeRequest(string poolId, string nodeId, Requ uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateDeallocateNodeRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateDeallocateNodeInternalRequest(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier202); var request = message.Request; @@ -12256,22 +12380,22 @@ internal HttpMessage CreateDeallocateNodeRequest(string poolId, string nodeId, R uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateDisableNodeSchedulingRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateDisableNodeSchedulingRequest(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12286,22 +12410,22 @@ internal HttpMessage CreateDisableNodeSchedulingRequest(string poolId, string no uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateEnableNodeSchedulingRequest(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateEnableNodeSchedulingRequest(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12316,20 +12440,20 @@ internal HttpMessage CreateEnableNodeSchedulingRequest(string poolId, string nod uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetNodeRemoteLoginSettingsRequest(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateGetNodeRemoteLoginSettingsRequest(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12344,20 +12468,20 @@ internal HttpMessage CreateGetNodeRemoteLoginSettingsRequest(string poolId, stri uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateUploadNodeLogsRequest(string poolId, string nodeId, RequestContent content, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestContext context) + internal HttpMessage CreateUploadNodeLogsRequest(string poolId, string nodeId, RequestContent content, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12372,22 +12496,22 @@ internal HttpMessage CreateUploadNodeLogsRequest(string poolId, string nodeId, R uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } request.Headers.Add("Content-Type", "application/json; odata=minimalmetadata"); request.Content = content; return message; } - internal HttpMessage CreateGetNodesRequest(string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetNodesRequest(string poolId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12400,7 +12524,7 @@ internal HttpMessage CreateGetNodesRequest(string poolId, int? timeOutInSeconds, uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -12418,14 +12542,14 @@ internal HttpMessage CreateGetNodesRequest(string poolId, int? 
timeOutInSeconds, request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetNodeExtensionRequest(string poolId, string nodeId, string extensionName, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetNodeExtensionRequest(string poolId, string nodeId, string extensionName, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12441,7 +12565,7 @@ internal HttpMessage CreateGetNodeExtensionRequest(string poolId, string nodeId, uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (select != null && !(select is ChangeTrackingList changeTrackingList && changeTrackingList.IsUndefined)) { @@ -12451,14 +12575,14 @@ internal HttpMessage CreateGetNodeExtensionRequest(string poolId, string nodeId, request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetNodeExtensionsRequest(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetNodeExtensionsRequest(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12473,7 +12597,7 @@ internal HttpMessage CreateGetNodeExtensionsRequest(string poolId, string nodeId uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -12487,14 +12611,14 @@ internal HttpMessage CreateGetNodeExtensionsRequest(string poolId, string nodeId request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateDeleteNodeFileRequest(string poolId, string nodeId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, bool? recursive, RequestContext context) + internal HttpMessage CreateDeleteNodeFileRequest(string poolId, string nodeId, string filePath, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, bool? 
recursive, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12510,7 +12634,7 @@ internal HttpMessage CreateDeleteNodeFileRequest(string poolId, string nodeId, s uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (recursive != null) { @@ -12520,14 +12644,14 @@ internal HttpMessage CreateDeleteNodeFileRequest(string poolId, string nodeId, s request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetNodeFileRequest(string poolId, string nodeId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, string ocpRange, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateGetNodeFileRequest(string poolId, string nodeId, string filePath, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, string ocpRange, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12543,15 +12667,15 @@ internal HttpMessage CreateGetNodeFileRequest(string poolId, string nodeId, stri uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/octet-stream"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (ocpRange != null) { @@ -12564,7 +12688,7 @@ internal HttpMessage CreateGetNodeFileRequest(string poolId, string nodeId, stri return message; } - internal HttpMessage CreateGetNodeFilePropertiesInternalRequest(string poolId, string nodeId, string filePath, int? timeOutInSeconds, DateTimeOffset? ocpdate, RequestConditions requestConditions, RequestContext context) + internal HttpMessage CreateGetNodeFilePropertiesInternalRequest(string poolId, string nodeId, string filePath, TimeSpan? timeOutInSeconds, DateTimeOffset? 
ocpDate, RequestConditions requestConditions, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12580,15 +12704,15 @@ internal HttpMessage CreateGetNodeFilePropertiesInternalRequest(string poolId, s uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } request.Uri = uri; request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } if (requestConditions != null) { @@ -12597,7 +12721,7 @@ internal HttpMessage CreateGetNodeFilePropertiesInternalRequest(string poolId, s return message; } - internal HttpMessage CreateGetNodeFilesRequest(string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + internal HttpMessage CreateGetNodeFilesRequest(string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, bool? 
recursive, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12612,7 +12736,7 @@ internal HttpMessage CreateGetNodeFilesRequest(string poolId, string nodeId, int uri.AppendQuery("api-version", _apiVersion, true); if (timeOutInSeconds != null) { - uri.AppendQuery("timeOut", timeOutInSeconds.Value, true); + uri.AppendQuery("timeOut", timeOutInSeconds.Value, "%s", true); } if (maxresults != null) { @@ -12630,14 +12754,14 @@ internal HttpMessage CreateGetNodeFilesRequest(string poolId, string nodeId, int request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetApplicationsNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, RequestContext context) + internal HttpMessage CreateGetApplicationsNextPageRequest(string nextLink, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12649,14 +12773,14 @@ internal HttpMessage CreateGetApplicationsNextPageRequest(string nextLink, int? request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetPoolUsageMetricsNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, DateTimeOffset? starttime, DateTimeOffset? endtime, string filter, RequestContext context) + internal HttpMessage CreateGetPoolUsageMetricsNextPageRequest(string nextLink, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, DateTimeOffset? starttime, DateTimeOffset? endtime, string filter, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12668,14 +12792,14 @@ internal HttpMessage CreateGetPoolUsageMetricsNextPageRequest(string nextLink, i request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetPoolsNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + internal HttpMessage CreateGetPoolsNextPageRequest(string nextLink, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12687,14 +12811,14 @@ internal HttpMessage CreateGetPoolsNextPageRequest(string nextLink, int? 
timeOut request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetSupportedImagesNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + internal HttpMessage CreateGetSupportedImagesNextPageRequest(string nextLink, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12706,14 +12830,14 @@ internal HttpMessage CreateGetSupportedImagesNextPageRequest(string nextLink, in request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetPoolNodeCountsNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, RequestContext context) + internal HttpMessage CreateGetPoolNodeCountsNextPageRequest(string nextLink, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? 
maxresults, string filter, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12725,14 +12849,14 @@ internal HttpMessage CreateGetPoolNodeCountsNextPageRequest(string nextLink, int request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetJobsNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + internal HttpMessage CreateGetJobsNextPageRequest(string nextLink, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12744,14 +12868,14 @@ internal HttpMessage CreateGetJobsNextPageRequest(string nextLink, int? timeOutI request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetJobsFromSchedulesNextPageRequest(string nextLink, string jobScheduleId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + internal HttpMessage CreateGetJobsFromSchedulesNextPageRequest(string nextLink, string jobScheduleId, TimeSpan? 
timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12763,14 +12887,14 @@ internal HttpMessage CreateGetJobsFromSchedulesNextPageRequest(string nextLink, request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(string nextLink, string jobId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetJobPreparationAndReleaseTaskStatusesNextPageRequest(string nextLink, string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12782,14 +12906,14 @@ internal HttpMessage CreateGetJobPreparationAndReleaseTaskStatusesNextPageReques request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetCertificatesNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? 
maxresults, string filter, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetCertificatesNextPageRequest(string nextLink, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12801,14 +12925,14 @@ internal HttpMessage CreateGetCertificatesNextPageRequest(string nextLink, int? request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetJobSchedulesNextPageRequest(string nextLink, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + internal HttpMessage CreateGetJobSchedulesNextPageRequest(string nextLink, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12820,14 +12944,14 @@ internal HttpMessage CreateGetJobSchedulesNextPageRequest(string nextLink, int? request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetTasksNextPageRequest(string nextLink, string jobId, int? timeOutInSeconds, DateTimeOffset? 
ocpdate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) + internal HttpMessage CreateGetTasksNextPageRequest(string nextLink, string jobId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, IEnumerable select, IEnumerable expand, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12839,14 +12963,14 @@ internal HttpMessage CreateGetTasksNextPageRequest(string nextLink, string jobId request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetSubTasksNextPageRequest(string nextLink, string jobId, string taskId, int? timeOutInSeconds, DateTimeOffset? ocpdate, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetSubTasksNextPageRequest(string nextLink, string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12858,14 +12982,14 @@ internal HttpMessage CreateGetSubTasksNextPageRequest(string nextLink, string jo request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetTaskFilesNextPageRequest(string nextLink, string jobId, string taskId, int? 
timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + internal HttpMessage CreateGetTaskFilesNextPageRequest(string nextLink, string jobId, string taskId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, bool? recursive, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12877,14 +13001,14 @@ internal HttpMessage CreateGetTaskFilesNextPageRequest(string nextLink, string j request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetNodesNextPageRequest(string nextLink, string poolId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetNodesNextPageRequest(string nextLink, string poolId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? 
maxresults, string filter, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12896,14 +13020,14 @@ internal HttpMessage CreateGetNodesNextPageRequest(string nextLink, string poolI request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetNodeExtensionsNextPageRequest(string nextLink, string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, IEnumerable select, RequestContext context) + internal HttpMessage CreateGetNodeExtensionsNextPageRequest(string nextLink, string poolId, string nodeId, TimeSpan? timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, IEnumerable select, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12915,14 +13039,14 @@ internal HttpMessage CreateGetNodeExtensionsNextPageRequest(string nextLink, str request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } - internal HttpMessage CreateGetNodeFilesNextPageRequest(string nextLink, string poolId, string nodeId, int? timeOutInSeconds, DateTimeOffset? ocpdate, int? maxresults, string filter, bool? recursive, RequestContext context) + internal HttpMessage CreateGetNodeFilesNextPageRequest(string nextLink, string poolId, string nodeId, TimeSpan? 
timeOutInSeconds, DateTimeOffset? ocpDate, int? maxresults, string filter, bool? recursive, RequestContext context) { var message = _pipeline.CreateMessage(context, ResponseClassifier200); var request = message.Request; @@ -12934,9 +13058,9 @@ internal HttpMessage CreateGetNodeFilesNextPageRequest(string nextLink, string p request.Headers.Add("Accept", "application/json"); request.Headers.Add("client-request-id", message.Request.ClientRequestId); request.Headers.Add("return-client-request-id", "true"); - if (ocpdate != null) + if (ocpDate != null) { - request.Headers.Add("ocp-date", ocpdate.Value, "R"); + request.Headers.Add("ocp-date", ocpDate.Value, "R"); } return message; } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchContainerConfiguration.Serialization.cs similarity index 72% rename from sdk/batch/Azure.Compute.Batch/src/Generated/ContainerConfiguration.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchContainerConfiguration.Serialization.cs index f2de574f29f5..5194c74431ec 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerConfiguration.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchContainerConfiguration.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class ContainerConfiguration : IUtf8JsonSerializable, IJsonModel + public partial class BatchContainerConfiguration : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { 
writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReader /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(ContainerConfiguration)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchContainerConfiguration)} does not support writing '{format}' format."); } writer.WritePropertyName("type"u8); @@ -73,19 +73,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - ContainerConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchContainerConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(ContainerConfiguration)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchContainerConfiguration)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeContainerConfiguration(document.RootElement, options); + return DeserializeBatchContainerConfiguration(document.RootElement, options); } - internal static ContainerConfiguration DeserializeContainerConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchContainerConfiguration DeserializeBatchContainerConfiguration(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -139,46 +139,46 @@ internal static ContainerConfiguration DeserializeContainerConfiguration(JsonEle } } serializedAdditionalRawData = rawDataDictionary; - return new ContainerConfiguration(type, containerImageNames ?? new ChangeTrackingList(), containerRegistries ?? new ChangeTrackingList(), serializedAdditionalRawData); + return new BatchContainerConfiguration(type, containerImageNames ?? new ChangeTrackingList(), containerRegistries ?? new ChangeTrackingList(), serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(ContainerConfiguration)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchContainerConfiguration)} does not support writing '{options.Format}' format."); } } - ContainerConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchContainerConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeContainerConfiguration(document.RootElement, options); + return DeserializeBatchContainerConfiguration(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(ContainerConfiguration)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchContainerConfiguration)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static ContainerConfiguration FromResponse(Response response) + internal static BatchContainerConfiguration FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeContainerConfiguration(document.RootElement); + return DeserializeBatchContainerConfiguration(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchContainerConfiguration.cs similarity index 83% rename from sdk/batch/Azure.Compute.Batch/src/Generated/ContainerConfiguration.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchContainerConfiguration.cs index dbcc48aa2857..5e697161c522 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchContainerConfiguration.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// The configuration for container-enabled Pools. - public partial class ContainerConfiguration + public partial class BatchContainerConfiguration { /// /// Keeps track of any properties unknown to the library. @@ -45,21 +45,21 @@ public partial class ContainerConfiguration /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// The container technology to be used. - public ContainerConfiguration(ContainerType type) + public BatchContainerConfiguration(ContainerType type) { Type = type; ContainerImageNames = new ChangeTrackingList(); ContainerRegistries = new ChangeTrackingList(); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The container technology to be used. /// The collection of container Image names. This is the full Image reference, as would be specified to "docker pull". 
An Image will be sourced from the default Docker registry unless the Image is fully qualified with an alternative registry. /// Additional private registries from which containers can be pulled. If any Images must be downloaded from a private registry which requires credentials, then those credentials must be provided here. /// Keeps track of any properties unknown to the library. - internal ContainerConfiguration(ContainerType type, IList containerImageNames, IList containerRegistries, IDictionary serializedAdditionalRawData) + internal BatchContainerConfiguration(ContainerType type, IList containerImageNames, IList containerRegistries, IDictionary serializedAdditionalRawData) { Type = type; ContainerImageNames = containerImageNames; @@ -67,8 +67,8 @@ internal ContainerConfiguration(ContainerType type, IList containerImage _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal ContainerConfiguration() + /// Initializes a new instance of for deserialization. 
+ internal BatchContainerConfiguration() { } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddCollectionResult.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCreateTaskCollectionResult.Serialization.cs similarity index 64% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddCollectionResult.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchCreateTaskCollectionResult.Serialization.cs index e23776c37f4e..acdb9084dbe6 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddCollectionResult.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCreateTaskCollectionResult.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchTaskAddCollectionResult : IUtf8JsonSerializable, IJsonModel + public partial class BatchCreateTaskCollectionResult : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,17 +28,17 @@ void IJsonModel.Write(Utf8JsonWriter writer, Model /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchTaskAddCollectionResult)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchCreateTaskCollectionResult)} does not support writing '{format}' format."); } - if (Optional.IsCollectionDefined(Value)) + if (Optional.IsCollectionDefined(Values)) { writer.WritePropertyName("value"u8); writer.WriteStartArray(); - foreach (var item in Value) + foreach (var item in Values) { writer.WriteObjectValue(item, options); } @@ -61,19 +61,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchTaskAddCollectionResult IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchCreateTaskCollectionResult IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchTaskAddCollectionResult)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchCreateTaskCollectionResult)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchTaskAddCollectionResult(document.RootElement, options); + return DeserializeBatchCreateTaskCollectionResult(document.RootElement, options); } - internal static BatchTaskAddCollectionResult DeserializeBatchTaskAddCollectionResult(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchCreateTaskCollectionResult DeserializeBatchCreateTaskCollectionResult(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -81,7 +81,7 @@ internal static BatchTaskAddCollectionResult DeserializeBatchTaskAddCollectionRe { return null; } - IReadOnlyList value = default; + IReadOnlyList value = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -92,10 +92,10 @@ internal static BatchTaskAddCollectionResult DeserializeBatchTaskAddCollectionRe { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(BatchTaskAddResult.DeserializeBatchTaskAddResult(item, options)); + array.Add(BatchTaskCreateResult.DeserializeBatchTaskCreateResult(item, options)); } value = array; continue; @@ -106,46 +106,46 @@ internal static BatchTaskAddCollectionResult DeserializeBatchTaskAddCollectionRe } } serializedAdditionalRawData = rawDataDictionary; - return new BatchTaskAddCollectionResult(value ?? 
new ChangeTrackingList(), serializedAdditionalRawData); + return new BatchCreateTaskCollectionResult(value ?? new ChangeTrackingList(), serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchTaskAddCollectionResult)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchCreateTaskCollectionResult)} does not support writing '{options.Format}' format."); } } - BatchTaskAddCollectionResult IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchCreateTaskCollectionResult IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchTaskAddCollectionResult(document.RootElement, options); + return DeserializeBatchCreateTaskCollectionResult(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchTaskAddCollectionResult)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchCreateTaskCollectionResult)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchTaskAddCollectionResult FromResponse(Response response) + internal static BatchCreateTaskCollectionResult FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchTaskAddCollectionResult(document.RootElement); + return DeserializeBatchCreateTaskCollectionResult(document.RootElement); } /// Convert into a . 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddCollectionResult.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCreateTaskCollectionResult.cs similarity index 71% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddCollectionResult.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchCreateTaskCollectionResult.cs index 94824c7958c0..20a838edf02c 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddCollectionResult.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchCreateTaskCollectionResult.cs @@ -10,8 +10,8 @@ namespace Azure.Compute.Batch { - /// The result of adding a collection of Tasks to a Job. - public partial class BatchTaskAddCollectionResult + /// The result of creating a collection of Tasks to a Job. + public partial class BatchCreateTaskCollectionResult { /// /// Keeps track of any properties unknown to the library. @@ -45,22 +45,22 @@ public partial class BatchTaskAddCollectionResult /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - internal BatchTaskAddCollectionResult() + /// Initializes a new instance of . + internal BatchCreateTaskCollectionResult() { - Value = new ChangeTrackingList(); + Values = new ChangeTrackingList(); } - /// Initializes a new instance of . - /// The results of the add Task collection operation. + /// Initializes a new instance of . + /// The results of the create Task collection operation. /// Keeps track of any properties unknown to the library. - internal BatchTaskAddCollectionResult(IReadOnlyList value, IDictionary serializedAdditionalRawData) + internal BatchCreateTaskCollectionResult(IReadOnlyList values, IDictionary serializedAdditionalRawData) { - Value = value; + Values = values; _serializedAdditionalRawData = serializedAdditionalRawData; } - /// The results of the add Task collection operation. - public IReadOnlyList Value { get; } + /// The results of the create Task collection operation. 
+ public IReadOnlyList Values { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchDiffDiskSettings.Serialization.cs similarity index 68% rename from sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchDiffDiskSettings.Serialization.cs index 21f8d64c71cb..feca3f38431b 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchDiffDiskSettings.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class DiffDiskSettings : IUtf8JsonSerializable, IJsonModel + public partial class BatchDiffDiskSettings : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriter /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(DiffDiskSettings)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchDiffDiskSettings)} does not support writing '{format}' format."); } if (Optional.IsDefined(Placement)) @@ -56,19 +56,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - DiffDiskSettings IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchDiffDiskSettings IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(DiffDiskSettings)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchDiffDiskSettings)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeDiffDiskSettings(document.RootElement, options); + return DeserializeBatchDiffDiskSettings(document.RootElement, options); } - internal static DiffDiskSettings DeserializeDiffDiskSettings(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchDiffDiskSettings DeserializeBatchDiffDiskSettings(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -96,46 +96,46 @@ internal static DiffDiskSettings DeserializeDiffDiskSettings(JsonElement element } } serializedAdditionalRawData = rawDataDictionary; - return new DiffDiskSettings(placement, serializedAdditionalRawData); + return new BatchDiffDiskSettings(placement, serializedAdditionalRawData); } - BinaryData 
IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(DiffDiskSettings)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchDiffDiskSettings)} does not support writing '{options.Format}' format."); } } - DiffDiskSettings IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchDiffDiskSettings IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeDiffDiskSettings(document.RootElement, options); + return DeserializeBatchDiffDiskSettings(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(DiffDiskSettings)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchDiffDiskSettings)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static DiffDiskSettings FromResponse(Response response) + internal static BatchDiffDiskSettings FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeDiffDiskSettings(document.RootElement); + return DeserializeBatchDiffDiskSettings(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchDiffDiskSettings.cs similarity index 88% rename from sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchDiffDiskSettings.cs index 0ae2222fd149..440c76a3a4a6 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/DiffDiskSettings.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchDiffDiskSettings.cs @@ -14,7 +14,7 @@ namespace Azure.Compute.Batch /// Specifies the ephemeral Disk Settings for the operating system disk used by the /// compute node (VM). /// - public partial class DiffDiskSettings + public partial class BatchDiffDiskSettings { /// /// Keeps track of any properties unknown to the library. @@ -48,15 +48,15 @@ public partial class DiffDiskSettings /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public DiffDiskSettings() + /// Initializes a new instance of . + public BatchDiffDiskSettings() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// Specifies the ephemeral disk placement for operating system disk for all VMs in the pool. This property can be used by user in the request to choose the location e.g., cache disk space for Ephemeral OS disk provisioning. 
For more information on Ephemeral OS disk size requirements, please refer to Ephemeral OS disk size requirements for Windows VMs at https://learn.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and Linux VMs at https://learn.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements. /// Keeps track of any properties unknown to the library. - internal DiffDiskSettings(DiffDiskPlacement? placement, IDictionary serializedAdditionalRawData) + internal BatchDiffDiskSettings(DiffDiskPlacement? placement, IDictionary serializedAdditionalRawData) { Placement = placement; _serializedAdditionalRawData = serializedAdditionalRawData; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorSourceCategory.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorSourceCategory.cs new file mode 100644 index 000000000000..d9e984283ff3 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchErrorSourceCategory.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// BatchErrorSourceCategory enums. + public readonly partial struct BatchErrorSourceCategory : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchErrorSourceCategory(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string UserErrorValue = "usererror"; + private const string ServerErrorValue = "servererror"; + + /// The error is due to a user issue, such as misconfiguration. + public static BatchErrorSourceCategory UserError { get; } = new BatchErrorSourceCategory(UserErrorValue); + /// The error is due to an internal server issue. 
+ public static BatchErrorSourceCategory ServerError { get; } = new BatchErrorSourceCategory(ServerErrorValue); + /// Determines if two values are the same. + public static bool operator ==(BatchErrorSourceCategory left, BatchErrorSourceCategory right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchErrorSourceCategory left, BatchErrorSourceCategory right) => !left.Equals(right); + /// Converts a to a . + public static implicit operator BatchErrorSourceCategory(string value) => new BatchErrorSourceCategory(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchErrorSourceCategory other && Equals(other); + /// + public bool Equals(BatchErrorSourceCategory other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchInboundNatPool.Serialization.cs similarity index 77% rename from sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchInboundNatPool.Serialization.cs index a89dc631312d..02f30915ed29 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchInboundNatPool.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class InboundNatPool : IUtf8JsonSerializable, IJsonModel + public partial class BatchInboundNatPool : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => 
((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOp /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(InboundNatPool)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchInboundNatPool)} does not support writing '{format}' format."); } writer.WritePropertyName("name"u8); @@ -71,19 +71,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - InboundNatPool IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchInboundNatPool IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(InboundNatPool)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchInboundNatPool)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeInboundNatPool(document.RootElement, options); + return DeserializeBatchInboundNatPool(document.RootElement, options); } - internal static InboundNatPool DeserializeInboundNatPool(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchInboundNatPool DeserializeBatchInboundNatPool(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -146,7 +146,7 @@ internal static InboundNatPool DeserializeInboundNatPool(JsonElement element, Mo } } serializedAdditionalRawData = rawDataDictionary; - return new InboundNatPool( + return new BatchInboundNatPool( name, protocol, backendPort, @@ -156,43 +156,43 @@ internal static InboundNatPool DeserializeInboundNatPool(JsonElement element, Mo serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(InboundNatPool)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchInboundNatPool)} does not support writing '{options.Format}' format."); } } - InboundNatPool IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchInboundNatPool IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeInboundNatPool(document.RootElement, options); + return DeserializeBatchInboundNatPool(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(InboundNatPool)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchInboundNatPool)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static InboundNatPool FromResponse(Response response) + internal static BatchInboundNatPool FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeInboundNatPool(document.RootElement); + return DeserializeBatchInboundNatPool(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchInboundNatPool.cs similarity index 91% rename from sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchInboundNatPool.cs index ec5da9c432cd..0e718fab56b8 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundNatPool.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchInboundNatPool.cs @@ -14,7 +14,7 @@ namespace Azure.Compute.Batch /// A inbound NAT Pool that can be used to address specific ports on Compute Nodes /// in a Batch Pool externally. /// - public partial class InboundNatPool + public partial class BatchInboundNatPool { /// /// Keeps track of any properties unknown to the library. @@ -48,14 +48,14 @@ public partial class InboundNatPool /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. /// The protocol of the endpoint. /// The port number on the Compute Node. This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 29876 and 29877 as these are reserved. 
If any reserved values are provided the request fails with HTTP status code 400. /// The first port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. /// The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. /// is null. - public InboundNatPool(string name, InboundEndpointProtocol protocol, int backendPort, int frontendPortRangeStart, int frontendPortRangeEnd) + public BatchInboundNatPool(string name, InboundEndpointProtocol protocol, int backendPort, int frontendPortRangeStart, int frontendPortRangeEnd) { Argument.AssertNotNull(name, nameof(name)); @@ -67,7 +67,7 @@ public InboundNatPool(string name, InboundEndpointProtocol protocol, int backend NetworkSecurityGroupRules = new ChangeTrackingList(); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The name of the endpoint. The name must be unique within a Batch Pool, can contain letters, numbers, underscores, periods, and hyphens. Names must start with a letter or number, must end with a letter, number, or underscore, and cannot exceed 77 characters. If any invalid values are provided the request fails with HTTP status code 400. /// The protocol of the endpoint. /// The port number on the Compute Node. 
This must be unique within a Batch Pool. Acceptable values are between 1 and 65535 except for 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. @@ -75,7 +75,7 @@ public InboundNatPool(string name, InboundEndpointProtocol protocol, int backend /// The last port number in the range of external ports that will be used to provide inbound access to the backendPort on individual Compute Nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a Pool must be distinct and cannot overlap. Each range must contain at least 40 ports. If any reserved or overlapping values are provided the request fails with HTTP status code 400. /// A list of network security group rules that will be applied to the endpoint. The maximum number of rules that can be specified across all the endpoints on a Batch Pool is 25. If no network security group rules are specified, a default rule will be created to allow inbound access to the specified backendPort. If the maximum number of network security group rules is exceeded the request fails with HTTP status code 400. /// Keeps track of any properties unknown to the library. - internal InboundNatPool(string name, InboundEndpointProtocol protocol, int backendPort, int frontendPortRangeStart, int frontendPortRangeEnd, IList networkSecurityGroupRules, IDictionary serializedAdditionalRawData) + internal BatchInboundNatPool(string name, InboundEndpointProtocol protocol, int backendPort, int frontendPortRangeStart, int frontendPortRangeEnd, IList networkSecurityGroupRules, IDictionary serializedAdditionalRawData) { Name = name; Protocol = protocol; @@ -86,8 +86,8 @@ internal InboundNatPool(string name, InboundEndpointProtocol protocol, int backe _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. 
- internal InboundNatPool() + /// Initializes a new instance of for deserialization. + internal BatchInboundNatPool() { } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.Serialization.cs index a92864d9ca5b..d60e5aeaa038 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.Serialization.cs @@ -49,15 +49,15 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("usesTaskDependencies"u8); writer.WriteBooleanValue(UsesTaskDependencies.Value); } - if (options.Format != "W" && Optional.IsDefined(Url)) + if (options.Format != "W" && Optional.IsDefined(Uri)) { writer.WritePropertyName("url"u8); - writer.WriteStringValue(Url); + writer.WriteStringValue(Uri.AbsoluteUri); } if (options.Format != "W" && Optional.IsDefined(ETag)) { writer.WritePropertyName("eTag"u8); - writer.WriteStringValue(ETag); + writer.WriteStringValue(ETag.Value.ToString()); } if (options.Format != "W" && Optional.IsDefined(LastModified)) { @@ -136,15 +136,15 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WritePropertyName("poolInfo"u8); writer.WriteObjectValue(PoolInfo, options); - if (Optional.IsDefined(OnAllTasksComplete)) + if (Optional.IsDefined(AllTasksCompleteMode)) { writer.WritePropertyName("onAllTasksComplete"u8); - writer.WriteStringValue(OnAllTasksComplete.Value.ToString()); + writer.WriteStringValue(AllTasksCompleteMode.Value.ToString()); } - if (options.Format != "W" && Optional.IsDefined(OnTaskFailure)) + if (options.Format != "W" && Optional.IsDefined(TaskFailureMode)) { writer.WritePropertyName("onTaskFailure"u8); - writer.WriteStringValue(OnTaskFailure.Value.ToString()); + writer.WriteStringValue(TaskFailureMode.Value.ToString()); } if (options.Format != "W" && Optional.IsDefined(NetworkConfiguration)) { @@ 
-166,10 +166,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("executionInfo"u8); writer.WriteObjectValue(ExecutionInfo, options); } - if (options.Format != "W" && Optional.IsDefined(Stats)) + if (options.Format != "W" && Optional.IsDefined(JobStatistics)) { writer.WritePropertyName("stats"u8); - writer.WriteObjectValue(Stats, options); + writer.WriteObjectValue(JobStatistics, options); } if (options.Format != "W" && _serializedAdditionalRawData != null) { @@ -211,8 +211,8 @@ internal static BatchJob DeserializeBatchJob(JsonElement element, ModelReaderWri string id = default; string displayName = default; bool? usesTaskDependencies = default; - string url = default; - string eTag = default; + Uri url = default; + ETag? eTag = default; DateTimeOffset? lastModified = default; DateTimeOffset? creationTime = default; BatchJobState? state = default; @@ -228,10 +228,10 @@ internal static BatchJob DeserializeBatchJob(JsonElement element, ModelReaderWri BatchJobReleaseTask jobReleaseTask = default; IReadOnlyList commonEnvironmentSettings = default; BatchPoolInfo poolInfo = default; - OnAllBatchTasksComplete? onAllTasksComplete = default; - OnBatchTaskFailure? onTaskFailure = default; + BatchAllTasksCompleteMode? onAllTasksComplete = default; + BatchTaskFailureMode? 
onTaskFailure = default; BatchJobNetworkConfiguration networkConfiguration = default; - IList metadata = default; + IList metadata = default; BatchJobExecutionInfo executionInfo = default; BatchJobStatistics stats = default; IDictionary serializedAdditionalRawData = default; @@ -259,12 +259,20 @@ internal static BatchJob DeserializeBatchJob(JsonElement element, ModelReaderWri } if (property.NameEquals("url"u8)) { - url = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + url = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("eTag"u8)) { - eTag = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + eTag = new ETag(property.Value.GetString()); continue; } if (property.NameEquals("lastModified"u8)) @@ -409,7 +417,7 @@ internal static BatchJob DeserializeBatchJob(JsonElement element, ModelReaderWri { continue; } - onAllTasksComplete = new OnAllBatchTasksComplete(property.Value.GetString()); + onAllTasksComplete = new BatchAllTasksCompleteMode(property.Value.GetString()); continue; } if (property.NameEquals("onTaskFailure"u8)) @@ -418,7 +426,7 @@ internal static BatchJob DeserializeBatchJob(JsonElement element, ModelReaderWri { continue; } - onTaskFailure = new OnBatchTaskFailure(property.Value.GetString()); + onTaskFailure = new BatchTaskFailureMode(property.Value.GetString()); continue; } if (property.NameEquals("networkConfiguration"u8)) @@ -436,10 +444,10 @@ internal static BatchJob DeserializeBatchJob(JsonElement element, ModelReaderWri { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + array.Add(BatchMetadataItem.DeserializeBatchMetadataItem(item, options)); } metadata = array; continue; @@ -492,7 +500,7 @@ internal static BatchJob DeserializeBatchJob(JsonElement element, ModelReaderWri 
onAllTasksComplete, onTaskFailure, networkConfiguration, - metadata ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingList(), executionInfo, stats, serializedAdditionalRawData); diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs index 3084acc36928..9be8090d6d21 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJob.cs @@ -54,14 +54,14 @@ public BatchJob(BatchPoolInfo poolInfo) CommonEnvironmentSettings = new ChangeTrackingList(); PoolInfo = poolInfo; - Metadata = new ChangeTrackingList(); + Metadata = new ChangeTrackingList(); } /// Initializes a new instance of . /// A string that uniquely identifies the Job within the Account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). /// The display name for the Job. /// Whether Tasks in the Job can define dependencies on each other. The default is false. - /// The URL of the Job. + /// The URL of the Job. /// The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can be pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime. /// The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state. /// The creation time of the Job. @@ -78,19 +78,19 @@ public BatchJob(BatchPoolInfo poolInfo) /// The Job Release Task. The Job Release Task is a special Task run at the end of the Job on each Compute Node that has run any other Task of the Job. /// The list of common environment variable settings. 
These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. /// The Pool settings associated with the Job. - /// The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. - /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + /// The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. + /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. /// The network configuration for the Job. /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// The execution information for the Job. - /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// Resource usage statistics for the entire lifetime of the Job. 
This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. /// Keeps track of any properties unknown to the library. - internal BatchJob(string id, string displayName, bool? usesTaskDependencies, string url, string eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchJobState? state, DateTimeOffset? stateTransitionTime, BatchJobState? previousState, DateTimeOffset? previousStateTransitionTime, int? priority, bool? allowTaskPreemption, int? maxParallelTasks, BatchJobConstraints constraints, BatchJobManagerTask jobManagerTask, BatchJobPreparationTask jobPreparationTask, BatchJobReleaseTask jobReleaseTask, IReadOnlyList commonEnvironmentSettings, BatchPoolInfo poolInfo, OnAllBatchTasksComplete? onAllTasksComplete, OnBatchTaskFailure? onTaskFailure, BatchJobNetworkConfiguration networkConfiguration, IList metadata, BatchJobExecutionInfo executionInfo, BatchJobStatistics stats, IDictionary serializedAdditionalRawData) + internal BatchJob(string id, string displayName, bool? usesTaskDependencies, Uri uri, ETag? eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchJobState? state, DateTimeOffset? stateTransitionTime, BatchJobState? previousState, DateTimeOffset? previousStateTransitionTime, int? priority, bool? allowTaskPreemption, int? maxParallelTasks, BatchJobConstraints constraints, BatchJobManagerTask jobManagerTask, BatchJobPreparationTask jobPreparationTask, BatchJobReleaseTask jobReleaseTask, IReadOnlyList commonEnvironmentSettings, BatchPoolInfo poolInfo, BatchAllTasksCompleteMode? allTasksCompleteMode, BatchTaskFailureMode? 
taskFailureMode, BatchJobNetworkConfiguration networkConfiguration, IList metadata, BatchJobExecutionInfo executionInfo, BatchJobStatistics jobStatistics, IDictionary serializedAdditionalRawData) { Id = id; DisplayName = displayName; UsesTaskDependencies = usesTaskDependencies; - Url = url; + Uri = uri; ETag = eTag; LastModified = lastModified; CreationTime = creationTime; @@ -107,12 +107,12 @@ internal BatchJob(string id, string displayName, bool? usesTaskDependencies, str JobReleaseTask = jobReleaseTask; CommonEnvironmentSettings = commonEnvironmentSettings; PoolInfo = poolInfo; - OnAllTasksComplete = onAllTasksComplete; - OnTaskFailure = onTaskFailure; + AllTasksCompleteMode = allTasksCompleteMode; + TaskFailureMode = taskFailureMode; NetworkConfiguration = networkConfiguration; Metadata = metadata; ExecutionInfo = executionInfo; - Stats = stats; + JobStatistics = jobStatistics; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -128,9 +128,9 @@ internal BatchJob() /// Whether Tasks in the Job can define dependencies on each other. The default is false. public bool? UsesTaskDependencies { get; } /// The URL of the Job. - public string Url { get; } + public Uri Uri { get; } /// The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can be pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime. - public string ETag { get; } + public ETag? ETag { get; } /// The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state. public DateTimeOffset? LastModified { get; } /// The creation time of the Job. @@ -162,16 +162,16 @@ internal BatchJob() /// The Pool settings associated with the Job. 
public BatchPoolInfo PoolInfo { get; set; } /// The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. - public OnAllBatchTasksComplete? OnAllTasksComplete { get; set; } + public BatchAllTasksCompleteMode? AllTasksCompleteMode { get; set; } /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. - public OnBatchTaskFailure? OnTaskFailure { get; } + public BatchTaskFailureMode? TaskFailureMode { get; } /// The network configuration for the Job. public BatchJobNetworkConfiguration NetworkConfiguration { get; } /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - public IList Metadata { get; } + public IList Metadata { get; } /// The execution information for the Job. public BatchJobExecutionInfo ExecutionInfo { get; } /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. 
- public BatchJobStatistics Stats { get; } + public BatchJobStatistics JobStatistics { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobAction.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobActionKind.cs similarity index 61% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobAction.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobActionKind.cs index f5ef8f5b32ae..d5d9ae83a361 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobAction.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobActionKind.cs @@ -11,13 +11,13 @@ namespace Azure.Compute.Batch { /// BatchJobAction enums. - public readonly partial struct BatchJobAction : IEquatable + public readonly partial struct BatchJobActionKind : IEquatable { private readonly string _value; - /// Initializes a new instance of . + /// Initializes a new instance of . /// is null. - public BatchJobAction(string value) + public BatchJobActionKind(string value) { _value = value ?? throw new ArgumentNullException(nameof(value)); } @@ -27,23 +27,23 @@ public BatchJobAction(string value) private const string TerminateValue = "terminate"; /// Take no action. - public static BatchJobAction None { get; } = new BatchJobAction(NoneValue); + public static BatchJobActionKind None { get; } = new BatchJobActionKind(NoneValue); /// Disable the Job. This is equivalent to calling the disable Job API, with a disableTasks value of requeue. - public static BatchJobAction Disable { get; } = new BatchJobAction(DisableValue); + public static BatchJobActionKind Disable { get; } = new BatchJobActionKind(DisableValue); /// Terminate the Job. The terminationReason in the Job's executionInfo is set to "TaskFailed". - public static BatchJobAction Terminate { get; } = new BatchJobAction(TerminateValue); - /// Determines if two values are the same. 
- public static bool operator ==(BatchJobAction left, BatchJobAction right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(BatchJobAction left, BatchJobAction right) => !left.Equals(right); - /// Converts a to a . - public static implicit operator BatchJobAction(string value) => new BatchJobAction(value); + public static BatchJobActionKind Terminate { get; } = new BatchJobActionKind(TerminateValue); + /// Determines if two values are the same. + public static bool operator ==(BatchJobActionKind left, BatchJobActionKind right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchJobActionKind left, BatchJobActionKind right) => !left.Equals(right); + /// Converts a to a . + public static implicit operator BatchJobActionKind(string value) => new BatchJobActionKind(value); /// [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is BatchJobAction other && Equals(other); + public override bool Equals(object obj) => obj is BatchJobActionKind other && Equals(other); /// - public bool Equals(BatchJobAction other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + public bool Equals(BatchJobActionKind other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); /// [EditorBrowsable(EditorBrowsableState.Never)] diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateOptions.Serialization.cs similarity index 85% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateOptions.Serialization.cs index 2c05e637915d..b9191e19cb47 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateContent.Serialization.cs +++ 
b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchJobCreateContent : IUtf8JsonSerializable, IJsonModel + public partial class BatchJobCreateOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderW /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchJobCreateContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchJobCreateOptions)} does not support writing '{format}' format."); } writer.WritePropertyName("id"u8); @@ -93,15 +93,15 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WritePropertyName("poolInfo"u8); writer.WriteObjectValue(PoolInfo, options); - if (Optional.IsDefined(OnAllTasksComplete)) + if (Optional.IsDefined(AllTasksCompleteMode)) { writer.WritePropertyName("onAllTasksComplete"u8); - writer.WriteStringValue(OnAllTasksComplete.Value.ToString()); + writer.WriteStringValue(AllTasksCompleteMode.Value.ToString()); } - if (Optional.IsDefined(OnTaskFailure)) + if (Optional.IsDefined(TaskFailureMode)) { writer.WritePropertyName("onTaskFailure"u8); - writer.WriteStringValue(OnTaskFailure.Value.ToString()); + writer.WriteStringValue(TaskFailureMode.Value.ToString()); } if (Optional.IsDefined(NetworkConfiguration)) { @@ -135,19 +135,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchJobCreateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchJobCreateOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchJobCreateContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchJobCreateOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchJobCreateContent(document.RootElement, options); + return DeserializeBatchJobCreateOptions(document.RootElement, options); } - internal static BatchJobCreateContent DeserializeBatchJobCreateContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchJobCreateOptions DeserializeBatchJobCreateOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -167,10 +167,10 @@ internal static BatchJobCreateContent DeserializeBatchJobCreateContent(JsonEleme BatchJobReleaseTask jobReleaseTask = default; IList commonEnvironmentSettings = default; BatchPoolInfo poolInfo = default; - OnAllBatchTasksComplete? onAllTasksComplete = default; - OnBatchTaskFailure? onTaskFailure = default; + BatchAllTasksCompleteMode? onAllTasksComplete = default; + BatchTaskFailureMode? 
onTaskFailure = default; BatchJobNetworkConfiguration networkConfiguration = default; - IList metadata = default; + IList metadata = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -282,7 +282,7 @@ internal static BatchJobCreateContent DeserializeBatchJobCreateContent(JsonEleme { continue; } - onAllTasksComplete = new OnAllBatchTasksComplete(property.Value.GetString()); + onAllTasksComplete = new BatchAllTasksCompleteMode(property.Value.GetString()); continue; } if (property.NameEquals("onTaskFailure"u8)) @@ -291,7 +291,7 @@ internal static BatchJobCreateContent DeserializeBatchJobCreateContent(JsonEleme { continue; } - onTaskFailure = new OnBatchTaskFailure(property.Value.GetString()); + onTaskFailure = new BatchTaskFailureMode(property.Value.GetString()); continue; } if (property.NameEquals("networkConfiguration"u8)) @@ -309,10 +309,10 @@ internal static BatchJobCreateContent DeserializeBatchJobCreateContent(JsonEleme { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + array.Add(BatchMetadataItem.DeserializeBatchMetadataItem(item, options)); } metadata = array; continue; @@ -323,7 +323,7 @@ internal static BatchJobCreateContent DeserializeBatchJobCreateContent(JsonEleme } } serializedAdditionalRawData = rawDataDictionary; - return new BatchJobCreateContent( + return new BatchJobCreateOptions( id, displayName, usesTaskDependencies, @@ -339,47 +339,47 @@ internal static BatchJobCreateContent DeserializeBatchJobCreateContent(JsonEleme onAllTasksComplete, onTaskFailure, networkConfiguration, - metadata ?? new ChangeTrackingList(), + metadata ?? 
new ChangeTrackingList(), serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchJobCreateContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchJobCreateOptions)} does not support writing '{options.Format}' format."); } } - BatchJobCreateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchJobCreateOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchJobCreateContent(document.RootElement, options); + return DeserializeBatchJobCreateOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchJobCreateContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchJobCreateOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchJobCreateContent FromResponse(Response response) + internal static BatchJobCreateOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchJobCreateContent(document.RootElement); + return DeserializeBatchJobCreateOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateOptions.cs similarity index 87% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateOptions.cs index ea1cd55c27ed..36c63ea63d24 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobCreateOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for creating an Azure Batch Job. 
- public partial class BatchJobCreateContent + public partial class BatchJobCreateOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,11 +45,11 @@ public partial class BatchJobCreateContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// A string that uniquely identifies the Job within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). /// The Pool on which the Batch service runs the Job's Tasks. /// or is null. - public BatchJobCreateContent(string id, BatchPoolInfo poolInfo) + public BatchJobCreateOptions(string id, BatchPoolInfo poolInfo) { Argument.AssertNotNull(id, nameof(id)); Argument.AssertNotNull(poolInfo, nameof(poolInfo)); @@ -57,10 +57,10 @@ public BatchJobCreateContent(string id, BatchPoolInfo poolInfo) Id = id; CommonEnvironmentSettings = new ChangeTrackingList(); PoolInfo = poolInfo; - Metadata = new ChangeTrackingList(); + Metadata = new ChangeTrackingList(); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// A string that uniquely identifies the Job within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). /// The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// Whether Tasks in the Job can define dependencies on each other. The default is false. @@ -73,12 +73,12 @@ public BatchJobCreateContent(string id, BatchPoolInfo poolInfo) /// The Job Release Task. 
A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Nodes that have run the Job Preparation Task. The primary purpose of the Job Release Task is to undo changes to Compute Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. /// The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. /// The Pool on which the Batch service runs the Job's Tasks. - /// The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. - /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + /// The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. 
This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. /// The network configuration for the Job. /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// Keeps track of any properties unknown to the library. - internal BatchJobCreateContent(string id, string displayName, bool? usesTaskDependencies, int? priority, bool? allowTaskPreemption, int? maxParallelTasks, BatchJobConstraints constraints, BatchJobManagerTask jobManagerTask, BatchJobPreparationTask jobPreparationTask, BatchJobReleaseTask jobReleaseTask, IList commonEnvironmentSettings, BatchPoolInfo poolInfo, OnAllBatchTasksComplete? onAllTasksComplete, OnBatchTaskFailure? onTaskFailure, BatchJobNetworkConfiguration networkConfiguration, IList metadata, IDictionary serializedAdditionalRawData) + internal BatchJobCreateOptions(string id, string displayName, bool? usesTaskDependencies, int? priority, bool? allowTaskPreemption, int? maxParallelTasks, BatchJobConstraints constraints, BatchJobManagerTask jobManagerTask, BatchJobPreparationTask jobPreparationTask, BatchJobReleaseTask jobReleaseTask, IList commonEnvironmentSettings, BatchPoolInfo poolInfo, BatchAllTasksCompleteMode? allTasksCompleteMode, BatchTaskFailureMode? 
taskFailureMode, BatchJobNetworkConfiguration networkConfiguration, IList metadata, IDictionary serializedAdditionalRawData) { Id = id; DisplayName = displayName; @@ -92,15 +92,15 @@ internal BatchJobCreateContent(string id, string displayName, bool? usesTaskDepe JobReleaseTask = jobReleaseTask; CommonEnvironmentSettings = commonEnvironmentSettings; PoolInfo = poolInfo; - OnAllTasksComplete = onAllTasksComplete; - OnTaskFailure = onTaskFailure; + AllTasksCompleteMode = allTasksCompleteMode; + TaskFailureMode = taskFailureMode; NetworkConfiguration = networkConfiguration; Metadata = metadata; _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal BatchJobCreateContent() + /// Initializes a new instance of for deserialization. + internal BatchJobCreateOptions() { } @@ -129,12 +129,12 @@ internal BatchJobCreateContent() /// The Pool on which the Batch service runs the Job's Tasks. public BatchPoolInfo PoolInfo { get; } /// The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. - public OnAllBatchTasksComplete? OnAllTasksComplete { get; set; } + public BatchAllTasksCompleteMode? AllTasksCompleteMode { get; set; } /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. 
The default is noaction. - public OnBatchTaskFailure? OnTaskFailure { get; set; } + public BatchTaskFailureMode? TaskFailureMode { get; set; } /// The network configuration for the Job. public BatchJobNetworkConfiguration NetworkConfiguration { get; set; } /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - public IList Metadata { get; } + public IList Metadata { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableOptions.Serialization.cs similarity index 76% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableOptions.Serialization.cs index 1cc17efadd01..84a71db011fc 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchJobDisableContent : IUtf8JsonSerializable, IJsonModel + internal partial class BatchJobDisableOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReader /// The client options for reading and writing models. 
protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchJobDisableContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchJobDisableOptions)} does not support writing '{format}' format."); } writer.WritePropertyName("disableTasks"u8); @@ -53,19 +53,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchJobDisableContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchJobDisableOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchJobDisableContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchJobDisableOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchJobDisableContent(document.RootElement, options); + return DeserializeBatchJobDisableOptions(document.RootElement, options); } - internal static BatchJobDisableContent DeserializeBatchJobDisableContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchJobDisableOptions DeserializeBatchJobDisableOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -89,46 +89,46 @@ internal static BatchJobDisableContent DeserializeBatchJobDisableContent(JsonEle } } serializedAdditionalRawData = rawDataDictionary; - return new BatchJobDisableContent(disableTasks, serializedAdditionalRawData); + return new BatchJobDisableOptions(disableTasks, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchJobDisableContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchJobDisableOptions)} does not support writing '{options.Format}' format."); } } - BatchJobDisableContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchJobDisableOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchJobDisableContent(document.RootElement, options); + return DeserializeBatchJobDisableOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchJobDisableContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchJobDisableOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static BatchJobDisableContent FromResponse(Response response) + internal static BatchJobDisableOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchJobDisableContent(document.RootElement); + return DeserializeBatchJobDisableOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableOptions.cs similarity index 88% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableOptions.cs index 3ad54193906b..67440cde67ec 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobDisableOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for disabling an Azure Batch Job. - public partial class BatchJobDisableContent + internal partial class BatchJobDisableOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,24 +45,24 @@ public partial class BatchJobDisableContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// What to do with active Tasks associated with the Job. - public BatchJobDisableContent(DisableBatchJobOption disableTasks) + public BatchJobDisableOptions(DisableBatchJobOption disableTasks) { DisableTasks = disableTasks; } - /// Initializes a new instance of . + /// Initializes a new instance of . /// What to do with active Tasks associated with the Job. /// Keeps track of any properties unknown to the library. 
- internal BatchJobDisableContent(DisableBatchJobOption disableTasks, IDictionary serializedAdditionalRawData) + internal BatchJobDisableOptions(DisableBatchJobOption disableTasks, IDictionary serializedAdditionalRawData) { DisableTasks = disableTasks; _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal BatchJobDisableContent() + /// Initializes a new instance of for deserialization. + internal BatchJobDisableOptions() { } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.Serialization.cs index 0a1ae7ceca1c..ef3393ac7953 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.Serialization.cs @@ -37,7 +37,7 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("subnetId"u8); writer.WriteStringValue(SubnetId); writer.WritePropertyName("skipWithdrawFromVNet"u8); - writer.WriteBooleanValue(SkipWithdrawFromVNet); + writer.WriteBooleanValue(SkipWithdrawFromVnet); if (options.Format != "W" && _serializedAdditionalRawData != null) { foreach (var item in _serializedAdditionalRawData) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs index 931e05614029..577f3a4fc553 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobNetworkConfiguration.cs @@ -47,24 +47,24 @@ public partial class BatchJobNetworkConfiguration /// Initializes a new instance of . 
/// The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - /// Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. + /// Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. 
If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. /// is null. - public BatchJobNetworkConfiguration(string subnetId, bool skipWithdrawFromVNet) + public BatchJobNetworkConfiguration(string subnetId, bool skipWithdrawFromVnet) { Argument.AssertNotNull(subnetId, nameof(subnetId)); SubnetId = subnetId; - SkipWithdrawFromVNet = skipWithdrawFromVNet; + SkipWithdrawFromVnet = skipWithdrawFromVnet; } /// Initializes a new instance of . /// The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. 
For more details see: https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - /// Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. + /// Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. /// Keeps track of any properties unknown to the library. - internal BatchJobNetworkConfiguration(string subnetId, bool skipWithdrawFromVNet, IDictionary serializedAdditionalRawData) + internal BatchJobNetworkConfiguration(string subnetId, bool skipWithdrawFromVnet, IDictionary serializedAdditionalRawData) { SubnetId = subnetId; - SkipWithdrawFromVNet = skipWithdrawFromVNet; + SkipWithdrawFromVnet = skipWithdrawFromVnet; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -76,6 +76,6 @@ internal BatchJobNetworkConfiguration() /// The ARM resource identifier of the virtual network subnet which Compute Nodes running Tasks from the Job will join for the duration of the Task. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes which will run Tasks from the Job. This can be up to the number of Compute Nodes in the Pool. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet so that Azure Batch service can schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). 
If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication from the Azure Batch service. For Pools created with a Virtual Machine configuration, enable ports 29876 and 29877, as well as port 22 for Linux and port 3389 for Windows. Port 443 is also required to be open for outbound connections for communications to Azure Storage. For more details see: https://learn.microsoft.com/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. public string SubnetId { get; set; } /// Whether to withdraw Compute Nodes from the virtual network to DNC when the job is terminated or deleted. If true, nodes will remain joined to the virtual network to DNC. If false, nodes will automatically withdraw when the job ends. Defaults to false. 
- public bool SkipWithdrawFromVNet { get; set; } + public bool SkipWithdrawFromVnet { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.Serialization.cs index 11ec2ab4ff4f..f467d88da73d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.Serialization.cs @@ -44,10 +44,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("nodeId"u8); writer.WriteStringValue(NodeId); } - if (Optional.IsDefined(NodeUrl)) + if (Optional.IsDefined(NodeUri)) { writer.WritePropertyName("nodeUrl"u8); - writer.WriteStringValue(NodeUrl); + writer.WriteStringValue(NodeUri.AbsoluteUri); } if (Optional.IsDefined(JobPreparationTaskExecutionInfo)) { @@ -98,7 +98,7 @@ internal static BatchJobPreparationAndReleaseTaskStatus DeserializeBatchJobPrepa } string poolId = default; string nodeId = default; - string nodeUrl = default; + Uri nodeUrl = default; BatchJobPreparationTaskExecutionInfo jobPreparationTaskExecutionInfo = default; BatchJobReleaseTaskExecutionInfo jobReleaseTaskExecutionInfo = default; IDictionary serializedAdditionalRawData = default; @@ -117,7 +117,11 @@ internal static BatchJobPreparationAndReleaseTaskStatus DeserializeBatchJobPrepa } if (property.NameEquals("nodeUrl"u8)) { - nodeUrl = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nodeUrl = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("jobPreparationTaskExecutionInfo"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.cs index 
722bcb9a92d7..b4c1b24e9fea 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationAndReleaseTaskStatus.cs @@ -53,15 +53,15 @@ internal BatchJobPreparationAndReleaseTaskStatus() /// Initializes a new instance of . /// The ID of the Pool containing the Compute Node to which this entry refers. /// The ID of the Compute Node to which this entry refers. - /// The URL of the Compute Node to which this entry refers. + /// The URL of the Compute Node to which this entry refers. /// Information about the execution status of the Job Preparation Task on this Compute Node. /// Information about the execution status of the Job Release Task on this Compute Node. This property is set only if the Job Release Task has run on the Compute Node. /// Keeps track of any properties unknown to the library. - internal BatchJobPreparationAndReleaseTaskStatus(string poolId, string nodeId, string nodeUrl, BatchJobPreparationTaskExecutionInfo jobPreparationTaskExecutionInfo, BatchJobReleaseTaskExecutionInfo jobReleaseTaskExecutionInfo, IDictionary serializedAdditionalRawData) + internal BatchJobPreparationAndReleaseTaskStatus(string poolId, string nodeId, Uri nodeUri, BatchJobPreparationTaskExecutionInfo jobPreparationTaskExecutionInfo, BatchJobReleaseTaskExecutionInfo jobReleaseTaskExecutionInfo, IDictionary serializedAdditionalRawData) { PoolId = poolId; NodeId = nodeId; - NodeUrl = nodeUrl; + NodeUri = nodeUri; JobPreparationTaskExecutionInfo = jobPreparationTaskExecutionInfo; JobReleaseTaskExecutionInfo = jobReleaseTaskExecutionInfo; _serializedAdditionalRawData = serializedAdditionalRawData; @@ -72,7 +72,7 @@ internal BatchJobPreparationAndReleaseTaskStatus(string poolId, string nodeId, s /// The ID of the Compute Node to which this entry refers. public string NodeId { get; } /// The URL of the Compute Node to which this entry refers. 
- public string NodeUrl { get; } + public Uri NodeUri { get; } /// Information about the execution status of the Job Preparation Task on this Compute Node. public BatchJobPreparationTaskExecutionInfo JobPreparationTaskExecutionInfo { get; } /// Information about the execution status of the Job Release Task on this Compute Node. This property is set only if the Job Release Task has run on the Compute Node. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.Serialization.cs index 47e6c59f2137..9dcbdfad610d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.Serialization.cs @@ -48,10 +48,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("taskRootDirectory"u8); writer.WriteStringValue(TaskRootDirectory); } - if (Optional.IsDefined(TaskRootDirectoryUrl)) + if (Optional.IsDefined(TaskRootDirectoryUri)) { writer.WritePropertyName("taskRootDirectoryUrl"u8); - writer.WriteStringValue(TaskRootDirectoryUrl); + writer.WriteStringValue(TaskRootDirectoryUri.AbsoluteUri); } if (Optional.IsDefined(ExitCode)) { @@ -121,7 +121,7 @@ internal static BatchJobPreparationTaskExecutionInfo DeserializeBatchJobPreparat DateTimeOffset? endTime = default; BatchJobPreparationTaskState state = default; string taskRootDirectory = default; - string taskRootDirectoryUrl = default; + Uri taskRootDirectoryUrl = default; int? 
exitCode = default; BatchTaskContainerExecutionInfo containerInfo = default; BatchTaskFailureInfo failureInfo = default; @@ -158,7 +158,11 @@ internal static BatchJobPreparationTaskExecutionInfo DeserializeBatchJobPreparat } if (property.NameEquals("taskRootDirectoryUrl"u8)) { - taskRootDirectoryUrl = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + taskRootDirectoryUrl = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("exitCode"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.cs index 8d65cf18954e..819db771835f 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobPreparationTaskExecutionInfo.cs @@ -64,7 +64,7 @@ internal BatchJobPreparationTaskExecutionInfo(DateTimeOffset startTime, BatchJob /// The time at which the Job Preparation Task completed. This property is set only if the Task is in the Completed state. /// The current state of the Job Preparation Task on the Compute Node. /// The root directory of the Job Preparation Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. - /// The URL to the root directory of the Job Preparation Task on the Compute Node. + /// The URL to the root directory of the Job Preparation Task on the Compute Node. /// The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. 
Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. @@ -72,13 +72,13 @@ internal BatchJobPreparationTaskExecutionInfo(DateTimeOffset startTime, BatchJob /// The most recent time at which a retry of the Job Preparation Task started running. This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. /// Keeps track of any properties unknown to the library. - internal BatchJobPreparationTaskExecutionInfo(DateTimeOffset startTime, DateTimeOffset? endTime, BatchJobPreparationTaskState state, string taskRootDirectory, string taskRootDirectoryUrl, int? exitCode, BatchTaskContainerExecutionInfo containerInfo, BatchTaskFailureInfo failureInfo, int retryCount, DateTimeOffset? lastRetryTime, BatchTaskExecutionResult? result, IDictionary serializedAdditionalRawData) + internal BatchJobPreparationTaskExecutionInfo(DateTimeOffset startTime, DateTimeOffset? endTime, BatchJobPreparationTaskState state, string taskRootDirectory, Uri taskRootDirectoryUri, int? exitCode, BatchTaskContainerExecutionInfo containerInfo, BatchTaskFailureInfo failureInfo, int retryCount, DateTimeOffset? lastRetryTime, BatchTaskExecutionResult? 
result, IDictionary serializedAdditionalRawData) { StartTime = startTime; EndTime = endTime; State = state; TaskRootDirectory = taskRootDirectory; - TaskRootDirectoryUrl = taskRootDirectoryUrl; + TaskRootDirectoryUri = taskRootDirectoryUri; ExitCode = exitCode; ContainerInfo = containerInfo; FailureInfo = failureInfo; @@ -102,7 +102,7 @@ internal BatchJobPreparationTaskExecutionInfo() /// The root directory of the Job Preparation Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. public string TaskRootDirectory { get; } /// The URL to the root directory of the Job Preparation Task on the Compute Node. - public string TaskRootDirectoryUrl { get; } + public Uri TaskRootDirectoryUri { get; } /// The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. public int? ExitCode { get; } /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.Serialization.cs index b34cf5498d49..2bc035e9a2f7 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.Serialization.cs @@ -48,10 +48,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("taskRootDirectory"u8); writer.WriteStringValue(TaskRootDirectory); } - if (Optional.IsDefined(TaskRootDirectoryUrl)) + if (Optional.IsDefined(TaskRootDirectoryUri)) { writer.WritePropertyName("taskRootDirectoryUrl"u8); - writer.WriteStringValue(TaskRootDirectoryUrl); + writer.WriteStringValue(TaskRootDirectoryUri.AbsoluteUri); } if (Optional.IsDefined(ExitCode)) { @@ -114,7 +114,7 @@ internal static BatchJobReleaseTaskExecutionInfo DeserializeBatchJobReleaseTaskE DateTimeOffset? endTime = default; BatchJobReleaseTaskState state = default; string taskRootDirectory = default; - string taskRootDirectoryUrl = default; + Uri taskRootDirectoryUrl = default; int? 
exitCode = default; BatchTaskContainerExecutionInfo containerInfo = default; BatchTaskFailureInfo failureInfo = default; @@ -149,7 +149,11 @@ internal static BatchJobReleaseTaskExecutionInfo DeserializeBatchJobReleaseTaskE } if (property.NameEquals("taskRootDirectoryUrl"u8)) { - taskRootDirectoryUrl = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + taskRootDirectoryUrl = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("exitCode"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.cs index 36edc24ff92a..be2e837cb2c8 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobReleaseTaskExecutionInfo.cs @@ -62,19 +62,19 @@ internal BatchJobReleaseTaskExecutionInfo(DateTimeOffset startTime, BatchJobRele /// The time at which the Job Release Task completed. This property is set only if the Task is in the Completed state. /// The current state of the Job Release Task on the Compute Node. /// The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. - /// The URL to the root directory of the Job Release Task on the Compute Node. + /// The URL to the root directory of the Job Release Task on the Compute Node. /// The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. 
Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. /// Keeps track of any properties unknown to the library. - internal BatchJobReleaseTaskExecutionInfo(DateTimeOffset startTime, DateTimeOffset? endTime, BatchJobReleaseTaskState state, string taskRootDirectory, string taskRootDirectoryUrl, int? exitCode, BatchTaskContainerExecutionInfo containerInfo, BatchTaskFailureInfo failureInfo, BatchTaskExecutionResult? result, IDictionary serializedAdditionalRawData) + internal BatchJobReleaseTaskExecutionInfo(DateTimeOffset startTime, DateTimeOffset? endTime, BatchJobReleaseTaskState state, string taskRootDirectory, Uri taskRootDirectoryUri, int? exitCode, BatchTaskContainerExecutionInfo containerInfo, BatchTaskFailureInfo failureInfo, BatchTaskExecutionResult? result, IDictionary serializedAdditionalRawData) { StartTime = startTime; EndTime = endTime; State = state; TaskRootDirectory = taskRootDirectory; - TaskRootDirectoryUrl = taskRootDirectoryUrl; + TaskRootDirectoryUri = taskRootDirectoryUri; ExitCode = exitCode; ContainerInfo = containerInfo; FailureInfo = failureInfo; @@ -96,7 +96,7 @@ internal BatchJobReleaseTaskExecutionInfo() /// The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. public string TaskRootDirectory { get; } /// The URL to the root directory of the Job Release Task on the Compute Node. 
- public string TaskRootDirectoryUrl { get; } + public Uri TaskRootDirectoryUri { get; } /// The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. public int? ExitCode { get; } /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.Serialization.cs index 0dda3fa74bc8..5f562398f1f3 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.Serialization.cs @@ -44,15 +44,15 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("displayName"u8); writer.WriteStringValue(DisplayName); } - if (options.Format != "W" && Optional.IsDefined(Url)) + if (options.Format != "W" && Optional.IsDefined(Uri)) { writer.WritePropertyName("url"u8); - writer.WriteStringValue(Url); + writer.WriteStringValue(Uri.AbsoluteUri); } if (options.Format != "W" && Optional.IsDefined(ETag)) { writer.WritePropertyName("eTag"u8); - writer.WriteStringValue(ETag); + writer.WriteStringValue(ETag.Value.ToString()); } if (options.Format != "W" && Optional.IsDefined(LastModified)) { @@ -106,10 +106,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WriteEndArray(); } - if (options.Format != "W" && 
Optional.IsDefined(Stats)) + if (options.Format != "W" && Optional.IsDefined(JobScheduleStatistics)) { writer.WritePropertyName("stats"u8); - writer.WriteObjectValue(Stats, options); + writer.WriteObjectValue(JobScheduleStatistics, options); } if (options.Format != "W" && _serializedAdditionalRawData != null) { @@ -150,8 +150,8 @@ internal static BatchJobSchedule DeserializeBatchJobSchedule(JsonElement element } string id = default; string displayName = default; - string url = default; - string eTag = default; + Uri url = default; + ETag? eTag = default; DateTimeOffset? lastModified = default; DateTimeOffset? creationTime = default; BatchJobScheduleState? state = default; @@ -161,7 +161,7 @@ internal static BatchJobSchedule DeserializeBatchJobSchedule(JsonElement element BatchJobScheduleConfiguration schedule = default; BatchJobSpecification jobSpecification = default; BatchJobScheduleExecutionInfo executionInfo = default; - IList metadata = default; + IList metadata = default; BatchJobScheduleStatistics stats = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -179,12 +179,20 @@ internal static BatchJobSchedule DeserializeBatchJobSchedule(JsonElement element } if (property.NameEquals("url"u8)) { - url = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + url = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("eTag"u8)) { - eTag = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + eTag = new ETag(property.Value.GetString()); continue; } if (property.NameEquals("lastModified"u8)) @@ -270,10 +278,10 @@ internal static BatchJobSchedule DeserializeBatchJobSchedule(JsonElement element { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + 
array.Add(BatchMetadataItem.DeserializeBatchMetadataItem(item, options)); } metadata = array; continue; @@ -307,7 +315,7 @@ internal static BatchJobSchedule DeserializeBatchJobSchedule(JsonElement element schedule, jobSpecification, executionInfo, - metadata ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingList(), stats, serializedAdditionalRawData); } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.cs index 9dffba45d20b..cc96269f5fda 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedule.cs @@ -56,13 +56,13 @@ public BatchJobSchedule(BatchJobSpecification jobSpecification) Argument.AssertNotNull(jobSpecification, nameof(jobSpecification)); JobSpecification = jobSpecification; - Metadata = new ChangeTrackingList(); + Metadata = new ChangeTrackingList(); } /// Initializes a new instance of . /// A string that uniquely identifies the schedule within the Account. /// The display name for the schedule. - /// The URL of the Job Schedule. + /// The URL of the Job Schedule. /// The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. In particular, you can be pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime. /// The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state. /// The creation time of the Job Schedule. @@ -74,13 +74,13 @@ public BatchJobSchedule(BatchJobSpecification jobSpecification) /// The details of the Jobs to be created on this schedule. 
/// Information about Jobs that have been and will be run under this schedule. /// A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - /// The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. /// Keeps track of any properties unknown to the library. - internal BatchJobSchedule(string id, string displayName, string url, string eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchJobScheduleState? state, DateTimeOffset? stateTransitionTime, BatchJobScheduleState? previousState, DateTimeOffset? previousStateTransitionTime, BatchJobScheduleConfiguration schedule, BatchJobSpecification jobSpecification, BatchJobScheduleExecutionInfo executionInfo, IList metadata, BatchJobScheduleStatistics stats, IDictionary serializedAdditionalRawData) + internal BatchJobSchedule(string id, string displayName, Uri uri, ETag? eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchJobScheduleState? state, DateTimeOffset? stateTransitionTime, BatchJobScheduleState? previousState, DateTimeOffset? 
previousStateTransitionTime, BatchJobScheduleConfiguration schedule, BatchJobSpecification jobSpecification, BatchJobScheduleExecutionInfo executionInfo, IList metadata, BatchJobScheduleStatistics jobScheduleStatistics, IDictionary serializedAdditionalRawData) { Id = id; DisplayName = displayName; - Url = url; + Uri = uri; ETag = eTag; LastModified = lastModified; CreationTime = creationTime; @@ -92,7 +92,7 @@ internal BatchJobSchedule(string id, string displayName, string url, string eTag JobSpecification = jobSpecification; ExecutionInfo = executionInfo; Metadata = metadata; - Stats = stats; + JobScheduleStatistics = jobScheduleStatistics; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -106,9 +106,9 @@ internal BatchJobSchedule() /// The display name for the schedule. public string DisplayName { get; } /// The URL of the Job Schedule. - public string Url { get; } + public Uri Uri { get; } /// The ETag of the Job Schedule. This is an opaque string. You can use it to detect whether the Job Schedule has changed between requests. In particular, you can be pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime. - public string ETag { get; } + public ETag? ETag { get; } /// The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state. public DateTimeOffset? LastModified { get; } /// The creation time of the Job Schedule. @@ -128,8 +128,8 @@ internal BatchJobSchedule() /// Information about Jobs that have been and will be run under this schedule. public BatchJobScheduleExecutionInfo ExecutionInfo { get; } /// A list of name-value pairs associated with the schedule as metadata. 
The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - public IList Metadata { get; } + public IList Metadata { get; } /// The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. - public BatchJobScheduleStatistics Stats { get; } + public BatchJobScheduleStatistics JobScheduleStatistics { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateOptions.Serialization.cs similarity index 78% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateOptions.Serialization.cs index b37d6cd72453..81c3a91b4c11 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchJobScheduleCreateContent : IUtf8JsonSerializable, IJsonModel + public partial class BatchJobScheduleCreateOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, Mode /// The client options for reading and 
writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchJobScheduleCreateContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchJobScheduleCreateOptions)} does not support writing '{format}' format."); } writer.WritePropertyName("id"u8); @@ -72,19 +72,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchJobScheduleCreateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchJobScheduleCreateOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchJobScheduleCreateContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchJobScheduleCreateOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchJobScheduleCreateContent(document.RootElement, options); + return DeserializeBatchJobScheduleCreateOptions(document.RootElement, options); } - internal static BatchJobScheduleCreateContent DeserializeBatchJobScheduleCreateContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchJobScheduleCreateOptions DeserializeBatchJobScheduleCreateOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -96,7 +96,7 @@ internal static BatchJobScheduleCreateContent DeserializeBatchJobScheduleCreateC string displayName = default; BatchJobScheduleConfiguration schedule = default; BatchJobSpecification jobSpecification = default; - IList metadata = default; + IList metadata = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -127,10 +127,10 @@ internal static BatchJobScheduleCreateContent DeserializeBatchJobScheduleCreateC { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + array.Add(BatchMetadataItem.DeserializeBatchMetadataItem(item, options)); } metadata = array; continue; @@ -141,52 +141,52 @@ internal static BatchJobScheduleCreateContent DeserializeBatchJobScheduleCreateC } } serializedAdditionalRawData = rawDataDictionary; - return new BatchJobScheduleCreateContent( + return new 
BatchJobScheduleCreateOptions( id, displayName, schedule, jobSpecification, - metadata ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingList(), serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchJobScheduleCreateContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchJobScheduleCreateOptions)} does not support writing '{options.Format}' format."); } } - BatchJobScheduleCreateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchJobScheduleCreateOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchJobScheduleCreateContent(document.RootElement, options); + return DeserializeBatchJobScheduleCreateOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchJobScheduleCreateContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchJobScheduleCreateOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchJobScheduleCreateContent FromResponse(Response response) + internal static BatchJobScheduleCreateOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchJobScheduleCreateContent(document.RootElement); + return DeserializeBatchJobScheduleCreateOptions(document.RootElement); } /// Convert into a . 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateOptions.cs similarity index 90% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateOptions.cs index 624f65419a5e..31c66cbdd874 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleCreateOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for creating an Azure Batch Job Schedule. - public partial class BatchJobScheduleCreateContent + public partial class BatchJobScheduleCreateOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,12 +45,12 @@ public partial class BatchJobScheduleCreateContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// A string that uniquely identifies the schedule within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. /// The details of the Jobs to be created on this schedule. /// , or is null. 
- public BatchJobScheduleCreateContent(string id, BatchJobScheduleConfiguration schedule, BatchJobSpecification jobSpecification) + public BatchJobScheduleCreateOptions(string id, BatchJobScheduleConfiguration schedule, BatchJobSpecification jobSpecification) { Argument.AssertNotNull(id, nameof(id)); Argument.AssertNotNull(schedule, nameof(schedule)); @@ -59,17 +59,17 @@ public BatchJobScheduleCreateContent(string id, BatchJobScheduleConfiguration sc Id = id; Schedule = schedule; JobSpecification = jobSpecification; - Metadata = new ChangeTrackingList(); + Metadata = new ChangeTrackingList(); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// A string that uniquely identifies the schedule within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). /// The display name for the schedule. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. /// The details of the Jobs to be created on this schedule. /// A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// Keeps track of any properties unknown to the library. 
- internal BatchJobScheduleCreateContent(string id, string displayName, BatchJobScheduleConfiguration schedule, BatchJobSpecification jobSpecification, IList metadata, IDictionary serializedAdditionalRawData) + internal BatchJobScheduleCreateOptions(string id, string displayName, BatchJobScheduleConfiguration schedule, BatchJobSpecification jobSpecification, IList metadata, IDictionary serializedAdditionalRawData) { Id = id; DisplayName = displayName; @@ -79,8 +79,8 @@ internal BatchJobScheduleCreateContent(string id, string displayName, BatchJobSc _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal BatchJobScheduleCreateContent() + /// Initializes a new instance of for deserialization. + internal BatchJobScheduleCreateOptions() { } @@ -93,6 +93,6 @@ internal BatchJobScheduleCreateContent() /// The details of the Jobs to be created on this schedule. public BatchJobSpecification JobSpecification { get; } /// A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. 
- public IList Metadata { get; } + public IList Metadata { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.Serialization.cs index ddd83b62503d..04cbbf20d1a0 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.Serialization.cs @@ -35,7 +35,7 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WritePropertyName("url"u8); - writer.WriteStringValue(Url); + writer.WriteStringValue(Uri.AbsoluteUri); writer.WritePropertyName("startTime"u8); writer.WriteStringValue(StartTime, "O"); writer.WritePropertyName("lastUpdateTime"u8); @@ -47,19 +47,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("wallClockTime"u8); writer.WriteStringValue(WallClockTime, "P"); writer.WritePropertyName("readIOps"u8); - writer.WriteStringValue(ReadIOps.ToString()); + writer.WriteStringValue(ReadIops.ToString()); writer.WritePropertyName("writeIOps"u8); - writer.WriteStringValue(WriteIOps.ToString()); + writer.WriteStringValue(WriteIops.ToString()); writer.WritePropertyName("readIOGiB"u8); - writer.WriteNumberValue(ReadIOGiB); + writer.WriteNumberValue(ReadIoGiB); writer.WritePropertyName("writeIOGiB"u8); - writer.WriteNumberValue(WriteIOGiB); + writer.WriteNumberValue(WriteIoGiB); writer.WritePropertyName("numSucceededTasks"u8); - writer.WriteStringValue(NumSucceededTasks.ToString()); + writer.WriteStringValue(SucceededTasksCount.ToString()); writer.WritePropertyName("numFailedTasks"u8); - writer.WriteStringValue(NumFailedTasks.ToString()); + writer.WriteStringValue(FailedTasksCount.ToString()); writer.WritePropertyName("numTaskRetries"u8); - writer.WriteStringValue(NumTaskRetries.ToString()); + 
writer.WriteStringValue(TaskRetriesCount.ToString()); writer.WritePropertyName("waitTime"u8); writer.WriteStringValue(WaitTime, "P"); if (options.Format != "W" && _serializedAdditionalRawData != null) @@ -99,7 +99,7 @@ internal static BatchJobScheduleStatistics DeserializeBatchJobScheduleStatistics { return null; } - string url = default; + Uri url = default; DateTimeOffset startTime = default; DateTimeOffset lastUpdateTime = default; TimeSpan userCPUTime = default; @@ -119,7 +119,7 @@ internal static BatchJobScheduleStatistics DeserializeBatchJobScheduleStatistics { if (property.NameEquals("url"u8)) { - url = property.Value.GetString(); + url = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("startTime"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.cs index e5b81b22f742..2579c3eb0abe 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleStatistics.cs @@ -46,72 +46,72 @@ public partial class BatchJobScheduleStatistics private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// The URL of the statistics. + /// The URL of the statistics. /// The start time of the time range covered by the statistics. /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. /// The total wall clock time of all the Tasks in all the Jobs created under the schedule. 
The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. - /// The total number of disk read operations made by all Tasks in all Jobs created under the schedule. - /// The total number of disk write operations made by all Tasks in all Jobs created under the schedule. - /// The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. - /// The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. - /// The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. - /// The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. - /// The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. + /// The total number of disk read operations made by all Tasks in all Jobs created under the schedule. + /// The total number of disk write operations made by all Tasks in all Jobs created under the schedule. + /// The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. + /// The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. + /// The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. + /// The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. 
+ /// The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. /// The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. - /// is null. - internal BatchJobScheduleStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, long numSucceededTasks, long numFailedTasks, long numTaskRetries, TimeSpan waitTime) + /// is null. + internal BatchJobScheduleStatistics(Uri uri, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIops, long writeIops, float readIoGiB, float writeIoGiB, long succeededTasksCount, long failedTasksCount, long taskRetriesCount, TimeSpan waitTime) { - Argument.AssertNotNull(url, nameof(url)); + Argument.AssertNotNull(uri, nameof(uri)); - Url = url; + Uri = uri; StartTime = startTime; LastUpdateTime = lastUpdateTime; UserCpuTime = userCpuTime; KernelCpuTime = kernelCpuTime; WallClockTime = wallClockTime; - ReadIOps = readIOps; - WriteIOps = writeIOps; - ReadIOGiB = readIOGiB; - WriteIOGiB = writeIOGiB; - NumSucceededTasks = numSucceededTasks; - NumFailedTasks = numFailedTasks; - NumTaskRetries = numTaskRetries; + ReadIops = readIops; + WriteIops = writeIops; + ReadIoGiB = readIoGiB; + WriteIoGiB = writeIoGiB; + SucceededTasksCount = succeededTasksCount; + FailedTasksCount = failedTasksCount; + TaskRetriesCount = taskRetriesCount; WaitTime = waitTime; } /// Initializes a new instance of . - /// The URL of the statistics. 
+ /// The URL of the statistics. /// The start time of the time range covered by the statistics. /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. /// The total wall clock time of all the Tasks in all the Jobs created under the schedule. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. - /// The total number of disk read operations made by all Tasks in all Jobs created under the schedule. - /// The total number of disk write operations made by all Tasks in all Jobs created under the schedule. - /// The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. - /// The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. - /// The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. - /// The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. - /// The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. + /// The total number of disk read operations made by all Tasks in all Jobs created under the schedule. + /// The total number of disk write operations made by all Tasks in all Jobs created under the schedule. 
+ /// The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. + /// The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. + /// The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. + /// The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. + /// The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. /// The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. /// Keeps track of any properties unknown to the library. 
- internal BatchJobScheduleStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, long numSucceededTasks, long numFailedTasks, long numTaskRetries, TimeSpan waitTime, IDictionary serializedAdditionalRawData) + internal BatchJobScheduleStatistics(Uri uri, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIops, long writeIops, float readIoGiB, float writeIoGiB, long succeededTasksCount, long failedTasksCount, long taskRetriesCount, TimeSpan waitTime, IDictionary serializedAdditionalRawData) { - Url = url; + Uri = uri; StartTime = startTime; LastUpdateTime = lastUpdateTime; UserCpuTime = userCpuTime; KernelCpuTime = kernelCpuTime; WallClockTime = wallClockTime; - ReadIOps = readIOps; - WriteIOps = writeIOps; - ReadIOGiB = readIOGiB; - WriteIOGiB = writeIOGiB; - NumSucceededTasks = numSucceededTasks; - NumFailedTasks = numFailedTasks; - NumTaskRetries = numTaskRetries; + ReadIops = readIops; + WriteIops = writeIops; + ReadIoGiB = readIoGiB; + WriteIoGiB = writeIoGiB; + SucceededTasksCount = succeededTasksCount; + FailedTasksCount = failedTasksCount; + TaskRetriesCount = taskRetriesCount; WaitTime = waitTime; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -122,7 +122,7 @@ internal BatchJobScheduleStatistics() } /// The URL of the statistics. - public string Url { get; } + public Uri Uri { get; } /// The start time of the time range covered by the statistics. public DateTimeOffset StartTime { get; } /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. @@ -134,19 +134,19 @@ internal BatchJobScheduleStatistics() /// The total wall clock time of all the Tasks in all the Jobs created under the schedule. 
The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. public TimeSpan WallClockTime { get; } /// The total number of disk read operations made by all Tasks in all Jobs created under the schedule. - public long ReadIOps { get; } + public long ReadIops { get; } /// The total number of disk write operations made by all Tasks in all Jobs created under the schedule. - public long WriteIOps { get; } + public long WriteIops { get; } /// The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. - public float ReadIOGiB { get; } + public float ReadIoGiB { get; } /// The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. - public float WriteIOGiB { get; } + public float WriteIoGiB { get; } /// The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. - public long NumSucceededTasks { get; } + public long SucceededTasksCount { get; } /// The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. - public long NumFailedTasks { get; } + public long FailedTasksCount { get; } /// The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. - public long NumTaskRetries { get; } + public long TaskRetriesCount { get; } /// The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. 
(If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. public TimeSpan WaitTime { get; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleUpdateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleUpdateContent.Serialization.cs deleted file mode 100644 index 8c8534c752f6..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleUpdateContent.Serialization.cs +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; -using Azure.Core; - -namespace Azure.Compute.Batch -{ - public partial class BatchJobScheduleUpdateContent : IUtf8JsonSerializable, IJsonModel - { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - writer.WriteStartObject(); - JsonModelWriteCore(writer, options); - writer.WriteEndObject(); - } - - /// The JSON writer. - /// The client options for reading and writing models. - protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(BatchJobScheduleUpdateContent)} does not support writing '{format}' format."); - } - - if (Optional.IsDefined(Schedule)) - { - writer.WritePropertyName("schedule"u8); - writer.WriteObjectValue(Schedule, options); - } - if (Optional.IsDefined(JobSpecification)) - { - writer.WritePropertyName("jobSpecification"u8); - writer.WriteObjectValue(JobSpecification, options); - } - if (Optional.IsCollectionDefined(Metadata)) - { - writer.WritePropertyName("metadata"u8); - writer.WriteStartArray(); - foreach (var item in Metadata) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } - if (options.Format != "W" && _serializedAdditionalRawData != null) - { - foreach (var item in _serializedAdditionalRawData) - { - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - } - - BatchJobScheduleUpdateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(BatchJobScheduleUpdateContent)} does not support reading '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchJobScheduleUpdateContent(document.RootElement, options); - } - - internal static BatchJobScheduleUpdateContent DeserializeBatchJobScheduleUpdateContent(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= ModelSerializationExtensions.WireOptions; - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - BatchJobScheduleConfiguration schedule = default; - BatchJobSpecification jobSpecification = default; - IList metadata = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("schedule"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - schedule = BatchJobScheduleConfiguration.DeserializeBatchJobScheduleConfiguration(property.Value, options); - continue; - } - if (property.NameEquals("jobSpecification"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - jobSpecification = BatchJobSpecification.DeserializeBatchJobSpecification(property.Value, options); - continue; - } - if (property.NameEquals("metadata"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(MetadataItem.DeserializeMetadataItem(item, options)); - } - metadata = array; - continue; - } - if (options.Format != "W") - { - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = rawDataDictionary; - return new 
BatchJobScheduleUpdateContent(schedule, jobSpecification, metadata ?? new ChangeTrackingList(), serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); - default: - throw new FormatException($"The model {nameof(BatchJobScheduleUpdateContent)} does not support writing '{options.Format}' format."); - } - } - - BatchJobScheduleUpdateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchJobScheduleUpdateContent(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(BatchJobScheduleUpdateContent)} does not support reading '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - /// Deserializes the model from a raw response. - /// The response to deserialize the model from. - internal static BatchJobScheduleUpdateContent FromResponse(Response response) - { - using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchJobScheduleUpdateContent(document.RootElement); - } - - /// Convert into a . 
- internal virtual RequestContent ToRequestContent() - { - var content = new Utf8JsonRequestContent(); - content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); - return content; - } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleUpdateContent.cs deleted file mode 100644 index 6136aa6bbdc4..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobScheduleUpdateContent.cs +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace Azure.Compute.Batch -{ - /// Parameters for updating an Azure Batch Job Schedule. - public partial class BatchJobScheduleUpdateContent - { - /// - /// Keeps track of any properties unknown to the library. - /// - /// To assign an object to the value of this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - private IDictionary _serializedAdditionalRawData; - - /// Initializes a new instance of . - public BatchJobScheduleUpdateContent() - { - Metadata = new ChangeTrackingList(); - } - - /// Initializes a new instance of . - /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. If you do not specify this element, the existing schedule is left unchanged. 
- /// The details of the Jobs to be created on this schedule. Updates affect only Jobs that are started after the update has taken place. Any currently active Job continues with the older specification. - /// A list of name-value pairs associated with the Job Schedule as metadata. If you do not specify this element, existing metadata is left unchanged. - /// Keeps track of any properties unknown to the library. - internal BatchJobScheduleUpdateContent(BatchJobScheduleConfiguration schedule, BatchJobSpecification jobSpecification, IList metadata, IDictionary serializedAdditionalRawData) - { - Schedule = schedule; - JobSpecification = jobSpecification; - Metadata = metadata; - _serializedAdditionalRawData = serializedAdditionalRawData; - } - - /// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. If you do not specify this element, the existing schedule is left unchanged. - public BatchJobScheduleConfiguration Schedule { get; set; } - /// The details of the Jobs to be created on this schedule. Updates affect only Jobs that are started after the update has taken place. Any currently active Job continues with the older specification. - public BatchJobSpecification JobSpecification { get; set; } - /// A list of name-value pairs associated with the Job Schedule as metadata. If you do not specify this element, existing metadata is left unchanged. 
- public IList Metadata { get; } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.Serialization.cs index 71e6a42fe2cd..4532de4e9f91 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.Serialization.cs @@ -93,7 +93,7 @@ internal static BatchJobSchedulingError DeserializeBatchJobSchedulingError(JsonE { return null; } - ErrorCategory category = default; + BatchErrorSourceCategory category = default; string code = default; string message = default; IReadOnlyList details = default; @@ -103,7 +103,7 @@ internal static BatchJobSchedulingError DeserializeBatchJobSchedulingError(JsonE { if (property.NameEquals("category"u8)) { - category = new ErrorCategory(property.Value.GetString()); + category = new BatchErrorSourceCategory(property.Value.GetString()); continue; } if (property.NameEquals("code"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.cs index e3913cde134b..e303e5c1fcb1 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSchedulingError.cs @@ -47,7 +47,7 @@ public partial class BatchJobSchedulingError /// Initializes a new instance of . /// The category of the Job scheduling error. - internal BatchJobSchedulingError(ErrorCategory category) + internal BatchJobSchedulingError(BatchErrorSourceCategory category) { Category = category; Details = new ChangeTrackingList(); @@ -59,7 +59,7 @@ internal BatchJobSchedulingError(ErrorCategory category) /// A message describing the Job scheduling error, intended to be suitable for display in a user interface. /// A list of additional error details related to the scheduling error. 
/// Keeps track of any properties unknown to the library. - internal BatchJobSchedulingError(ErrorCategory category, string code, string message, IReadOnlyList details, IDictionary serializedAdditionalRawData) + internal BatchJobSchedulingError(BatchErrorSourceCategory category, string code, string message, IReadOnlyList details, IDictionary serializedAdditionalRawData) { Category = category; Code = code; @@ -74,7 +74,7 @@ internal BatchJobSchedulingError() } /// The category of the Job scheduling error. - public ErrorCategory Category { get; } + public BatchErrorSourceCategory Category { get; } /// An identifier for the Job scheduling error. Codes are invariant and are intended to be consumed programmatically. public string Code { get; } /// A message describing the Job scheduling error, intended to be suitable for display in a user interface. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.Serialization.cs index f3e1b3cf32d8..1a0bbcaccc3d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.Serialization.cs @@ -59,15 +59,15 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("usesTaskDependencies"u8); writer.WriteBooleanValue(UsesTaskDependencies.Value); } - if (Optional.IsDefined(OnAllTasksComplete)) + if (Optional.IsDefined(AllTasksCompleteMode)) { writer.WritePropertyName("onAllTasksComplete"u8); - writer.WriteStringValue(OnAllTasksComplete.Value.ToString()); + writer.WriteStringValue(AllTasksCompleteMode.Value.ToString()); } - if (Optional.IsDefined(OnTaskFailure)) + if (Optional.IsDefined(TaskFailureMode)) { writer.WritePropertyName("onTaskFailure"u8); - writer.WriteStringValue(OnTaskFailure.Value.ToString()); + writer.WriteStringValue(TaskFailureMode.Value.ToString()); } if 
(Optional.IsDefined(NetworkConfiguration)) { @@ -158,8 +158,8 @@ internal static BatchJobSpecification DeserializeBatchJobSpecification(JsonEleme int? maxParallelTasks = default; string displayName = default; bool? usesTaskDependencies = default; - OnAllBatchTasksComplete? onAllTasksComplete = default; - OnBatchTaskFailure? onTaskFailure = default; + BatchAllTasksCompleteMode? onAllTasksComplete = default; + BatchTaskFailureMode? onTaskFailure = default; BatchJobNetworkConfiguration networkConfiguration = default; BatchJobConstraints constraints = default; BatchJobManagerTask jobManagerTask = default; @@ -167,7 +167,7 @@ internal static BatchJobSpecification DeserializeBatchJobSpecification(JsonEleme BatchJobReleaseTask jobReleaseTask = default; IList commonEnvironmentSettings = default; BatchPoolInfo poolInfo = default; - IList metadata = default; + IList metadata = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -219,7 +219,7 @@ internal static BatchJobSpecification DeserializeBatchJobSpecification(JsonEleme { continue; } - onAllTasksComplete = new OnAllBatchTasksComplete(property.Value.GetString()); + onAllTasksComplete = new BatchAllTasksCompleteMode(property.Value.GetString()); continue; } if (property.NameEquals("onTaskFailure"u8)) @@ -228,7 +228,7 @@ internal static BatchJobSpecification DeserializeBatchJobSpecification(JsonEleme { continue; } - onTaskFailure = new OnBatchTaskFailure(property.Value.GetString()); + onTaskFailure = new BatchTaskFailureMode(property.Value.GetString()); continue; } if (property.NameEquals("networkConfiguration"u8)) @@ -301,10 +301,10 @@ internal static BatchJobSpecification DeserializeBatchJobSpecification(JsonEleme { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + 
array.Add(BatchMetadataItem.DeserializeBatchMetadataItem(item, options)); } metadata = array; continue; @@ -330,7 +330,7 @@ internal static BatchJobSpecification DeserializeBatchJobSpecification(JsonEleme jobReleaseTask, commonEnvironmentSettings ?? new ChangeTrackingList(), poolInfo, - metadata ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingList(), serializedAdditionalRawData); } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.cs index 692c7b69ba71..ed0457802812 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobSpecification.cs @@ -54,7 +54,7 @@ public BatchJobSpecification(BatchPoolInfo poolInfo) CommonEnvironmentSettings = new ChangeTrackingList(); PoolInfo = poolInfo; - Metadata = new ChangeTrackingList(); + Metadata = new ChangeTrackingList(); } /// Initializes a new instance of . @@ -63,8 +63,8 @@ public BatchJobSpecification(BatchPoolInfo poolInfo) /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. /// The display name for Jobs created under this schedule. The name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// Whether Tasks in the Job can define dependencies on each other. The default is false. - /// The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. 
 This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. - /// The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + /// The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + /// The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. /// The network configuration for the Job. /// The execution constraints for Jobs created under this schedule. /// The details of a Job Manager Task to be launched when a Job is started under this schedule. If the Job does not specify a Job Manager Task, the user must explicitly add Tasks to the Job using the Task API.
If the Job does specify a Job Manager Task, the Batch service creates the Job Manager Task when the Job is created, and will try to schedule the Job Manager Task before scheduling other Tasks in the Job. @@ -74,15 +74,15 @@ public BatchJobSpecification(BatchPoolInfo poolInfo) /// The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. /// A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// Keeps track of any properties unknown to the library. - internal BatchJobSpecification(int? priority, bool? allowTaskPreemption, int? maxParallelTasks, string displayName, bool? usesTaskDependencies, OnAllBatchTasksComplete? onAllTasksComplete, OnBatchTaskFailure? onTaskFailure, BatchJobNetworkConfiguration networkConfiguration, BatchJobConstraints constraints, BatchJobManagerTask jobManagerTask, BatchJobPreparationTask jobPreparationTask, BatchJobReleaseTask jobReleaseTask, IList commonEnvironmentSettings, BatchPoolInfo poolInfo, IList metadata, IDictionary serializedAdditionalRawData) + internal BatchJobSpecification(int? priority, bool? allowTaskPreemption, int? maxParallelTasks, string displayName, bool? usesTaskDependencies, BatchAllTasksCompleteMode? allTasksCompleteMode, BatchTaskFailureMode? 
taskFailureMode, BatchJobNetworkConfiguration networkConfiguration, BatchJobConstraints constraints, BatchJobManagerTask jobManagerTask, BatchJobPreparationTask jobPreparationTask, BatchJobReleaseTask jobReleaseTask, IList commonEnvironmentSettings, BatchPoolInfo poolInfo, IList metadata, IDictionary serializedAdditionalRawData) { Priority = priority; AllowTaskPreemption = allowTaskPreemption; MaxParallelTasks = maxParallelTasks; DisplayName = displayName; UsesTaskDependencies = usesTaskDependencies; - OnAllTasksComplete = onAllTasksComplete; - OnTaskFailure = onTaskFailure; + AllTasksCompleteMode = allTasksCompleteMode; + TaskFailureMode = taskFailureMode; NetworkConfiguration = networkConfiguration; Constraints = constraints; JobManagerTask = jobManagerTask; @@ -110,9 +110,9 @@ internal BatchJobSpecification() /// Whether Tasks in the Job can define dependencies on each other. The default is false. public bool? UsesTaskDependencies { get; set; } /// The action the Batch service should take when all Tasks in a Job created under this schedule are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. - public OnAllBatchTasksComplete? OnAllTasksComplete { get; set; } + public BatchAllTasksCompleteMode? AllTasksCompleteMode { get; set; } /// The action the Batch service should take when any Task fails in a Job created under this schedule. A Task is considered to have failed if it have failed if has a failureInfo. 
A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. - public OnBatchTaskFailure? OnTaskFailure { get; set; } + public BatchTaskFailureMode? TaskFailureMode { get; set; } /// The network configuration for the Job. public BatchJobNetworkConfiguration NetworkConfiguration { get; set; } /// The execution constraints for Jobs created under this schedule. @@ -128,6 +128,6 @@ internal BatchJobSpecification() /// The Pool on which the Batch service runs the Tasks of Jobs created under this schedule. public BatchPoolInfo PoolInfo { get; set; } /// A list of name-value pairs associated with each Job created under this schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - public IList Metadata { get; } + public IList Metadata { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.Serialization.cs index 6cd377477e69..d21250c8101d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.Serialization.cs @@ -35,7 +35,7 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WritePropertyName("url"u8); - writer.WriteStringValue(Url); + writer.WriteStringValue(Uri.AbsoluteUri); writer.WritePropertyName("startTime"u8); writer.WriteStringValue(StartTime, "O"); writer.WritePropertyName("lastUpdateTime"u8); @@ -47,19 +47,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("wallClockTime"u8); writer.WriteStringValue(WallClockTime, "P"); writer.WritePropertyName("readIOps"u8); - writer.WriteStringValue(ReadIOps.ToString()); + 
writer.WriteStringValue(ReadIops.ToString()); writer.WritePropertyName("writeIOps"u8); - writer.WriteStringValue(WriteIOps.ToString()); + writer.WriteStringValue(WriteIops.ToString()); writer.WritePropertyName("readIOGiB"u8); - writer.WriteNumberValue(ReadIOGiB); + writer.WriteNumberValue(ReadIoGiB); writer.WritePropertyName("writeIOGiB"u8); - writer.WriteNumberValue(WriteIOGiB); + writer.WriteNumberValue(WriteIoGiB); writer.WritePropertyName("numSucceededTasks"u8); - writer.WriteStringValue(NumSucceededTasks.ToString()); + writer.WriteStringValue(SucceededTasksCount.ToString()); writer.WritePropertyName("numFailedTasks"u8); - writer.WriteStringValue(NumFailedTasks.ToString()); + writer.WriteStringValue(FailedTasksCount.ToString()); writer.WritePropertyName("numTaskRetries"u8); - writer.WriteStringValue(NumTaskRetries.ToString()); + writer.WriteStringValue(TaskRetriesCount.ToString()); writer.WritePropertyName("waitTime"u8); writer.WriteStringValue(WaitTime, "P"); if (options.Format != "W" && _serializedAdditionalRawData != null) @@ -99,7 +99,7 @@ internal static BatchJobStatistics DeserializeBatchJobStatistics(JsonElement ele { return null; } - string url = default; + Uri url = default; DateTimeOffset startTime = default; DateTimeOffset lastUpdateTime = default; TimeSpan userCPUTime = default; @@ -119,7 +119,7 @@ internal static BatchJobStatistics DeserializeBatchJobStatistics(JsonElement ele { if (property.NameEquals("url"u8)) { - url = property.Value.GetString(); + url = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("startTime"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.cs index ca6254790eee..6a1759ce2734 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobStatistics.cs @@ -46,72 +46,72 @@ public partial class BatchJobStatistics private IDictionary 
_serializedAdditionalRawData; /// Initializes a new instance of . - /// The URL of the statistics. + /// The URL of the statistics. /// The start time of the time range covered by the statistics. /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. /// The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. - /// The total number of disk read operations made by all Tasks in the Job. - /// The total number of disk write operations made by all Tasks in the Job. - /// The total amount of data in GiB read from disk by all Tasks in the Job. - /// The total amount of data in GiB written to disk by all Tasks in the Job. - /// The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. - /// The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. - /// The total number of retries on all the Tasks in the Job during the given time range. + /// The total number of disk read operations made by all Tasks in the Job. + /// The total number of disk write operations made by all Tasks in the Job. + /// The total amount of data in GiB read from disk by all Tasks in the Job. + /// The total amount of data in GiB written to disk by all Tasks in the Job. 
+ /// The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. + /// The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. + /// The total number of retries on all the Tasks in the Job during the given time range. /// The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. - /// is null. - internal BatchJobStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, long numSucceededTasks, long numFailedTasks, long numTaskRetries, TimeSpan waitTime) + /// is null. 
+ internal BatchJobStatistics(Uri uri, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIops, long writeIops, float readIoGiB, float writeIoGiB, long succeededTasksCount, long failedTasksCount, long taskRetriesCount, TimeSpan waitTime) { - Argument.AssertNotNull(url, nameof(url)); + Argument.AssertNotNull(uri, nameof(uri)); - Url = url; + Uri = uri; StartTime = startTime; LastUpdateTime = lastUpdateTime; UserCpuTime = userCpuTime; KernelCpuTime = kernelCpuTime; WallClockTime = wallClockTime; - ReadIOps = readIOps; - WriteIOps = writeIOps; - ReadIOGiB = readIOGiB; - WriteIOGiB = writeIOGiB; - NumSucceededTasks = numSucceededTasks; - NumFailedTasks = numFailedTasks; - NumTaskRetries = numTaskRetries; + ReadIops = readIops; + WriteIops = writeIops; + ReadIoGiB = readIoGiB; + WriteIoGiB = writeIoGiB; + SucceededTasksCount = succeededTasksCount; + FailedTasksCount = failedTasksCount; + TaskRetriesCount = taskRetriesCount; WaitTime = waitTime; } /// Initializes a new instance of . - /// The URL of the statistics. + /// The URL of the statistics. /// The start time of the time range covered by the statistics. /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. /// The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. - /// The total number of disk read operations made by all Tasks in the Job. 
- /// The total number of disk write operations made by all Tasks in the Job. - /// The total amount of data in GiB read from disk by all Tasks in the Job. - /// The total amount of data in GiB written to disk by all Tasks in the Job. - /// The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. - /// The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. - /// The total number of retries on all the Tasks in the Job during the given time range. + /// The total number of disk read operations made by all Tasks in the Job. + /// The total number of disk write operations made by all Tasks in the Job. + /// The total amount of data in GiB read from disk by all Tasks in the Job. + /// The total amount of data in GiB written to disk by all Tasks in the Job. + /// The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. + /// The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. + /// The total number of retries on all the Tasks in the Job during the given time range. /// The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. /// Keeps track of any properties unknown to the library. 
- internal BatchJobStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, long numSucceededTasks, long numFailedTasks, long numTaskRetries, TimeSpan waitTime, IDictionary serializedAdditionalRawData) + internal BatchJobStatistics(Uri uri, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIops, long writeIops, float readIoGiB, float writeIoGiB, long succeededTasksCount, long failedTasksCount, long taskRetriesCount, TimeSpan waitTime, IDictionary serializedAdditionalRawData) { - Url = url; + Uri = uri; StartTime = startTime; LastUpdateTime = lastUpdateTime; UserCpuTime = userCpuTime; KernelCpuTime = kernelCpuTime; WallClockTime = wallClockTime; - ReadIOps = readIOps; - WriteIOps = writeIOps; - ReadIOGiB = readIOGiB; - WriteIOGiB = writeIOGiB; - NumSucceededTasks = numSucceededTasks; - NumFailedTasks = numFailedTasks; - NumTaskRetries = numTaskRetries; + ReadIops = readIops; + WriteIops = writeIops; + ReadIoGiB = readIoGiB; + WriteIoGiB = writeIoGiB; + SucceededTasksCount = succeededTasksCount; + FailedTasksCount = failedTasksCount; + TaskRetriesCount = taskRetriesCount; WaitTime = waitTime; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -122,7 +122,7 @@ internal BatchJobStatistics() } /// The URL of the statistics. - public string Url { get; } + public Uri Uri { get; } /// The start time of the time range covered by the statistics. public DateTimeOffset StartTime { get; } /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. @@ -134,19 +134,19 @@ internal BatchJobStatistics() /// The total wall clock time of all Tasks in the Job. 
The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. public TimeSpan WallClockTime { get; } /// The total number of disk read operations made by all Tasks in the Job. - public long ReadIOps { get; } + public long ReadIops { get; } /// The total number of disk write operations made by all Tasks in the Job. - public long WriteIOps { get; } + public long WriteIops { get; } /// The total amount of data in GiB read from disk by all Tasks in the Job. - public float ReadIOGiB { get; } + public float ReadIoGiB { get; } /// The total amount of data in GiB written to disk by all Tasks in the Job. - public float WriteIOGiB { get; } + public float WriteIoGiB { get; } /// The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. - public long NumSucceededTasks { get; } + public long SucceededTasksCount { get; } /// The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. - public long NumFailedTasks { get; } + public long FailedTasksCount { get; } /// The total number of retries on all the Tasks in the Job during the given time range. - public long NumTaskRetries { get; } + public long TaskRetriesCount { get; } /// The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. 
public TimeSpan WaitTime { get; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateOptions.Serialization.cs similarity index 75% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateOptions.Serialization.cs index 5b0cfae31daf..27ab9f598712 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchJobTerminateContent : IUtf8JsonSerializable, IJsonModel + internal partial class BatchJobTerminateOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelRead /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchJobTerminateContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchJobTerminateOptions)} does not support writing '{format}' format."); } if (Optional.IsDefined(TerminationReason)) @@ -56,19 +56,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchJobTerminateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchJobTerminateOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchJobTerminateContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchJobTerminateOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchJobTerminateContent(document.RootElement, options); + return DeserializeBatchJobTerminateOptions(document.RootElement, options); } - internal static BatchJobTerminateContent DeserializeBatchJobTerminateContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchJobTerminateOptions DeserializeBatchJobTerminateOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -92,46 +92,46 @@ internal static BatchJobTerminateContent DeserializeBatchJobTerminateContent(Jso } } serializedAdditionalRawData = rawDataDictionary; - return new BatchJobTerminateContent(terminateReason, serializedAdditionalRawData); + 
return new BatchJobTerminateOptions(terminateReason, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchJobTerminateContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchJobTerminateOptions)} does not support writing '{options.Format}' format."); } } - BatchJobTerminateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchJobTerminateOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchJobTerminateContent(document.RootElement, options); + return DeserializeBatchJobTerminateOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchJobTerminateContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchJobTerminateOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchJobTerminateContent FromResponse(Response response) + internal static BatchJobTerminateOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchJobTerminateContent(document.RootElement); + return DeserializeBatchJobTerminateOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateOptions.cs similarity index 91% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateOptions.cs index 537722bd2645..8fc1a6dc8c1e 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobTerminateOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for terminating an Azure Batch Job. 
- public partial class BatchJobTerminateContent + internal partial class BatchJobTerminateOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,15 +45,15 @@ public partial class BatchJobTerminateContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public BatchJobTerminateContent() + /// Initializes a new instance of . + public BatchJobTerminateOptions() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The text you want to appear as the Job's TerminationReason. The default is 'UserTerminate'. /// Keeps track of any properties unknown to the library. - internal BatchJobTerminateContent(string terminationReason, IDictionary serializedAdditionalRawData) + internal BatchJobTerminateOptions(string terminationReason, IDictionary serializedAdditionalRawData) { TerminationReason = terminationReason; _serializedAdditionalRawData = serializedAdditionalRawData; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.Serialization.cs deleted file mode 100644 index 6718079b6a43..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.Serialization.cs +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
- -// - -#nullable disable - -using System; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; -using Azure.Core; - -namespace Azure.Compute.Batch -{ - public partial class BatchJobUpdateContent : IUtf8JsonSerializable, IJsonModel - { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - writer.WriteStartObject(); - JsonModelWriteCore(writer, options); - writer.WriteEndObject(); - } - - /// The JSON writer. - /// The client options for reading and writing models. - protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(BatchJobUpdateContent)} does not support writing '{format}' format."); - } - - if (Optional.IsDefined(Priority)) - { - writer.WritePropertyName("priority"u8); - writer.WriteNumberValue(Priority.Value); - } - if (Optional.IsDefined(AllowTaskPreemption)) - { - writer.WritePropertyName("allowTaskPreemption"u8); - writer.WriteBooleanValue(AllowTaskPreemption.Value); - } - if (Optional.IsDefined(MaxParallelTasks)) - { - writer.WritePropertyName("maxParallelTasks"u8); - writer.WriteNumberValue(MaxParallelTasks.Value); - } - if (Optional.IsDefined(Constraints)) - { - writer.WritePropertyName("constraints"u8); - writer.WriteObjectValue(Constraints, options); - } - if (Optional.IsDefined(PoolInfo)) - { - writer.WritePropertyName("poolInfo"u8); - writer.WriteObjectValue(PoolInfo, options); - } - if (Optional.IsDefined(OnAllTasksComplete)) - { - writer.WritePropertyName("onAllTasksComplete"u8); - writer.WriteStringValue(OnAllTasksComplete.Value.ToString()); - } - if (Optional.IsCollectionDefined(Metadata)) - { - 
writer.WritePropertyName("metadata"u8); - writer.WriteStartArray(); - foreach (var item in Metadata) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } - if (Optional.IsDefined(NetworkConfiguration)) - { - writer.WritePropertyName("networkConfiguration"u8); - writer.WriteObjectValue(NetworkConfiguration, options); - } - if (options.Format != "W" && _serializedAdditionalRawData != null) - { - foreach (var item in _serializedAdditionalRawData) - { - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - } - - BatchJobUpdateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(BatchJobUpdateContent)} does not support reading '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchJobUpdateContent(document.RootElement, options); - } - - internal static BatchJobUpdateContent DeserializeBatchJobUpdateContent(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= ModelSerializationExtensions.WireOptions; - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - int? priority = default; - bool? allowTaskPreemption = default; - int? maxParallelTasks = default; - BatchJobConstraints constraints = default; - BatchPoolInfo poolInfo = default; - OnAllBatchTasksComplete? 
onAllTasksComplete = default; - IList metadata = default; - BatchJobNetworkConfiguration networkConfiguration = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("priority"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - priority = property.Value.GetInt32(); - continue; - } - if (property.NameEquals("allowTaskPreemption"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - allowTaskPreemption = property.Value.GetBoolean(); - continue; - } - if (property.NameEquals("maxParallelTasks"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - maxParallelTasks = property.Value.GetInt32(); - continue; - } - if (property.NameEquals("constraints"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - constraints = BatchJobConstraints.DeserializeBatchJobConstraints(property.Value, options); - continue; - } - if (property.NameEquals("poolInfo"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - poolInfo = BatchPoolInfo.DeserializeBatchPoolInfo(property.Value, options); - continue; - } - if (property.NameEquals("onAllTasksComplete"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - onAllTasksComplete = new OnAllBatchTasksComplete(property.Value.GetString()); - continue; - } - if (property.NameEquals("metadata"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(MetadataItem.DeserializeMetadataItem(item, options)); - } - metadata = array; - continue; - } - if (property.NameEquals("networkConfiguration"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - networkConfiguration = 
BatchJobNetworkConfiguration.DeserializeBatchJobNetworkConfiguration(property.Value, options); - continue; - } - if (options.Format != "W") - { - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = rawDataDictionary; - return new BatchJobUpdateContent( - priority, - allowTaskPreemption, - maxParallelTasks, - constraints, - poolInfo, - onAllTasksComplete, - metadata ?? new ChangeTrackingList(), - networkConfiguration, - serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); - default: - throw new FormatException($"The model {nameof(BatchJobUpdateContent)} does not support writing '{options.Format}' format."); - } - } - - BatchJobUpdateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchJobUpdateContent(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(BatchJobUpdateContent)} does not support reading '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - /// Deserializes the model from a raw response. - /// The response to deserialize the model from. 
- internal static BatchJobUpdateContent FromResponse(Response response) - { - using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchJobUpdateContent(document.RootElement); - } - - /// Convert into a . - internal virtual RequestContent ToRequestContent() - { - var content = new Utf8JsonRequestContent(); - content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); - return content; - } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.cs deleted file mode 100644 index 0cbcd028e9a1..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchJobUpdateContent.cs +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace Azure.Compute.Batch -{ - /// Parameters for updating an Azure Batch Job. - public partial class BatchJobUpdateContent - { - /// - /// Keeps track of any properties unknown to the library. - /// - /// To assign an object to the value of this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - private IDictionary _serializedAdditionalRawData; - - /// Initializes a new instance of . 
- public BatchJobUpdateContent() - { - Metadata = new ChangeTrackingList(); - } - - /// Initializes a new instance of . - /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged. - /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. - /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. You can update a job's maxParallelTasks after it has been created using the update job API. - /// The execution constraints for the Job. If omitted, the existing execution constraints are left unchanged. - /// The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. - /// The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. 
If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). - /// A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. - /// The network configuration for the Job. - /// Keeps track of any properties unknown to the library. - internal BatchJobUpdateContent(int? priority, bool? allowTaskPreemption, int? maxParallelTasks, BatchJobConstraints constraints, BatchPoolInfo poolInfo, OnAllBatchTasksComplete? onAllTasksComplete, IList metadata, BatchJobNetworkConfiguration networkConfiguration, IDictionary serializedAdditionalRawData) - { - Priority = priority; - AllowTaskPreemption = allowTaskPreemption; - MaxParallelTasks = maxParallelTasks; - Constraints = constraints; - PoolInfo = poolInfo; - OnAllTasksComplete = onAllTasksComplete; - Metadata = metadata; - NetworkConfiguration = networkConfiguration; - _serializedAdditionalRawData = serializedAdditionalRawData; - } - - /// The priority of the Job. Priority values can range from -1000 to 1000, with -1000 being the lowest priority and 1000 being the highest priority. If omitted, the priority of the Job is left unchanged. - public int? Priority { get; set; } - /// Whether Tasks in this job can be preempted by other high priority jobs. If the value is set to True, other high priority jobs submitted to the system will take precedence and will be able requeue tasks from this job. You can update a job's allowTaskPreemption after it has been created using the update job API. - public bool? AllowTaskPreemption { get; set; } - /// The maximum number of tasks that can be executed in parallel for the job. The value of maxParallelTasks must be -1 or greater than 0 if specified. If not specified, the default value is -1, which means there's no limit to the number of tasks that can be run at once. 
You can update a job's maxParallelTasks after it has been created using the update job API. - public int? MaxParallelTasks { get; set; } - /// The execution constraints for the Job. If omitted, the existing execution constraints are left unchanged. - public BatchJobConstraints Constraints { get; set; } - /// The Pool on which the Batch service runs the Job's Tasks. You may change the Pool for a Job only when the Job is disabled. The Patch Job call will fail if you include the poolInfo element and the Job is not disabled. If you specify an autoPoolSpecification in the poolInfo, only the keepAlive property of the autoPoolSpecification can be updated, and then only if the autoPoolSpecification has a poolLifetimeOption of Job (other job properties can be updated as normal). If omitted, the Job continues to run on its current Pool. - public BatchPoolInfo PoolInfo { get; set; } - /// The action the Batch service should take when all Tasks in the Job are in the completed state. If omitted, the completion behavior is left unchanged. You may not change the value from terminatejob to noaction - that is, once you have engaged automatic Job termination, you cannot turn it off again. If you try to do this, the request fails with an 'invalid property value' error response; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). - public OnAllBatchTasksComplete? OnAllTasksComplete { get; set; } - /// A list of name-value pairs associated with the Job as metadata. If omitted, the existing Job metadata is left unchanged. - public IList Metadata { get; } - /// The network configuration for the Job. 
- public BatchJobNetworkConfiguration NetworkConfiguration { get; set; } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/MetadataItem.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchMetadataItem.Serialization.cs similarity index 69% rename from sdk/batch/Azure.Compute.Batch/src/Generated/MetadataItem.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchMetadataItem.Serialization.cs index 0203af71844c..fcfde5c7adbf 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/MetadataItem.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchMetadataItem.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class MetadataItem : IUtf8JsonSerializable, IJsonModel + public partial class BatchMetadataItem : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOpti /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(MetadataItem)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchMetadataItem)} does not support writing '{format}' format."); } writer.WritePropertyName("name"u8); @@ -55,19 +55,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - MetadataItem IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchMetadataItem IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(MetadataItem)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchMetadataItem)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeMetadataItem(document.RootElement, options); + return DeserializeBatchMetadataItem(document.RootElement, options); } - internal static MetadataItem DeserializeMetadataItem(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchMetadataItem DeserializeBatchMetadataItem(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -97,46 +97,46 @@ internal static MetadataItem DeserializeMetadataItem(JsonElement element, ModelR } } serializedAdditionalRawData = rawDataDictionary; - return new MetadataItem(name, value, serializedAdditionalRawData); + return new BatchMetadataItem(name, value, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions 
options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(MetadataItem)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchMetadataItem)} does not support writing '{options.Format}' format."); } } - MetadataItem IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchMetadataItem IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeMetadataItem(document.RootElement, options); + return DeserializeBatchMetadataItem(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(MetadataItem)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchMetadataItem)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static MetadataItem FromResponse(Response response) + internal static BatchMetadataItem FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeMetadataItem(document.RootElement); + return DeserializeBatchMetadataItem(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/MetadataItem.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchMetadataItem.cs similarity index 83% rename from sdk/batch/Azure.Compute.Batch/src/Generated/MetadataItem.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchMetadataItem.cs index 910a0b6b7620..c707b4c4f69d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/MetadataItem.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchMetadataItem.cs @@ -14,7 +14,7 @@ namespace Azure.Compute.Batch /// The Batch service does not assign any meaning to this metadata; it is solely /// for the use of user code. /// - public partial class MetadataItem + public partial class BatchMetadataItem { /// /// Keeps track of any properties unknown to the library. @@ -48,11 +48,11 @@ public partial class MetadataItem /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// The name of the metadata item. /// The value of the metadata item. /// or is null. - public MetadataItem(string name, string value) + public BatchMetadataItem(string name, string value) { Argument.AssertNotNull(name, nameof(name)); Argument.AssertNotNull(value, nameof(value)); @@ -61,19 +61,19 @@ public MetadataItem(string name, string value) Value = value; } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The name of the metadata item. /// The value of the metadata item. /// Keeps track of any properties unknown to the library. 
- internal MetadataItem(string name, string value, IDictionary serializedAdditionalRawData) + internal BatchMetadataItem(string name, string value, IDictionary serializedAdditionalRawData) { Name = name; Value = value; _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal MetadataItem() + /// Initializes a new instance of for deserialization. + internal BatchMetadataItem() { } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs index be7576c53a03..abee99c2949c 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.Serialization.cs @@ -8,6 +8,7 @@ using System; using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Net; using System.Text.Json; using Azure.Core; @@ -39,10 +40,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("id"u8); writer.WriteStringValue(Id); } - if (Optional.IsDefined(Url)) + if (Optional.IsDefined(Uri)) { writer.WritePropertyName("url"u8); - writer.WriteStringValue(Url); + writer.WriteStringValue(Uri.AbsoluteUri); } if (Optional.IsDefined(State)) { @@ -72,7 +73,7 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit if (Optional.IsDefined(IpAddress)) { writer.WritePropertyName("ipAddress"u8); - writer.WriteStringValue(IpAddress); + writer.WriteStringValue(IpAddress.ToString()); } if (Optional.IsDefined(AffinityId)) { @@ -202,13 +203,13 @@ internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderW return null; } string id = default; - string url = default; + Uri url = default; BatchNodeState? state = default; SchedulingState? schedulingState = default; DateTimeOffset? stateTransitionTime = default; DateTimeOffset? 
lastBootTime = default; DateTimeOffset? allocationTime = default; - string ipAddress = default; + IPAddress ipAddress = default; string affinityId = default; string vmSize = default; int? totalTasksRun = default; @@ -235,7 +236,11 @@ internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderW } if (property.NameEquals("url"u8)) { - url = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + url = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("state"u8)) @@ -285,7 +290,11 @@ internal static BatchNode DeserializeBatchNode(JsonElement element, ModelReaderW } if (property.NameEquals("ipAddress"u8)) { - ipAddress = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + ipAddress = IPAddress.Parse(property.Value.GetString()); continue; } if (property.NameEquals("affinityId"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs index 0140f6c364c4..45af2e61763d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNode.cs @@ -7,6 +7,7 @@ using System; using System.Collections.Generic; +using System.Net; namespace Azure.Compute.Batch { @@ -55,7 +56,7 @@ internal BatchNode() /// Initializes a new instance of . /// The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes. - /// The URL of the Compute Node. + /// The URL of the Compute Node. /// The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. 
/// Whether the Compute Node is available for Task scheduling. /// The time at which the Compute Node entered its current state. @@ -83,10 +84,10 @@ internal BatchNode() /// Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. /// Info about the current state of the virtual machine. /// Keeps track of any properties unknown to the library. - internal BatchNode(string id, string url, BatchNodeState? state, SchedulingState? schedulingState, DateTimeOffset? stateTransitionTime, DateTimeOffset? lastBootTime, DateTimeOffset? allocationTime, string ipAddress, string affinityId, string vmSize, int? totalTasksRun, int? runningTasksCount, int? runningTaskSlotsCount, int? totalTasksSucceeded, IReadOnlyList recentTasks, BatchStartTask startTask, BatchStartTaskInfo startTaskInfo, IReadOnlyList certificateReferences, IReadOnlyList errors, bool? isDedicated, BatchNodeEndpointConfiguration endpointConfiguration, BatchNodeAgentInfo nodeAgentInfo, VirtualMachineInfo virtualMachineInfo, IDictionary serializedAdditionalRawData) + internal BatchNode(string id, Uri uri, BatchNodeState? state, SchedulingState? schedulingState, DateTimeOffset? stateTransitionTime, DateTimeOffset? lastBootTime, DateTimeOffset? allocationTime, IPAddress ipAddress, string affinityId, string vmSize, int? totalTasksRun, int? runningTasksCount, int? runningTaskSlotsCount, int? totalTasksSucceeded, IReadOnlyList recentTasks, BatchStartTask startTask, BatchStartTaskInfo startTaskInfo, IReadOnlyList certificateReferences, IReadOnlyList errors, bool? isDedicated, BatchNodeEndpointConfiguration endpointConfiguration, BatchNodeAgentInfo nodeAgentInfo, VirtualMachineInfo virtualMachineInfo, IDictionary serializedAdditionalRawData) { Id = id; - Url = url; + Uri = uri; State = state; SchedulingState = schedulingState; StateTransitionTime = stateTransitionTime; @@ -114,7 +115,7 @@ internal BatchNode(string id, string url, BatchNodeState? 
state, SchedulingState /// The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes. public string Id { get; } /// The URL of the Compute Node. - public string Url { get; } + public Uri Uri { get; } /// The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. public BatchNodeState? State { get; } /// Whether the Compute Node is available for Task scheduling. @@ -126,7 +127,7 @@ internal BatchNode(string id, string url, BatchNodeState? state, SchedulingState /// The time at which this Compute Node was allocated to the Pool. This is the time when the Compute Node was initially allocated and doesn't change once set. It is not updated when the Compute Node is service healed or preempted. public DateTimeOffset? AllocationTime { get; } /// The IP address that other Nodes can use to communicate with this Compute Node. Every Compute Node that is added to a Pool is assigned a unique IP address. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the IP address is reclaimed and could be reused for new Compute Nodes. - public string IpAddress { get; } + public IPAddress IpAddress { get; } /// An identifier which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. Note that this is just a soft affinity. If the target Compute Node is busy or unavailable at the time the Task is scheduled, then the Task will be scheduled elsewhere. public string AffinityId { get; } /// The size of the virtual machine hosting the Compute Node. 
For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes). diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOption.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOption.cs index b4c883a187e5..7009bec129e5 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOption.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOption.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// BatchNodeDeallocateOption enums. - public readonly partial struct BatchNodeDeallocateOption : IEquatable + internal readonly partial struct BatchNodeDeallocateOption : IEquatable { private readonly string _value; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOptions.Serialization.cs similarity index 76% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOptions.Serialization.cs index e8a45d471622..cffbc745d949 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchNodeDeallocateContent : IUtf8JsonSerializable, IJsonModel + internal partial class BatchNodeDeallocateOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter 
writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelRe /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeDeallocateContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeDeallocateOptions)} does not support writing '{format}' format."); } if (Optional.IsDefined(NodeDeallocateOption)) @@ -56,19 +56,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchNodeDeallocateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchNodeDeallocateOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeDeallocateContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeDeallocateOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchNodeDeallocateContent(document.RootElement, options); + return DeserializeBatchNodeDeallocateOptions(document.RootElement, options); } - internal static BatchNodeDeallocateContent DeserializeBatchNodeDeallocateContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchNodeDeallocateOptions DeserializeBatchNodeDeallocateOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -96,46 +96,46 @@ internal static BatchNodeDeallocateContent DeserializeBatchNodeDeallocateContent } } serializedAdditionalRawData = rawDataDictionary; - return new BatchNodeDeallocateContent(nodeDeallocateOption, serializedAdditionalRawData); + return new BatchNodeDeallocateOptions(nodeDeallocateOption, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchNodeDeallocateContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeDeallocateOptions)} does not support writing '{options.Format}' format."); } } - BatchNodeDeallocateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchNodeDeallocateOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeDeallocateContent(document.RootElement, options); + return DeserializeBatchNodeDeallocateOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchNodeDeallocateContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeDeallocateOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static BatchNodeDeallocateContent FromResponse(Response response) + internal static BatchNodeDeallocateOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeDeallocateContent(document.RootElement); + return DeserializeBatchNodeDeallocateOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOptions.cs similarity index 91% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOptions.cs index c14a7b03f81e..8ea543ed59eb 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocateOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Options for deallocating a Compute Node. - public partial class BatchNodeDeallocateContent + internal partial class BatchNodeDeallocateOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,15 +45,15 @@ public partial class BatchNodeDeallocateContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public BatchNodeDeallocateContent() + /// Initializes a new instance of . + public BatchNodeDeallocateOptions() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// When to deallocate the Compute Node and what to do with currently running Tasks. The default value is requeue. /// Keeps track of any properties unknown to the library. - internal BatchNodeDeallocateContent(BatchNodeDeallocateOption? nodeDeallocateOption, IDictionary serializedAdditionalRawData) + internal BatchNodeDeallocateOptions(BatchNodeDeallocateOption? 
nodeDeallocateOption, IDictionary serializedAdditionalRawData) { NodeDeallocateOption = nodeDeallocateOption; _serializedAdditionalRawData = serializedAdditionalRawData; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocationOption.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocationOption.cs index 4f614318a96d..a5ed6ecf987b 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocationOption.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDeallocationOption.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// BatchNodeDeallocationOption enums. - public readonly partial struct BatchNodeDeallocationOption : IEquatable + internal readonly partial struct BatchNodeDeallocationOption : IEquatable { private readonly string _value; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingOptions.Serialization.cs similarity index 76% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingOptions.Serialization.cs index cda84d79a5e9..ea057f532e5d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchNodeDisableSchedulingContent : IUtf8JsonSerializable, IJsonModel + public partial class BatchNodeDisableSchedulingOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, 
ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeDisableSchedulingContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeDisableSchedulingOptions)} does not support writing '{format}' format."); } if (Optional.IsDefined(NodeDisableSchedulingOption)) @@ -56,19 +56,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchNodeDisableSchedulingContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchNodeDisableSchedulingOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeDisableSchedulingContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeDisableSchedulingOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchNodeDisableSchedulingContent(document.RootElement, options); + return DeserializeBatchNodeDisableSchedulingOptions(document.RootElement, options); } - internal static BatchNodeDisableSchedulingContent DeserializeBatchNodeDisableSchedulingContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchNodeDisableSchedulingOptions DeserializeBatchNodeDisableSchedulingOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -96,46 +96,46 @@ internal static BatchNodeDisableSchedulingContent DeserializeBatchNodeDisableSch } } serializedAdditionalRawData = rawDataDictionary; - return new BatchNodeDisableSchedulingContent(nodeDisableSchedulingOption, serializedAdditionalRawData); + return new BatchNodeDisableSchedulingOptions(nodeDisableSchedulingOption, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchNodeDisableSchedulingContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeDisableSchedulingOptions)} does not support writing '{options.Format}' format."); } } - BatchNodeDisableSchedulingContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchNodeDisableSchedulingOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeDisableSchedulingContent(document.RootElement, options); + return DeserializeBatchNodeDisableSchedulingOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchNodeDisableSchedulingContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeDisableSchedulingOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static BatchNodeDisableSchedulingContent FromResponse(Response response) + internal static BatchNodeDisableSchedulingOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeDisableSchedulingContent(document.RootElement); + return DeserializeBatchNodeDisableSchedulingOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingOptions.cs similarity index 90% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingOptions.cs index bfbc6ff80290..1d5d5e91d5e7 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeDisableSchedulingOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for disabling scheduling on an Azure Batch Compute Node. - public partial class BatchNodeDisableSchedulingContent + public partial class BatchNodeDisableSchedulingOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,15 +45,15 @@ public partial class BatchNodeDisableSchedulingContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public BatchNodeDisableSchedulingContent() + /// Initializes a new instance of . + public BatchNodeDisableSchedulingOptions() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// What to do with currently running Tasks when disabling Task scheduling on the Compute Node. The default value is requeue. /// Keeps track of any properties unknown to the library. - internal BatchNodeDisableSchedulingContent(BatchNodeDisableSchedulingOption? 
nodeDisableSchedulingOption, IDictionary serializedAdditionalRawData) + internal BatchNodeDisableSchedulingOptions(BatchNodeDisableSchedulingOption? nodeDisableSchedulingOption, IDictionary serializedAdditionalRawData) { NodeDisableSchedulingOption = nodeDisableSchedulingOption; _serializedAdditionalRawData = serializedAdditionalRawData; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.Serialization.cs index 88d99d5798e4..32163d7f3eff 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.Serialization.cs @@ -39,10 +39,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("name"u8); writer.WriteStringValue(Name); } - if (Optional.IsDefined(Url)) + if (Optional.IsDefined(Uri)) { writer.WritePropertyName("url"u8); - writer.WriteStringValue(Url); + writer.WriteStringValue(Uri.AbsoluteUri); } if (Optional.IsDefined(IsDirectory)) { @@ -92,7 +92,7 @@ internal static BatchNodeFile DeserializeBatchNodeFile(JsonElement element, Mode return null; } string name = default; - string url = default; + Uri url = default; bool? 
isDirectory = default; FileProperties properties = default; IDictionary serializedAdditionalRawData = default; @@ -106,7 +106,11 @@ internal static BatchNodeFile DeserializeBatchNodeFile(JsonElement element, Mode } if (property.NameEquals("url"u8)) { - url = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + url = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("isDirectory"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.cs index 8e04bac0ed49..506a050db5c7 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeFile.cs @@ -52,14 +52,14 @@ internal BatchNodeFile() /// Initializes a new instance of . /// The file path. - /// The URL of the file. + /// The URL of the file. /// Whether the object represents a directory. /// The file properties. /// Keeps track of any properties unknown to the library. - internal BatchNodeFile(string name, string url, bool? isDirectory, FileProperties properties, IDictionary serializedAdditionalRawData) + internal BatchNodeFile(string name, Uri uri, bool? isDirectory, FileProperties properties, IDictionary serializedAdditionalRawData) { Name = name; - Url = url; + Uri = uri; IsDirectory = isDirectory; Properties = properties; _serializedAdditionalRawData = serializedAdditionalRawData; @@ -68,7 +68,7 @@ internal BatchNodeFile(string name, string url, bool? isDirectory, FilePropertie /// The file path. public string Name { get; } /// The URL of the file. - public string Url { get; } + public Uri Uri { get; } /// Whether the object represents a directory. public bool? IsDirectory { get; } /// The file properties. 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.Serialization.cs index 167dbb94ddb5..79d022ae501c 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.Serialization.cs @@ -76,14 +76,18 @@ internal static BatchNodeIdentityReference DeserializeBatchNodeIdentityReference { return null; } - string resourceId = default; + ResourceIdentifier resourceId = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { if (property.NameEquals("resourceId"u8)) { - resourceId = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + resourceId = new ResourceIdentifier(property.Value.GetString()); continue; } if (options.Format != "W") diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.cs index 1c421ab9689c..e674455f97c1 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeIdentityReference.cs @@ -7,6 +7,7 @@ using System; using System.Collections.Generic; +using Azure.Core; namespace Azure.Compute.Batch { @@ -56,13 +57,13 @@ public BatchNodeIdentityReference() /// Initializes a new instance of . /// The ARM resource id of the user assigned identity. /// Keeps track of any properties unknown to the library. 
- internal BatchNodeIdentityReference(string resourceId, IDictionary serializedAdditionalRawData) + internal BatchNodeIdentityReference(ResourceIdentifier resourceId, IDictionary serializedAdditionalRawData) { ResourceId = resourceId; _serializedAdditionalRawData = serializedAdditionalRawData; } /// The ARM resource id of the user assigned identity. - public string ResourceId { get; set; } + public ResourceIdentifier ResourceId { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.Serialization.cs index d220419b0441..eea36026e461 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.Serialization.cs @@ -39,10 +39,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("affinityId"u8); writer.WriteStringValue(AffinityId); } - if (Optional.IsDefined(NodeUrl)) + if (Optional.IsDefined(NodeUri)) { writer.WritePropertyName("nodeUrl"u8); - writer.WriteStringValue(NodeUrl); + writer.WriteStringValue(NodeUri.AbsoluteUri); } if (Optional.IsDefined(PoolId)) { @@ -59,10 +59,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("taskRootDirectory"u8); writer.WriteStringValue(TaskRootDirectory); } - if (Optional.IsDefined(TaskRootDirectoryUrl)) + if (Optional.IsDefined(TaskRootDirectoryUri)) { writer.WritePropertyName("taskRootDirectoryUrl"u8); - writer.WriteStringValue(TaskRootDirectoryUrl); + writer.WriteStringValue(TaskRootDirectoryUri.AbsoluteUri); } if (options.Format != "W" && _serializedAdditionalRawData != null) { @@ -102,11 +102,11 @@ internal static BatchNodeInfo DeserializeBatchNodeInfo(JsonElement element, Mode return null; } string affinityId = default; - string nodeUrl = default; + Uri nodeUrl = default; string poolId = default; string nodeId = 
default; string taskRootDirectory = default; - string taskRootDirectoryUrl = default; + Uri taskRootDirectoryUrl = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -118,7 +118,11 @@ internal static BatchNodeInfo DeserializeBatchNodeInfo(JsonElement element, Mode } if (property.NameEquals("nodeUrl"u8)) { - nodeUrl = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + nodeUrl = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("poolId"u8)) @@ -138,7 +142,11 @@ internal static BatchNodeInfo DeserializeBatchNodeInfo(JsonElement element, Mode } if (property.NameEquals("taskRootDirectoryUrl"u8)) { - taskRootDirectoryUrl = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + taskRootDirectoryUrl = new Uri(property.Value.GetString()); continue; } if (options.Format != "W") diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.cs index d28ff8ede316..2cbca9eaeed7 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeInfo.cs @@ -52,27 +52,27 @@ internal BatchNodeInfo() /// Initializes a new instance of . /// An identifier for the Node on which the Task ran, which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. - /// The URL of the Compute Node on which the Task ran. + /// The URL of the Compute Node on which the Task ran. /// The ID of the Pool on which the Task ran. /// The ID of the Compute Node on which the Task ran. /// The root directory of the Task on the Compute Node. - /// The URL to the root directory of the Task on the Compute Node. + /// The URL to the root directory of the Task on the Compute Node. 
/// Keeps track of any properties unknown to the library. - internal BatchNodeInfo(string affinityId, string nodeUrl, string poolId, string nodeId, string taskRootDirectory, string taskRootDirectoryUrl, IDictionary serializedAdditionalRawData) + internal BatchNodeInfo(string affinityId, Uri nodeUri, string poolId, string nodeId, string taskRootDirectory, Uri taskRootDirectoryUri, IDictionary serializedAdditionalRawData) { AffinityId = affinityId; - NodeUrl = nodeUrl; + NodeUri = nodeUri; PoolId = poolId; NodeId = nodeId; TaskRootDirectory = taskRootDirectory; - TaskRootDirectoryUrl = taskRootDirectoryUrl; + TaskRootDirectoryUri = taskRootDirectoryUri; _serializedAdditionalRawData = serializedAdditionalRawData; } /// An identifier for the Node on which the Task ran, which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. public string AffinityId { get; } /// The URL of the Compute Node on which the Task ran. - public string NodeUrl { get; } + public Uri NodeUri { get; } /// The ID of the Pool on which the Task ran. public string PoolId { get; } /// The ID of the Compute Node on which the Task ran. @@ -80,6 +80,6 @@ internal BatchNodeInfo(string affinityId, string nodeUrl, string poolId, string /// The root directory of the Task on the Compute Node. public string TaskRootDirectory { get; } /// The URL to the root directory of the Task on the Compute Node. 
- public string TaskRootDirectoryUrl { get; } + public Uri TaskRootDirectoryUri { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootOption.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootKind.cs similarity index 62% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootOption.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootKind.cs index d59f69836a9e..8a7db95001a3 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootOption.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootKind.cs @@ -10,14 +10,14 @@ namespace Azure.Compute.Batch { - /// BatchNodeRebootOption enums. - public readonly partial struct BatchNodeRebootOption : IEquatable + /// BatchNodeRebootKind enums. + internal readonly partial struct BatchNodeRebootKind : IEquatable { private readonly string _value; - /// Initializes a new instance of . + /// Initializes a new instance of . /// is null. - public BatchNodeRebootOption(string value) + public BatchNodeRebootKind(string value) { _value = value ?? throw new ArgumentNullException(nameof(value)); } @@ -28,25 +28,25 @@ public BatchNodeRebootOption(string value) private const string RetainedDataValue = "retaineddata"; /// Terminate running Task processes and requeue the Tasks. The Tasks will run again when a Compute Node is available. Restart the Compute Node as soon as Tasks have been terminated. - public static BatchNodeRebootOption Requeue { get; } = new BatchNodeRebootOption(RequeueValue); + public static BatchNodeRebootKind Requeue { get; } = new BatchNodeRebootKind(RequeueValue); /// Terminate running Tasks. The Tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Restart the Compute Node as soon as Tasks have been terminated. 
- public static BatchNodeRebootOption Terminate { get; } = new BatchNodeRebootOption(TerminateValue); + public static BatchNodeRebootKind Terminate { get; } = new BatchNodeRebootKind(TerminateValue); /// Allow currently running Tasks to complete. Schedule no new Tasks while waiting. Restart the Compute Node when all Tasks have completed. - public static BatchNodeRebootOption TaskCompletion { get; } = new BatchNodeRebootOption(TaskCompletionValue); + public static BatchNodeRebootKind TaskCompletion { get; } = new BatchNodeRebootKind(TaskCompletionValue); /// Allow currently running Tasks to complete, then wait for all Task data retention periods to expire. Schedule no new Tasks while waiting. Restart the Compute Node when all Task retention periods have expired. - public static BatchNodeRebootOption RetainedData { get; } = new BatchNodeRebootOption(RetainedDataValue); - /// Determines if two values are the same. - public static bool operator ==(BatchNodeRebootOption left, BatchNodeRebootOption right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(BatchNodeRebootOption left, BatchNodeRebootOption right) => !left.Equals(right); - /// Converts a to a . - public static implicit operator BatchNodeRebootOption(string value) => new BatchNodeRebootOption(value); + public static BatchNodeRebootKind RetainedData { get; } = new BatchNodeRebootKind(RetainedDataValue); + /// Determines if two values are the same. + public static bool operator ==(BatchNodeRebootKind left, BatchNodeRebootKind right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchNodeRebootKind left, BatchNodeRebootKind right) => !left.Equals(right); + /// Converts a to a . 
+ public static implicit operator BatchNodeRebootKind(string value) => new BatchNodeRebootKind(value); /// [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is BatchNodeRebootOption other && Equals(other); + public override bool Equals(object obj) => obj is BatchNodeRebootKind other && Equals(other); /// - public bool Equals(BatchNodeRebootOption other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + public bool Equals(BatchNodeRebootKind other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); /// [EditorBrowsable(EditorBrowsableState.Never)] diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootOptions.Serialization.cs similarity index 73% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootOptions.Serialization.cs index 22d6b76b860b..dc4a7310333a 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchNodeRebootContent : IUtf8JsonSerializable, IJsonModel + internal partial class BatchNodeRebootOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, 
options); @@ -28,16 +28,16 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReader /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeRebootContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeRebootOptions)} does not support writing '{format}' format."); } - if (Optional.IsDefined(NodeRebootOption)) + if (Optional.IsDefined(NodeRebootKind)) { writer.WritePropertyName("nodeRebootOption"u8); - writer.WriteStringValue(NodeRebootOption.Value.ToString()); + writer.WriteStringValue(NodeRebootKind.Value.ToString()); } if (options.Format != "W" && _serializedAdditionalRawData != null) { @@ -56,19 +56,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchNodeRebootContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchNodeRebootOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeRebootContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeRebootOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchNodeRebootContent(document.RootElement, options); + return DeserializeBatchNodeRebootOptions(document.RootElement, options); } - internal static BatchNodeRebootContent DeserializeBatchNodeRebootContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchNodeRebootOptions DeserializeBatchNodeRebootOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -76,7 +76,7 @@ internal static BatchNodeRebootContent DeserializeBatchNodeRebootContent(JsonEle { return null; } - BatchNodeRebootOption? nodeRebootOption = default; + BatchNodeRebootKind? 
nodeRebootOption = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -87,7 +87,7 @@ internal static BatchNodeRebootContent DeserializeBatchNodeRebootContent(JsonEle { continue; } - nodeRebootOption = new BatchNodeRebootOption(property.Value.GetString()); + nodeRebootOption = new BatchNodeRebootKind(property.Value.GetString()); continue; } if (options.Format != "W") @@ -96,46 +96,46 @@ internal static BatchNodeRebootContent DeserializeBatchNodeRebootContent(JsonEle } } serializedAdditionalRawData = rawDataDictionary; - return new BatchNodeRebootContent(nodeRebootOption, serializedAdditionalRawData); + return new BatchNodeRebootOptions(nodeRebootOption, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchNodeRebootContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeRebootOptions)} does not support writing '{options.Format}' format."); } } - BatchNodeRebootContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchNodeRebootOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeRebootContent(document.RootElement, options); + return DeserializeBatchNodeRebootOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchNodeRebootContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeRebootOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchNodeRebootContent FromResponse(Response response) + internal static BatchNodeRebootOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeRebootContent(document.RootElement); + return DeserializeBatchNodeRebootOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootOptions.cs similarity index 79% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootOptions.cs index 2202761043ae..e35002dd7d08 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRebootOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for rebooting an Azure Batch Compute Node. 
- public partial class BatchNodeRebootContent + internal partial class BatchNodeRebootOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,21 +45,21 @@ public partial class BatchNodeRebootContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public BatchNodeRebootContent() + /// Initializes a new instance of . + public BatchNodeRebootOptions() { } - /// Initializes a new instance of . - /// When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. + /// Initializes a new instance of . + /// When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. /// Keeps track of any properties unknown to the library. - internal BatchNodeRebootContent(BatchNodeRebootOption? nodeRebootOption, IDictionary serializedAdditionalRawData) + internal BatchNodeRebootOptions(BatchNodeRebootKind? nodeRebootKind, IDictionary serializedAdditionalRawData) { - NodeRebootOption = nodeRebootOption; + NodeRebootKind = nodeRebootKind; _serializedAdditionalRawData = serializedAdditionalRawData; } /// When to reboot the Compute Node and what to do with currently running Tasks. The default value is requeue. - public BatchNodeRebootOption? NodeRebootOption { get; set; } + public BatchNodeRebootKind? NodeRebootKind { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOption.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOption.cs index 1ceebbf46262..a656436c315a 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOption.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOption.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// BatchNodeReimageOption enums. 
- public readonly partial struct BatchNodeReimageOption : IEquatable + internal readonly partial struct BatchNodeReimageOption : IEquatable { private readonly string _value; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOptions.Serialization.cs similarity index 76% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOptions.Serialization.cs index 5ae6e2abcddb..790b820e2c75 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchNodeReimageContent : IUtf8JsonSerializable, IJsonModel + internal partial class BatchNodeReimageOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReade /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeReimageContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeReimageOptions)} does not support writing '{format}' format."); } if (Optional.IsDefined(NodeReimageOption)) @@ -56,19 +56,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchNodeReimageContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchNodeReimageOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeReimageContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeReimageOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchNodeReimageContent(document.RootElement, options); + return DeserializeBatchNodeReimageOptions(document.RootElement, options); } - internal static BatchNodeReimageContent DeserializeBatchNodeReimageContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchNodeReimageOptions DeserializeBatchNodeReimageOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -96,46 +96,46 @@ internal static BatchNodeReimageContent DeserializeBatchNodeReimageContent(JsonE } } serializedAdditionalRawData = rawDataDictionary; - return new BatchNodeReimageContent(nodeReimageOption, serializedAdditionalRawData); + return new 
BatchNodeReimageOptions(nodeReimageOption, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchNodeReimageContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeReimageOptions)} does not support writing '{options.Format}' format."); } } - BatchNodeReimageContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchNodeReimageOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeReimageContent(document.RootElement, options); + return DeserializeBatchNodeReimageOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchNodeReimageContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeReimageOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchNodeReimageContent FromResponse(Response response) + internal static BatchNodeReimageOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeReimageContent(document.RootElement); + return DeserializeBatchNodeReimageOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOptions.cs similarity index 91% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOptions.cs index 8222990b7344..479f6b64b7d4 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeReimageOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for reimaging an Azure Batch Compute Node. 
- public partial class BatchNodeReimageContent + internal partial class BatchNodeReimageOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,15 +45,15 @@ public partial class BatchNodeReimageContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public BatchNodeReimageContent() + /// Initializes a new instance of . + public BatchNodeReimageOptions() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// When to reimage the Compute Node and what to do with currently running Tasks. The default value is requeue. /// Keeps track of any properties unknown to the library. - internal BatchNodeReimageContent(BatchNodeReimageOption? nodeReimageOption, IDictionary serializedAdditionalRawData) + internal BatchNodeReimageOptions(BatchNodeReimageOption? nodeReimageOption, IDictionary serializedAdditionalRawData) { NodeReimageOption = nodeReimageOption; _serializedAdditionalRawData = serializedAdditionalRawData; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.Serialization.cs index 0d1419261cec..607941db187a 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.Serialization.cs @@ -8,6 +8,7 @@ using System; using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Net; using System.Text.Json; using Azure.Core; @@ -35,7 +36,7 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WritePropertyName("remoteLoginIPAddress"u8); - writer.WriteStringValue(RemoteLoginIpAddress); + writer.WriteStringValue(RemoteLoginIpAddress.ToString()); writer.WritePropertyName("remoteLoginPort"u8); writer.WriteNumberValue(RemoteLoginPort); if (options.Format != "W" && 
_serializedAdditionalRawData != null) @@ -75,7 +76,7 @@ internal static BatchNodeRemoteLoginSettings DeserializeBatchNodeRemoteLoginSett { return null; } - string remoteLoginIPAddress = default; + IPAddress remoteLoginIPAddress = default; int remoteLoginPort = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -83,7 +84,7 @@ internal static BatchNodeRemoteLoginSettings DeserializeBatchNodeRemoteLoginSett { if (property.NameEquals("remoteLoginIPAddress"u8)) { - remoteLoginIPAddress = property.Value.GetString(); + remoteLoginIPAddress = IPAddress.Parse(property.Value.GetString()); continue; } if (property.NameEquals("remoteLoginPort"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.cs index 3b66830690ae..d76d4c1ac8b9 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoteLoginSettings.cs @@ -7,6 +7,7 @@ using System; using System.Collections.Generic; +using System.Net; namespace Azure.Compute.Batch { @@ -49,7 +50,7 @@ public partial class BatchNodeRemoteLoginSettings /// The IP address used for remote login to the Compute Node. /// The port used for remote login to the Compute Node. /// is null. - internal BatchNodeRemoteLoginSettings(string remoteLoginIpAddress, int remoteLoginPort) + internal BatchNodeRemoteLoginSettings(IPAddress remoteLoginIpAddress, int remoteLoginPort) { Argument.AssertNotNull(remoteLoginIpAddress, nameof(remoteLoginIpAddress)); @@ -61,7 +62,7 @@ internal BatchNodeRemoteLoginSettings(string remoteLoginIpAddress, int remoteLog /// The IP address used for remote login to the Compute Node. /// The port used for remote login to the Compute Node. /// Keeps track of any properties unknown to the library. 
- internal BatchNodeRemoteLoginSettings(string remoteLoginIpAddress, int remoteLoginPort, IDictionary serializedAdditionalRawData) + internal BatchNodeRemoteLoginSettings(IPAddress remoteLoginIpAddress, int remoteLoginPort, IDictionary serializedAdditionalRawData) { RemoteLoginIpAddress = remoteLoginIpAddress; RemoteLoginPort = remoteLoginPort; @@ -74,7 +75,7 @@ internal BatchNodeRemoteLoginSettings() } /// The IP address used for remote login to the Compute Node. - public string RemoteLoginIpAddress { get; } + public IPAddress RemoteLoginIpAddress { get; } /// The port used for remote login to the Compute Node. public int RemoteLoginPort { get; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveOptions.Serialization.cs similarity index 80% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveOptions.Serialization.cs index 97e6a6bd5b02..04224a0b9010 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchNodeRemoveContent : IUtf8JsonSerializable, IJsonModel + internal partial class BatchNodeRemoveOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ 
-28,15 +28,15 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReader /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeRemoveContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeRemoveOptions)} does not support writing '{format}' format."); } writer.WritePropertyName("nodeList"u8); writer.WriteStartArray(); - foreach (var item in NodeList) + foreach (var item in NodeIds) { writer.WriteStringValue(item); } @@ -68,19 +68,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchNodeRemoveContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchNodeRemoveOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeRemoveContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeRemoveOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchNodeRemoveContent(document.RootElement, options); + return DeserializeBatchNodeRemoveOptions(document.RootElement, options); } - internal static BatchNodeRemoveContent DeserializeBatchNodeRemoveContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchNodeRemoveOptions DeserializeBatchNodeRemoveOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -129,46 +129,46 @@ internal static BatchNodeRemoveContent DeserializeBatchNodeRemoveContent(JsonEle } } serializedAdditionalRawData = rawDataDictionary; - return new BatchNodeRemoveContent(nodeList, resizeTimeout, nodeDeallocationOption, serializedAdditionalRawData); + return new BatchNodeRemoveOptions(nodeList, resizeTimeout, nodeDeallocationOption, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchNodeRemoveContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeRemoveOptions)} does not support writing '{options.Format}' format."); } } - BatchNodeRemoveContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchNodeRemoveOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeRemoveContent(document.RootElement, options); + return DeserializeBatchNodeRemoveOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchNodeRemoveContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeRemoveOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static BatchNodeRemoveContent FromResponse(Response response) + internal static BatchNodeRemoveOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeRemoveContent(document.RootElement); + return DeserializeBatchNodeRemoveOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveOptions.cs similarity index 77% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveOptions.cs index ba28ad62da22..f77c6e4a2766 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeRemoveOptions.cs @@ -12,7 +12,7 @@ namespace Azure.Compute.Batch { /// Parameters for removing nodes from an Azure Batch Pool. - public partial class BatchNodeRemoveContent + internal partial class BatchNodeRemoveOptions { /// /// Keeps track of any properties unknown to the library. @@ -46,36 +46,36 @@ public partial class BatchNodeRemoveContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - /// A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. - /// is null. - public BatchNodeRemoveContent(IEnumerable nodeList) + /// Initializes a new instance of . + /// A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. + /// is null. 
+ public BatchNodeRemoveOptions(IEnumerable nodeIds) { - Argument.AssertNotNull(nodeList, nameof(nodeList)); + Argument.AssertNotNull(nodeIds, nameof(nodeIds)); - NodeList = nodeList.ToList(); + NodeIds = nodeIds.ToList(); } - /// Initializes a new instance of . - /// A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. + /// Initializes a new instance of . + /// A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. /// The timeout for removal of Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). /// Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. The default value is requeue. /// Keeps track of any properties unknown to the library. - internal BatchNodeRemoveContent(IList nodeList, TimeSpan? resizeTimeout, BatchNodeDeallocationOption? nodeDeallocationOption, IDictionary serializedAdditionalRawData) + internal BatchNodeRemoveOptions(IList nodeIds, TimeSpan? resizeTimeout, BatchNodeDeallocationOption? nodeDeallocationOption, IDictionary serializedAdditionalRawData) { - NodeList = nodeList; + NodeIds = nodeIds; ResizeTimeout = resizeTimeout; NodeDeallocationOption = nodeDeallocationOption; _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal BatchNodeRemoveContent() + /// Initializes a new instance of for deserialization. + internal BatchNodeRemoveOptions() { } /// A list containing the IDs of the Compute Nodes to be removed from the specified Pool. A maximum of 100 nodes may be removed per request. 
- public IList NodeList { get; } + public IList NodeIds { get; } /// The timeout for removal of Compute Nodes to the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). public TimeSpan? ResizeTimeout { get; set; } /// Determines what to do with a Compute Node and its running task(s) after it has been selected for deallocation. The default value is requeue. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateOptions.Serialization.cs similarity index 81% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateOptions.Serialization.cs index b672043c0e47..01b7ebac0761 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchNodeUserCreateContent : IUtf8JsonSerializable, IJsonModel + public partial class BatchNodeUserCreateOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelRe /// The client 
options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeUserCreateContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeUserCreateOptions)} does not support writing '{format}' format."); } writer.WritePropertyName("name"u8); @@ -73,19 +73,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchNodeUserCreateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchNodeUserCreateOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeUserCreateContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeUserCreateOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchNodeUserCreateContent(document.RootElement, options); + return DeserializeBatchNodeUserCreateOptions(document.RootElement, options); } - internal static BatchNodeUserCreateContent DeserializeBatchNodeUserCreateContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchNodeUserCreateOptions DeserializeBatchNodeUserCreateOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -141,7 +141,7 @@ internal static BatchNodeUserCreateContent DeserializeBatchNodeUserCreateContent } } serializedAdditionalRawData = rawDataDictionary; - return new BatchNodeUserCreateContent( + return new BatchNodeUserCreateOptions( name, isAdmin, expiryTime, @@ -150,43 +150,43 @@ internal static BatchNodeUserCreateContent DeserializeBatchNodeUserCreateContent serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchNodeUserCreateContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeUserCreateOptions)} does not support writing '{options.Format}' format."); } } - BatchNodeUserCreateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchNodeUserCreateOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeUserCreateContent(document.RootElement, options); + return DeserializeBatchNodeUserCreateOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchNodeUserCreateContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeUserCreateOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static BatchNodeUserCreateContent FromResponse(Response response) + internal static BatchNodeUserCreateOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeUserCreateContent(document.RootElement); + return DeserializeBatchNodeUserCreateOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateOptions.cs similarity index 93% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateOptions.cs index e3d6007ed5fd..7b389104a5be 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserCreateOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for creating a user account for RDP or SSH access on an Azure Batch Compute Node. - public partial class BatchNodeUserCreateContent + public partial class BatchNodeUserCreateOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,24 +45,24 @@ public partial class BatchNodeUserCreateContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// The user name of the Account. /// is null. - public BatchNodeUserCreateContent(string name) + public BatchNodeUserCreateOptions(string name) { Argument.AssertNotNull(name, nameof(name)); Name = name; } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The user name of the Account. /// Whether the Account should be an administrator on the Compute Node. The default value is false. /// The time at which the Account should expire. 
If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. /// The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. /// The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). /// Keeps track of any properties unknown to the library. - internal BatchNodeUserCreateContent(string name, bool? isAdmin, DateTimeOffset? expiryTime, string password, string sshPublicKey, IDictionary serializedAdditionalRawData) + internal BatchNodeUserCreateOptions(string name, bool? isAdmin, DateTimeOffset? expiryTime, string password, string sshPublicKey, IDictionary serializedAdditionalRawData) { Name = name; IsAdmin = isAdmin; @@ -72,8 +72,8 @@ internal BatchNodeUserCreateContent(string name, bool? isAdmin, DateTimeOffset? _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal BatchNodeUserCreateContent() + /// Initializes a new instance of for deserialization. 
+ internal BatchNodeUserCreateOptions() { } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateOptions.Serialization.cs similarity index 78% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateOptions.Serialization.cs index 48333a758f11..6416efd3061f 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchNodeUserUpdateContent : IUtf8JsonSerializable, IJsonModel + public partial class BatchNodeUserUpdateOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelRe /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeUserUpdateContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeUserUpdateOptions)} does not support writing '{format}' format."); } if (Optional.IsDefined(Password)) @@ -66,19 +66,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchNodeUserUpdateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchNodeUserUpdateOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchNodeUserUpdateContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchNodeUserUpdateOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchNodeUserUpdateContent(document.RootElement, options); + return DeserializeBatchNodeUserUpdateOptions(document.RootElement, options); } - internal static BatchNodeUserUpdateContent DeserializeBatchNodeUserUpdateContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchNodeUserUpdateOptions DeserializeBatchNodeUserUpdateOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -118,46 +118,46 @@ internal static BatchNodeUserUpdateContent DeserializeBatchNodeUserUpdateContent } } serializedAdditionalRawData = rawDataDictionary; - return new BatchNodeUserUpdateContent(password, expiryTime, 
sshPublicKey, serializedAdditionalRawData); + return new BatchNodeUserUpdateOptions(password, expiryTime, sshPublicKey, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchNodeUserUpdateContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeUserUpdateOptions)} does not support writing '{options.Format}' format."); } } - BatchNodeUserUpdateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchNodeUserUpdateOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeUserUpdateContent(document.RootElement, options); + return DeserializeBatchNodeUserUpdateOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchNodeUserUpdateContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchNodeUserUpdateOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchNodeUserUpdateContent FromResponse(Response response) + internal static BatchNodeUserUpdateOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchNodeUserUpdateContent(document.RootElement); + return DeserializeBatchNodeUserUpdateOptions(document.RootElement); } /// Convert into a . 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateOptions.cs similarity index 94% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateOptions.cs index 9741f08b6208..7902ea43a36f 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchNodeUserUpdateOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for updating a user account for RDP or SSH access on an Azure Batch Compute Node. - public partial class BatchNodeUserUpdateContent + public partial class BatchNodeUserUpdateOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,17 +45,17 @@ public partial class BatchNodeUserUpdateContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public BatchNodeUserUpdateContent() + /// Initializes a new instance of . + public BatchNodeUserUpdateOptions() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. If omitted, any existing password is removed. /// The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. /// The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. 
If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If omitted, any existing SSH public key is removed. /// Keeps track of any properties unknown to the library. - internal BatchNodeUserUpdateContent(string password, DateTimeOffset? expiryTime, string sshPublicKey, IDictionary serializedAdditionalRawData) + internal BatchNodeUserUpdateOptions(string password, DateTimeOffset? expiryTime, string sshPublicKey, IDictionary serializedAdditionalRawData) { Password = password; ExpiryTime = expiryTime; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OSDisk.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchOsDisk.Serialization.cs similarity index 77% rename from sdk/batch/Azure.Compute.Batch/src/Generated/OSDisk.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchOsDisk.Serialization.cs index 48698804cb8f..d67c1958dd35 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/OSDisk.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchOsDisk.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class OSDisk : IUtf8JsonSerializable, IJsonModel + public partial class BatchOsDisk : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions op /// The client options for reading and writing models. 
protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(OSDisk)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchOsDisk)} does not support writing '{format}' format."); } if (Optional.IsDefined(EphemeralOSDiskSettings)) @@ -76,19 +76,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - OSDisk IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchOsDisk IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(OSDisk)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchOsDisk)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeOSDisk(document.RootElement, options); + return DeserializeBatchOsDisk(document.RootElement, options); } - internal static OSDisk DeserializeOSDisk(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchOsDisk DeserializeBatchOsDisk(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -96,7 +96,7 @@ internal static OSDisk DeserializeOSDisk(JsonElement element, ModelReaderWriterO { return null; } - DiffDiskSettings ephemeralOSDiskSettings = default; + BatchDiffDiskSettings ephemeralOSDiskSettings = default; CachingType? caching = default; int? 
diskSizeGB = default; ManagedDisk managedDisk = default; @@ -111,7 +111,7 @@ internal static OSDisk DeserializeOSDisk(JsonElement element, ModelReaderWriterO { continue; } - ephemeralOSDiskSettings = DiffDiskSettings.DeserializeDiffDiskSettings(property.Value, options); + ephemeralOSDiskSettings = BatchDiffDiskSettings.DeserializeBatchDiffDiskSettings(property.Value, options); continue; } if (property.NameEquals("caching"u8)) @@ -156,7 +156,7 @@ internal static OSDisk DeserializeOSDisk(JsonElement element, ModelReaderWriterO } } serializedAdditionalRawData = rawDataDictionary; - return new OSDisk( + return new BatchOsDisk( ephemeralOSDiskSettings, caching, diskSizeGB, @@ -165,43 +165,43 @@ internal static OSDisk DeserializeOSDisk(JsonElement element, ModelReaderWriterO serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(OSDisk)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchOsDisk)} does not support writing '{options.Format}' format."); } } - OSDisk IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchOsDisk IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeOSDisk(document.RootElement, options); + return DeserializeBatchOsDisk(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(OSDisk)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchOsDisk)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static OSDisk FromResponse(Response response) + internal static BatchOsDisk FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeOSDisk(document.RootElement); + return DeserializeBatchOsDisk(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OSDisk.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchOsDisk.cs similarity index 86% rename from sdk/batch/Azure.Compute.Batch/src/Generated/OSDisk.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchOsDisk.cs index 2c11fc67cba9..3a08b8c355a5 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/OSDisk.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchOsDisk.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Settings for the operating system disk of the compute node (VM). - public partial class OSDisk + public partial class BatchOsDisk { /// /// Keeps track of any properties unknown to the library. 
@@ -45,19 +45,19 @@ public partial class OSDisk /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public OSDisk() + /// Initializes a new instance of . + public BatchOsDisk() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). /// Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. /// The initial disk size in GB when creating new OS disk. /// The managed disk parameters. /// Specifies whether writeAccelerator should be enabled or disabled on the disk. /// Keeps track of any properties unknown to the library. - internal OSDisk(DiffDiskSettings ephemeralOSDiskSettings, CachingType? caching, int? diskSizeGB, ManagedDisk managedDisk, bool? writeAcceleratorEnabled, IDictionary serializedAdditionalRawData) + internal BatchOsDisk(BatchDiffDiskSettings ephemeralOSDiskSettings, CachingType? caching, int? diskSizeGB, ManagedDisk managedDisk, bool? writeAcceleratorEnabled, IDictionary serializedAdditionalRawData) { EphemeralOSDiskSettings = ephemeralOSDiskSettings; Caching = caching; @@ -68,7 +68,7 @@ internal OSDisk(DiffDiskSettings ephemeralOSDiskSettings, CachingType? caching, } /// Specifies the ephemeral Disk Settings for the operating system disk used by the compute node (VM). - public DiffDiskSettings EphemeralOSDiskSettings { get; set; } + public BatchDiffDiskSettings EphemeralOSDiskSettings { get; set; } /// Specifies the caching requirements. Possible values are: None, ReadOnly, ReadWrite. The default values are: None for Standard storage. ReadOnly for Premium storage. public CachingType? Caching { get; set; } /// The initial disk size in GB when creating new OS disk. 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs index 04b663c3aeee..888a8ffa9b57 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.Serialization.cs @@ -44,15 +44,15 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("displayName"u8); writer.WriteStringValue(DisplayName); } - if (options.Format != "W" && Optional.IsDefined(Url)) + if (options.Format != "W" && Optional.IsDefined(Uri)) { writer.WritePropertyName("url"u8); - writer.WriteStringValue(Url); + writer.WriteStringValue(Uri.AbsoluteUri); } if (options.Format != "W" && Optional.IsDefined(ETag)) { writer.WritePropertyName("eTag"u8); - writer.WriteStringValue(ETag); + writer.WriteStringValue(ETag.Value.ToString()); } if (options.Format != "W" && Optional.IsDefined(LastModified)) { @@ -225,10 +225,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WriteEndArray(); } - if (options.Format != "W" && Optional.IsDefined(Stats)) + if (options.Format != "W" && Optional.IsDefined(PoolStatistics)) { writer.WritePropertyName("stats"u8); - writer.WriteObjectValue(Stats, options); + writer.WriteObjectValue(PoolStatistics, options); } if (options.Format != "W" && Optional.IsCollectionDefined(MountConfiguration)) { @@ -299,8 +299,8 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW } string id = default; string displayName = default; - string url = default; - string eTag = default; + Uri url = default; + ETag? eTag = default; DateTimeOffset? lastModified = default; DateTimeOffset? creationTime = default; BatchPoolState? state = default; @@ -328,7 +328,7 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW int? 
taskSlotsPerNode = default; BatchTaskSchedulingPolicy taskSchedulingPolicy = default; IReadOnlyList userAccounts = default; - IReadOnlyList metadata = default; + IReadOnlyList metadata = default; BatchPoolStatistics stats = default; IReadOnlyList mountConfiguration = default; BatchPoolIdentity identity = default; @@ -351,12 +351,20 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW } if (property.NameEquals("url"u8)) { - url = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + url = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("eTag"u8)) { - eTag = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + eTag = new ETag(property.Value.GetString()); continue; } if (property.NameEquals("lastModified"u8)) @@ -625,10 +633,10 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + array.Add(BatchMetadataItem.DeserializeBatchMetadataItem(item, options)); } metadata = array; continue; @@ -730,7 +738,7 @@ internal static BatchPool DeserializeBatchPool(JsonElement element, ModelReaderW taskSlotsPerNode, taskSchedulingPolicy, userAccounts ?? new ChangeTrackingList(), - metadata ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingList(), stats, mountConfiguration ?? 
new ChangeTrackingList(), identity, diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs index db28d05648db..7318a38b3688 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPool.cs @@ -53,14 +53,14 @@ internal BatchPool() CertificateReferences = new ChangeTrackingList(); ApplicationPackageReferences = new ChangeTrackingList(); UserAccounts = new ChangeTrackingList(); - Metadata = new ChangeTrackingList(); + Metadata = new ChangeTrackingList(); MountConfiguration = new ChangeTrackingList(); } /// Initializes a new instance of . /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. - /// The URL of the Pool. + /// The URL of the Pool. /// The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can be pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. /// The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. /// The creation time of the Pool. @@ -95,18 +95,18 @@ internal BatchPool() /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. 
/// The list of user Accounts to be created on each Compute Node in the Pool. /// A list of name-value pairs associated with the Pool as metadata. - /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. /// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The current state of the pool communication mode. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// Keeps track of any properties unknown to the library. - internal BatchPool(string id, string displayName, string url, string eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchPoolState? state, DateTimeOffset? stateTransitionTime, AllocationState? allocationState, DateTimeOffset? 
allocationStateTransitionTime, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IReadOnlyList resizeErrors, IReadOnlyDictionary resourceTags, int? currentDedicatedNodes, int? currentLowPriorityNodes, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, AutoScaleRun autoScaleRun, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IReadOnlyList certificateReferences, IReadOnlyList applicationPackageReferences, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IReadOnlyList userAccounts, IReadOnlyList metadata, BatchPoolStatistics stats, IReadOnlyList mountConfiguration, BatchPoolIdentity identity, BatchNodeCommunicationMode? targetNodeCommunicationMode, BatchNodeCommunicationMode? currentNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + internal BatchPool(string id, string displayName, Uri uri, ETag? eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, BatchPoolState? state, DateTimeOffset? stateTransitionTime, AllocationState? allocationState, DateTimeOffset? allocationStateTransitionTime, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IReadOnlyList resizeErrors, IReadOnlyDictionary resourceTags, int? currentDedicatedNodes, int? currentLowPriorityNodes, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, AutoScaleRun autoScaleRun, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IReadOnlyList certificateReferences, IReadOnlyList applicationPackageReferences, int? 
taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IReadOnlyList userAccounts, IReadOnlyList metadata, BatchPoolStatistics poolStatistics, IReadOnlyList mountConfiguration, BatchPoolIdentity identity, BatchNodeCommunicationMode? targetNodeCommunicationMode, BatchNodeCommunicationMode? currentNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) { Id = id; DisplayName = displayName; - Url = url; + Uri = uri; ETag = eTag; LastModified = lastModified; CreationTime = creationTime; @@ -136,7 +136,7 @@ internal BatchPool(string id, string displayName, string url, string eTag, DateT TaskSchedulingPolicy = taskSchedulingPolicy; UserAccounts = userAccounts; Metadata = metadata; - Stats = stats; + PoolStatistics = poolStatistics; MountConfiguration = mountConfiguration; Identity = identity; TargetNodeCommunicationMode = targetNodeCommunicationMode; @@ -150,9 +150,9 @@ internal BatchPool(string id, string displayName, string url, string eTag, DateT /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. public string DisplayName { get; } /// The URL of the Pool. - public string Url { get; } + public Uri Uri { get; } /// The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can be pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. - public string ETag { get; } + public ETag? ETag { get; } /// The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. public DateTimeOffset? LastModified { get; } /// The creation time of the Pool. 
@@ -213,9 +213,9 @@ internal BatchPool(string id, string displayName, string url, string eTag, DateT /// The list of user Accounts to be created on each Compute Node in the Pool. public IReadOnlyList UserAccounts { get; } /// A list of name-value pairs associated with the Pool as metadata. - public IReadOnlyList Metadata { get; } + public IReadOnlyList Metadata { get; } /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. - public BatchPoolStatistics Stats { get; } + public BatchPoolStatistics PoolStatistics { get; } /// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. public IReadOnlyList MountConfiguration { get; } /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolAutoScaleEnableOptions.Serialization.cs similarity index 77% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolAutoScaleEnableOptions.Serialization.cs index db9fc6e2ebfd..4d408806acec 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolAutoScaleEnableOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchPoolEnableAutoScaleContent : IUtf8JsonSerializable, IJsonModel + public partial class BatchPoolAutoScaleEnableOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, Mo /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchPoolEnableAutoScaleContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchPoolAutoScaleEnableOptions)} does not support writing '{format}' format."); } if (Optional.IsDefined(AutoScaleFormula)) @@ -61,19 +61,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchPoolEnableAutoScaleContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchPoolAutoScaleEnableOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchPoolEnableAutoScaleContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchPoolAutoScaleEnableOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchPoolEnableAutoScaleContent(document.RootElement, options); + return DeserializeBatchPoolAutoScaleEnableOptions(document.RootElement, options); } - internal static BatchPoolEnableAutoScaleContent DeserializeBatchPoolEnableAutoScaleContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchPoolAutoScaleEnableOptions DeserializeBatchPoolAutoScaleEnableOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -107,46 +107,46 @@ internal static BatchPoolEnableAutoScaleContent DeserializeBatchPoolEnableAutoSc } } serializedAdditionalRawData = rawDataDictionary; - 
return new BatchPoolEnableAutoScaleContent(autoScaleFormula, autoScaleEvaluationInterval, serializedAdditionalRawData); + return new BatchPoolAutoScaleEnableOptions(autoScaleFormula, autoScaleEvaluationInterval, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchPoolEnableAutoScaleContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchPoolAutoScaleEnableOptions)} does not support writing '{options.Format}' format."); } } - BatchPoolEnableAutoScaleContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchPoolAutoScaleEnableOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchPoolEnableAutoScaleContent(document.RootElement, options); + return DeserializeBatchPoolAutoScaleEnableOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchPoolEnableAutoScaleContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchPoolAutoScaleEnableOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchPoolEnableAutoScaleContent FromResponse(Response response) + internal static BatchPoolAutoScaleEnableOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchPoolEnableAutoScaleContent(document.RootElement); + return DeserializeBatchPoolAutoScaleEnableOptions(document.RootElement); } /// Convert into a . 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolAutoScaleEnableOptions.cs similarity index 95% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolAutoScaleEnableOptions.cs index b3b15b6404d6..fa520467052f 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEnableAutoScaleContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolAutoScaleEnableOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for enabling automatic scaling on an Azure Batch Pool. - public partial class BatchPoolEnableAutoScaleContent + public partial class BatchPoolAutoScaleEnableOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,16 +45,16 @@ public partial class BatchPoolEnableAutoScaleContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public BatchPoolEnableAutoScaleContent() + /// Initializes a new instance of . + public BatchPoolAutoScaleEnableOptions() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The formula for the desired number of Compute Nodes in the Pool. The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. /// The time interval at which to automatically adjust the Pool size according to the autoscale formula. 
The default value is 15 minutes. The minimum and maximum value are 5 minutes and 168 hours respectively. If you specify a value less than 5 minutes or greater than 168 hours, the Batch service rejects the request with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). If you specify a new interval, then the existing autoscale evaluation schedule will be stopped and a new autoscale evaluation schedule will be started, with its starting time being the time when this request was issued. /// Keeps track of any properties unknown to the library. - internal BatchPoolEnableAutoScaleContent(string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, IDictionary serializedAdditionalRawData) + internal BatchPoolAutoScaleEnableOptions(string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, IDictionary serializedAdditionalRawData) { AutoScaleFormula = autoScaleFormula; AutoScaleEvaluationInterval = autoScaleEvaluationInterval; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolAutoScaleEvaluateOptions.Serialization.cs similarity index 73% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolAutoScaleEvaluateOptions.Serialization.cs index 99793a9ee53b..514466a45893 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolAutoScaleEvaluateOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchPoolEvaluateAutoScaleContent : IUtf8JsonSerializable, IJsonModel + public partial class BatchPoolAutoScaleEvaluateOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => 
((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchPoolEvaluateAutoScaleContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchPoolAutoScaleEvaluateOptions)} does not support writing '{format}' format."); } writer.WritePropertyName("autoScaleFormula"u8); @@ -53,19 +53,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchPoolEvaluateAutoScaleContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchPoolAutoScaleEvaluateOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchPoolEvaluateAutoScaleContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchPoolAutoScaleEvaluateOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchPoolEvaluateAutoScaleContent(document.RootElement, options); + return DeserializeBatchPoolAutoScaleEvaluateOptions(document.RootElement, options); } - internal static BatchPoolEvaluateAutoScaleContent DeserializeBatchPoolEvaluateAutoScaleContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchPoolAutoScaleEvaluateOptions DeserializeBatchPoolAutoScaleEvaluateOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -89,46 +89,46 @@ internal static BatchPoolEvaluateAutoScaleContent DeserializeBatchPoolEvaluateAu } } serializedAdditionalRawData = rawDataDictionary; - return new BatchPoolEvaluateAutoScaleContent(autoScaleFormula, serializedAdditionalRawData); + return new BatchPoolAutoScaleEvaluateOptions(autoScaleFormula, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchPoolEvaluateAutoScaleContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchPoolAutoScaleEvaluateOptions)} does not support writing '{options.Format}' format."); } } - BatchPoolEvaluateAutoScaleContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchPoolAutoScaleEvaluateOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchPoolEvaluateAutoScaleContent(document.RootElement, options); + return DeserializeBatchPoolAutoScaleEvaluateOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchPoolEvaluateAutoScaleContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchPoolAutoScaleEvaluateOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static BatchPoolEvaluateAutoScaleContent FromResponse(Response response) + internal static BatchPoolAutoScaleEvaluateOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchPoolEvaluateAutoScaleContent(document.RootElement); + return DeserializeBatchPoolAutoScaleEvaluateOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolAutoScaleEvaluateOptions.cs similarity index 90% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolAutoScaleEvaluateOptions.cs index 50097db45eac..3cc89c95b9a1 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEvaluateAutoScaleContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolAutoScaleEvaluateOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for evaluating an automatic scaling formula on an Azure Batch Pool. - public partial class BatchPoolEvaluateAutoScaleContent + public partial class BatchPoolAutoScaleEvaluateOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,27 +45,27 @@ public partial class BatchPoolEvaluateAutoScaleContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-automatic-scaling). /// is null. 
- public BatchPoolEvaluateAutoScaleContent(string autoScaleFormula) + public BatchPoolAutoScaleEvaluateOptions(string autoScaleFormula) { Argument.AssertNotNull(autoScaleFormula, nameof(autoScaleFormula)); AutoScaleFormula = autoScaleFormula; } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The formula for the desired number of Compute Nodes in the Pool. The formula is validated and its results calculated, but it is not applied to the Pool. To apply the formula to the Pool, 'Enable automatic scaling on a Pool'. For more information about specifying this formula, see Automatically scale Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-automatic-scaling). /// Keeps track of any properties unknown to the library. - internal BatchPoolEvaluateAutoScaleContent(string autoScaleFormula, IDictionary serializedAdditionalRawData) + internal BatchPoolAutoScaleEvaluateOptions(string autoScaleFormula, IDictionary serializedAdditionalRawData) { AutoScaleFormula = autoScaleFormula; _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal BatchPoolEvaluateAutoScaleContent() + /// Initializes a new instance of for deserialization. 
+ internal BatchPoolAutoScaleEvaluateOptions() { } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.Serialization.cs similarity index 92% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.Serialization.cs index c935821f281d..cf08e0cf4e14 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchPoolCreateContent : IUtf8JsonSerializable, IJsonModel + public partial class BatchPoolCreateOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReader /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchPoolCreateContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchPoolCreateOptions)} does not support writing '{format}' format."); } writer.WritePropertyName("id"u8); @@ -191,19 +191,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchPoolCreateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchPoolCreateOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchPoolCreateContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchPoolCreateOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchPoolCreateContent(document.RootElement, options); + return DeserializeBatchPoolCreateOptions(document.RootElement, options); } - internal static BatchPoolCreateContent DeserializeBatchPoolCreateContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchPoolCreateOptions DeserializeBatchPoolCreateOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -230,7 +230,7 @@ internal static BatchPoolCreateContent DeserializeBatchPoolCreateContent(JsonEle int? 
taskSlotsPerNode = default; BatchTaskSchedulingPolicy taskSchedulingPolicy = default; IList userAccounts = default; - IList metadata = default; + IList metadata = default; IList mountConfiguration = default; BatchNodeCommunicationMode? targetNodeCommunicationMode = default; UpgradePolicy upgradePolicy = default; @@ -419,10 +419,10 @@ internal static BatchPoolCreateContent DeserializeBatchPoolCreateContent(JsonEle { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + array.Add(BatchMetadataItem.DeserializeBatchMetadataItem(item, options)); } metadata = array; continue; @@ -465,7 +465,7 @@ internal static BatchPoolCreateContent DeserializeBatchPoolCreateContent(JsonEle } } serializedAdditionalRawData = rawDataDictionary; - return new BatchPoolCreateContent( + return new BatchPoolCreateOptions( id, displayName, vmSize, @@ -485,50 +485,50 @@ internal static BatchPoolCreateContent DeserializeBatchPoolCreateContent(JsonEle taskSlotsPerNode, taskSchedulingPolicy, userAccounts ?? new ChangeTrackingList(), - metadata ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingList(), mountConfiguration ?? new ChangeTrackingList(), targetNodeCommunicationMode, upgradePolicy, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchPoolCreateContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchPoolCreateOptions)} does not support writing '{options.Format}' format."); } } - BatchPoolCreateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchPoolCreateOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchPoolCreateContent(document.RootElement, options); + return DeserializeBatchPoolCreateOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchPoolCreateContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchPoolCreateOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static BatchPoolCreateContent FromResponse(Response response) + internal static BatchPoolCreateOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchPoolCreateContent(document.RootElement); + return DeserializeBatchPoolCreateOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.cs similarity index 96% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.cs index d8fcbc31786e..986899023db9 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolCreateOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for creating an Azure Batch Pool. - public partial class BatchPoolCreateContent + public partial class BatchPoolCreateOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,11 +45,11 @@ public partial class BatchPoolCreateContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. 
For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). /// or is null. - public BatchPoolCreateContent(string id, string vmSize) + public BatchPoolCreateOptions(string id, string vmSize) { Argument.AssertNotNull(id, nameof(id)); Argument.AssertNotNull(vmSize, nameof(vmSize)); @@ -60,11 +60,11 @@ public BatchPoolCreateContent(string id, string vmSize) CertificateReferences = new ChangeTrackingList(); ApplicationPackageReferences = new ChangeTrackingList(); UserAccounts = new ChangeTrackingList(); - Metadata = new ChangeTrackingList(); + Metadata = new ChangeTrackingList(); MountConfiguration = new ChangeTrackingList(); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). 
Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). @@ -94,7 +94,7 @@ public BatchPoolCreateContent(string id, string vmSize) /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// Keeps track of any properties unknown to the library. - internal BatchPoolCreateContent(string id, string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IDictionary resourceTags, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IList userAccounts, IList metadata, IList mountConfiguration, BatchNodeCommunicationMode? targetNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + internal BatchPoolCreateOptions(string id, string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, TimeSpan? resizeTimeout, IDictionary resourceTags, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, IList userAccounts, IList metadata, IList mountConfiguration, BatchNodeCommunicationMode? 
targetNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) { Id = id; DisplayName = displayName; @@ -122,8 +122,8 @@ internal BatchPoolCreateContent(string id, string displayName, string vmSize, Vi _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal BatchPoolCreateContent() + /// Initializes a new instance of for deserialization. + internal BatchPoolCreateOptions() { } @@ -171,7 +171,7 @@ internal BatchPoolCreateContent() /// The list of user Accounts to be created on each Compute Node in the Pool. public IList UserAccounts { get; } /// A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - public IList Metadata { get; } + public IList Metadata { get; } /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. public IList MountConfiguration { get; } /// The desired node communication mode for the pool. If omitted, the default value is Default. 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.Serialization.cs index 789b6fcf1154..c2a7bb55e505 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.Serialization.cs @@ -78,17 +78,17 @@ internal static BatchPoolEndpointConfiguration DeserializeBatchPoolEndpointConfi { return null; } - IList inboundNATPools = default; + IList inboundNATPools = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) { if (property.NameEquals("inboundNATPools"u8)) { - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(InboundNatPool.DeserializeInboundNatPool(item, options)); + array.Add(BatchInboundNatPool.DeserializeBatchInboundNatPool(item, options)); } inboundNATPools = array; continue; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.cs index 0c8173fbeed5..8d7a4fc10b9d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolEndpointConfiguration.cs @@ -49,7 +49,7 @@ public partial class BatchPoolEndpointConfiguration /// Initializes a new instance of . /// A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. /// is null. 
- public BatchPoolEndpointConfiguration(IEnumerable inboundNatPools) + public BatchPoolEndpointConfiguration(IEnumerable inboundNatPools) { Argument.AssertNotNull(inboundNatPools, nameof(inboundNatPools)); @@ -59,7 +59,7 @@ public BatchPoolEndpointConfiguration(IEnumerable inboundNatPool /// Initializes a new instance of . /// A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. /// Keeps track of any properties unknown to the library. - internal BatchPoolEndpointConfiguration(IList inboundNatPools, IDictionary serializedAdditionalRawData) + internal BatchPoolEndpointConfiguration(IList inboundNatPools, IDictionary serializedAdditionalRawData) { InboundNatPools = inboundNatPools; _serializedAdditionalRawData = serializedAdditionalRawData; @@ -71,6 +71,6 @@ internal BatchPoolEndpointConfiguration() } /// A list of inbound NAT Pools that can be used to address specific ports on an individual Compute Node externally. The maximum number of inbound NAT Pools per Batch Pool is 5. If the maximum number of inbound NAT Pools is exceeded the request fails with HTTP status code 400. This cannot be specified if the IPAddressProvisioningType is NoPublicIPAddresses. 
- public IList InboundNatPools { get; } + public IList InboundNatPools { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.Serialization.cs index 0cc92fbff27f..9606ec215f9f 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.Serialization.cs @@ -84,7 +84,7 @@ internal static BatchPoolIdentity DeserializeBatchPoolIdentity(JsonElement eleme return null; } BatchPoolIdentityType type = default; - IReadOnlyList userAssignedIdentities = default; + IReadOnlyList userAssignedIdentities = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -100,10 +100,10 @@ internal static BatchPoolIdentity DeserializeBatchPoolIdentity(JsonElement eleme { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(UserAssignedIdentity.DeserializeUserAssignedIdentity(item, options)); + array.Add(BatchUserAssignedIdentity.DeserializeBatchUserAssignedIdentity(item, options)); } userAssignedIdentities = array; continue; @@ -114,7 +114,7 @@ internal static BatchPoolIdentity DeserializeBatchPoolIdentity(JsonElement eleme } } serializedAdditionalRawData = rawDataDictionary; - return new BatchPoolIdentity(type, userAssignedIdentities ?? new ChangeTrackingList(), serializedAdditionalRawData); + return new BatchPoolIdentity(type, userAssignedIdentities ?? 
new ChangeTrackingList(), serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.cs index 4cf680eb1c40..baf6b374763c 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolIdentity.cs @@ -50,14 +50,14 @@ public partial class BatchPoolIdentity internal BatchPoolIdentity(BatchPoolIdentityType type) { Type = type; - UserAssignedIdentities = new ChangeTrackingList(); + UserAssignedIdentities = new ChangeTrackingList(); } /// Initializes a new instance of . /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. /// The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. /// Keeps track of any properties unknown to the library. - internal BatchPoolIdentity(BatchPoolIdentityType type, IReadOnlyList userAssignedIdentities, IDictionary serializedAdditionalRawData) + internal BatchPoolIdentity(BatchPoolIdentityType type, IReadOnlyList userAssignedIdentities, IDictionary serializedAdditionalRawData) { Type = type; UserAssignedIdentities = userAssignedIdentities; @@ -72,6 +72,6 @@ internal BatchPoolIdentity() /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. 
The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. public BatchPoolIdentityType Type { get; } /// The list of user identities associated with the Batch account. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. - public IReadOnlyList UserAssignedIdentities { get; } + public IReadOnlyList UserAssignedIdentities { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.Serialization.cs similarity index 81% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.Serialization.cs index a2590cbda43c..147e49e15bb9 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchPoolReplaceContent : IUtf8JsonSerializable, IJsonModel + public partial class BatchPoolReplaceOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); 
JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReade /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchPoolReplaceContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchPoolReplaceOptions)} does not support writing '{format}' format."); } if (Optional.IsDefined(StartTask)) @@ -82,19 +82,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchPoolReplaceContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchPoolReplaceOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchPoolReplaceContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchPoolReplaceOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchPoolReplaceContent(document.RootElement, options); + return DeserializeBatchPoolReplaceOptions(document.RootElement, options); } - internal static BatchPoolReplaceContent DeserializeBatchPoolReplaceContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchPoolReplaceOptions DeserializeBatchPoolReplaceOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -105,7 +105,7 @@ internal static BatchPoolReplaceContent DeserializeBatchPoolReplaceContent(JsonE BatchStartTask startTask = default; IList certificateReferences = default; IList applicationPackageReferences = default; - IList metadata = default; + IList metadata = default; BatchNodeCommunicationMode? 
targetNodeCommunicationMode = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -142,10 +142,10 @@ internal static BatchPoolReplaceContent DeserializeBatchPoolReplaceContent(JsonE } if (property.NameEquals("metadata"u8)) { - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + array.Add(BatchMetadataItem.DeserializeBatchMetadataItem(item, options)); } metadata = array; continue; @@ -165,7 +165,7 @@ internal static BatchPoolReplaceContent DeserializeBatchPoolReplaceContent(JsonE } } serializedAdditionalRawData = rawDataDictionary; - return new BatchPoolReplaceContent( + return new BatchPoolReplaceOptions( startTask, certificateReferences, applicationPackageReferences, @@ -174,43 +174,43 @@ internal static BatchPoolReplaceContent DeserializeBatchPoolReplaceContent(JsonE serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchPoolReplaceContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchPoolReplaceOptions)} does not support writing '{options.Format}' format."); } } - BatchPoolReplaceContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchPoolReplaceOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchPoolReplaceContent(document.RootElement, options); + return DeserializeBatchPoolReplaceOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchPoolReplaceContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchPoolReplaceOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchPoolReplaceContent FromResponse(Response response) + internal static BatchPoolReplaceOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchPoolReplaceContent(document.RootElement); + return DeserializeBatchPoolReplaceOptions(document.RootElement); } /// Convert into a . 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.cs similarity index 94% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.cs index 0db0b9f350b2..ebf7444dae60 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolReplaceOptions.cs @@ -12,7 +12,7 @@ namespace Azure.Compute.Batch { /// Parameters for replacing properties on an Azure Batch Pool. - public partial class BatchPoolReplaceContent + public partial class BatchPoolReplaceOptions { /// /// Keeps track of any properties unknown to the library. @@ -46,7 +46,7 @@ public partial class BatchPoolReplaceContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// /// This list replaces any existing Certificate references configured on the Pool. /// If you specify an empty collection, any existing Certificate references are removed from the Pool. @@ -58,7 +58,7 @@ public partial class BatchPoolReplaceContent /// The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. /// A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. 
If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. /// , or is null. - public BatchPoolReplaceContent(IEnumerable certificateReferences, IEnumerable applicationPackageReferences, IEnumerable metadata) + public BatchPoolReplaceOptions(IEnumerable certificateReferences, IEnumerable applicationPackageReferences, IEnumerable metadata) { Argument.AssertNotNull(certificateReferences, nameof(certificateReferences)); Argument.AssertNotNull(applicationPackageReferences, nameof(applicationPackageReferences)); @@ -69,7 +69,7 @@ public BatchPoolReplaceContent(IEnumerable certificat Metadata = metadata.ToList(); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is removed from the Pool. /// /// This list replaces any existing Certificate references configured on the Pool. @@ -83,7 +83,7 @@ public BatchPoolReplaceContent(IEnumerable certificat /// A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. /// The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. /// Keeps track of any properties unknown to the library. - internal BatchPoolReplaceContent(BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, IList metadata, BatchNodeCommunicationMode? 
targetNodeCommunicationMode, IDictionary serializedAdditionalRawData) + internal BatchPoolReplaceOptions(BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, IList metadata, BatchNodeCommunicationMode? targetNodeCommunicationMode, IDictionary serializedAdditionalRawData) { StartTask = startTask; CertificateReferences = certificateReferences; @@ -93,8 +93,8 @@ internal BatchPoolReplaceContent(BatchStartTask startTask, IList Initializes a new instance of for deserialization. - internal BatchPoolReplaceContent() + /// Initializes a new instance of for deserialization. + internal BatchPoolReplaceOptions() { } @@ -112,7 +112,7 @@ internal BatchPoolReplaceContent() /// The list of Application Packages to be installed on each Compute Node in the Pool. The list replaces any existing Application Package references on the Pool. Changes to Application Package references affect all new Compute Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. There is a maximum of 10 Application Package references on any given Pool. If omitted, or if you specify an empty collection, any existing Application Packages references are removed from the Pool. A maximum of 10 references may be specified on a given Pool. public IList ApplicationPackageReferences { get; } /// A list of name-value pairs associated with the Pool as metadata. This list replaces any existing metadata configured on the Pool. If omitted, or if you specify an empty collection, any existing metadata is removed from the Pool. - public IList Metadata { get; } + public IList Metadata { get; } /// The desired node communication mode for the pool. This setting replaces any existing targetNodeCommunication setting on the Pool. If omitted, the existing setting is default. public BatchNodeCommunicationMode? 
TargetNodeCommunicationMode { get; set; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeOptions.Serialization.cs similarity index 82% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeOptions.Serialization.cs index 1e38ae657757..367c0f957b6e 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchPoolResizeContent : IUtf8JsonSerializable, IJsonModel + internal partial class BatchPoolResizeOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReader /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchPoolResizeContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchPoolResizeOptions)} does not support writing '{format}' format."); } if (Optional.IsDefined(TargetDedicatedNodes)) @@ -71,19 +71,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchPoolResizeContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchPoolResizeOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchPoolResizeContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchPoolResizeOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchPoolResizeContent(document.RootElement, options); + return DeserializeBatchPoolResizeOptions(document.RootElement, options); } - internal static BatchPoolResizeContent DeserializeBatchPoolResizeContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchPoolResizeOptions DeserializeBatchPoolResizeOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -141,46 +141,46 @@ internal static BatchPoolResizeContent DeserializeBatchPoolResizeContent(JsonEle } } serializedAdditionalRawData = rawDataDictionary; - return new BatchPoolResizeContent(targetDedicatedNodes, targetLowPriorityNodes, resizeTimeout, 
nodeDeallocationOption, serializedAdditionalRawData); + return new BatchPoolResizeOptions(targetDedicatedNodes, targetLowPriorityNodes, resizeTimeout, nodeDeallocationOption, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchPoolResizeContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchPoolResizeOptions)} does not support writing '{options.Format}' format."); } } - BatchPoolResizeContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchPoolResizeOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchPoolResizeContent(document.RootElement, options); + return DeserializeBatchPoolResizeOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchPoolResizeContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchPoolResizeOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchPoolResizeContent FromResponse(Response response) + internal static BatchPoolResizeOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchPoolResizeContent(document.RootElement); + return DeserializeBatchPoolResizeOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeOptions.cs similarity index 94% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeOptions.cs index 2a85fe641ba4..1d34cc7d64ea 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResizeOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for changing the size of an Azure Batch Pool. 
- public partial class BatchPoolResizeContent + internal partial class BatchPoolResizeOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,18 +45,18 @@ public partial class BatchPoolResizeContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public BatchPoolResizeContent() + /// Initializes a new instance of . + public BatchPoolResizeOptions() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The desired number of dedicated Compute Nodes in the Pool. /// The desired number of Spot/Low-priority Compute Nodes in the Pool. /// The timeout for allocation of Nodes to the Pool or removal of Compute Nodes from the Pool. The default value is 15 minutes. The minimum value is 5 minutes. If you specify a value less than 5 minutes, the Batch service returns an error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). /// Determines what to do with a Compute Node and its running task(s) if the Pool size is decreasing. The default value is requeue. /// Keeps track of any properties unknown to the library. - internal BatchPoolResizeContent(int? targetDedicatedNodes, int? targetLowPriorityNodes, TimeSpan? resizeTimeout, BatchNodeDeallocationOption? nodeDeallocationOption, IDictionary serializedAdditionalRawData) + internal BatchPoolResizeOptions(int? targetDedicatedNodes, int? targetLowPriorityNodes, TimeSpan? resizeTimeout, BatchNodeDeallocationOption? 
nodeDeallocationOption, IDictionary serializedAdditionalRawData) { TargetDedicatedNodes = targetDedicatedNodes; TargetLowPriorityNodes = targetLowPriorityNodes; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.Serialization.cs index d0ad5730f49c..b3e9aad75423 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.Serialization.cs @@ -49,9 +49,9 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("peakDiskGiB"u8); writer.WriteNumberValue(PeakDiskGiB); writer.WritePropertyName("diskReadIOps"u8); - writer.WriteStringValue(DiskReadIOps.ToString()); + writer.WriteStringValue(DiskReadIops.ToString()); writer.WritePropertyName("diskWriteIOps"u8); - writer.WriteStringValue(DiskWriteIOps.ToString()); + writer.WriteStringValue(DiskWriteIops.ToString()); writer.WritePropertyName("diskReadGiB"u8); writer.WriteNumberValue(DiskReadGiB); writer.WritePropertyName("diskWriteGiB"u8); diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.cs index 0fd446ceda9e..6b39d2d82cae 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolResourceStatistics.cs @@ -53,13 +53,13 @@ public partial class BatchPoolResourceStatistics /// The peak memory usage in GiB across all Compute Nodes in the Pool. /// The average used disk space in GiB across all Compute Nodes in the Pool. /// The peak used disk space in GiB across all Compute Nodes in the Pool. - /// The total number of disk read operations across all Compute Nodes in the Pool. 
- /// The total number of disk write operations across all Compute Nodes in the Pool. + /// The total number of disk read operations across all Compute Nodes in the Pool. + /// The total number of disk write operations across all Compute Nodes in the Pool. /// The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. /// The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. /// The total amount of data in GiB of network reads across all Compute Nodes in the Pool. /// The total amount of data in GiB of network writes across all Compute Nodes in the Pool. - internal BatchPoolResourceStatistics(DateTimeOffset startTime, DateTimeOffset lastUpdateTime, float avgCpuPercentage, float avgMemoryGiB, float peakMemoryGiB, float avgDiskGiB, float peakDiskGiB, long diskReadIOps, long diskWriteIOps, float diskReadGiB, float diskWriteGiB, float networkReadGiB, float networkWriteGiB) + internal BatchPoolResourceStatistics(DateTimeOffset startTime, DateTimeOffset lastUpdateTime, float avgCpuPercentage, float avgMemoryGiB, float peakMemoryGiB, float avgDiskGiB, float peakDiskGiB, long diskReadIops, long diskWriteIops, float diskReadGiB, float diskWriteGiB, float networkReadGiB, float networkWriteGiB) { StartTime = startTime; LastUpdateTime = lastUpdateTime; @@ -68,8 +68,8 @@ internal BatchPoolResourceStatistics(DateTimeOffset startTime, DateTimeOffset la PeakMemoryGiB = peakMemoryGiB; AvgDiskGiB = avgDiskGiB; PeakDiskGiB = peakDiskGiB; - DiskReadIOps = diskReadIOps; - DiskWriteIOps = diskWriteIOps; + DiskReadIops = diskReadIops; + DiskWriteIops = diskWriteIops; DiskReadGiB = diskReadGiB; DiskWriteGiB = diskWriteGiB; NetworkReadGiB = networkReadGiB; @@ -84,14 +84,14 @@ internal BatchPoolResourceStatistics(DateTimeOffset startTime, DateTimeOffset la /// The peak memory usage in GiB across all Compute Nodes in the Pool. /// The average used disk space in GiB across all Compute Nodes in the Pool. 
/// The peak used disk space in GiB across all Compute Nodes in the Pool. - /// The total number of disk read operations across all Compute Nodes in the Pool. - /// The total number of disk write operations across all Compute Nodes in the Pool. + /// The total number of disk read operations across all Compute Nodes in the Pool. + /// The total number of disk write operations across all Compute Nodes in the Pool. /// The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. /// The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. /// The total amount of data in GiB of network reads across all Compute Nodes in the Pool. /// The total amount of data in GiB of network writes across all Compute Nodes in the Pool. /// Keeps track of any properties unknown to the library. - internal BatchPoolResourceStatistics(DateTimeOffset startTime, DateTimeOffset lastUpdateTime, float avgCpuPercentage, float avgMemoryGiB, float peakMemoryGiB, float avgDiskGiB, float peakDiskGiB, long diskReadIOps, long diskWriteIOps, float diskReadGiB, float diskWriteGiB, float networkReadGiB, float networkWriteGiB, IDictionary serializedAdditionalRawData) + internal BatchPoolResourceStatistics(DateTimeOffset startTime, DateTimeOffset lastUpdateTime, float avgCpuPercentage, float avgMemoryGiB, float peakMemoryGiB, float avgDiskGiB, float peakDiskGiB, long diskReadIops, long diskWriteIops, float diskReadGiB, float diskWriteGiB, float networkReadGiB, float networkWriteGiB, IDictionary serializedAdditionalRawData) { StartTime = startTime; LastUpdateTime = lastUpdateTime; @@ -100,8 +100,8 @@ internal BatchPoolResourceStatistics(DateTimeOffset startTime, DateTimeOffset la PeakMemoryGiB = peakMemoryGiB; AvgDiskGiB = avgDiskGiB; PeakDiskGiB = peakDiskGiB; - DiskReadIOps = diskReadIOps; - DiskWriteIOps = diskWriteIOps; + DiskReadIops = diskReadIops; + DiskWriteIops = diskWriteIops; DiskReadGiB = diskReadGiB; DiskWriteGiB = diskWriteGiB; NetworkReadGiB 
= networkReadGiB; @@ -129,9 +129,9 @@ internal BatchPoolResourceStatistics() /// The peak used disk space in GiB across all Compute Nodes in the Pool. public float PeakDiskGiB { get; } /// The total number of disk read operations across all Compute Nodes in the Pool. - public long DiskReadIOps { get; } + public long DiskReadIops { get; } /// The total number of disk write operations across all Compute Nodes in the Pool. - public long DiskWriteIOps { get; } + public long DiskWriteIops { get; } /// The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. public float DiskReadGiB { get; } /// The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs index a559cc944d96..ab70d15c32e5 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.Serialization.cs @@ -221,7 +221,7 @@ internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonEle IList certificateReferences = default; IList applicationPackageReferences = default; IList userAccounts = default; - IList metadata = default; + IList metadata = default; IList mountConfiguration = default; BatchNodeCommunicationMode? 
targetNodeCommunicationMode = default; UpgradePolicy upgradePolicy = default; @@ -396,10 +396,10 @@ internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonEle { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(MetadataItem.DeserializeMetadataItem(item, options)); + array.Add(BatchMetadataItem.DeserializeBatchMetadataItem(item, options)); } metadata = array; continue; @@ -461,7 +461,7 @@ internal static BatchPoolSpecification DeserializeBatchPoolSpecification(JsonEle certificateReferences ?? new ChangeTrackingList(), applicationPackageReferences ?? new ChangeTrackingList(), userAccounts ?? new ChangeTrackingList(), - metadata ?? new ChangeTrackingList(), + metadata ?? new ChangeTrackingList(), mountConfiguration ?? new ChangeTrackingList(), targetNodeCommunicationMode, upgradePolicy, diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs index 00d8012ecbf0..1872af5f9c49 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolSpecification.cs @@ -56,7 +56,7 @@ public BatchPoolSpecification(string vmSize) CertificateReferences = new ChangeTrackingList(); ApplicationPackageReferences = new ChangeTrackingList(); UserAccounts = new ChangeTrackingList(); - Metadata = new ChangeTrackingList(); + Metadata = new ChangeTrackingList(); MountConfiguration = new ChangeTrackingList(); } @@ -88,7 +88,7 @@ public BatchPoolSpecification(string vmSize) /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// Keeps track of any properties unknown to the library. 
- internal BatchPoolSpecification(string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, TimeSpan? resizeTimeout, string resourceTags, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, IList userAccounts, IList metadata, IList mountConfiguration, BatchNodeCommunicationMode? targetNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) + internal BatchPoolSpecification(string displayName, string vmSize, VirtualMachineConfiguration virtualMachineConfiguration, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, TimeSpan? resizeTimeout, string resourceTags, int? targetDedicatedNodes, int? targetLowPriorityNodes, bool? enableAutoScale, string autoScaleFormula, TimeSpan? autoScaleEvaluationInterval, bool? enableInterNodeCommunication, NetworkConfiguration networkConfiguration, BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, IList userAccounts, IList metadata, IList mountConfiguration, BatchNodeCommunicationMode? targetNodeCommunicationMode, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) { DisplayName = displayName; VmSize = vmSize; @@ -161,7 +161,7 @@ internal BatchPoolSpecification() /// The list of user Accounts to be created on each Compute Node in the Pool. public IList UserAccounts { get; } /// A list of name-value pairs associated with the Pool as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - public IList Metadata { get; } + public IList Metadata { get; } /// A list of file systems to mount on each node in the pool. 
This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. public IList MountConfiguration { get; } /// The desired node communication mode for the pool. If omitted, the default value is Default. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.Serialization.cs index 504750993c53..7e6663c8db71 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.Serialization.cs @@ -35,20 +35,20 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WritePropertyName("url"u8); - writer.WriteStringValue(Url); + writer.WriteStringValue(Uri.AbsoluteUri); writer.WritePropertyName("startTime"u8); writer.WriteStringValue(StartTime, "O"); writer.WritePropertyName("lastUpdateTime"u8); writer.WriteStringValue(LastUpdateTime, "O"); - if (Optional.IsDefined(UsageStats)) + if (Optional.IsDefined(UsageStatistics)) { writer.WritePropertyName("usageStats"u8); - writer.WriteObjectValue(UsageStats, options); + writer.WriteObjectValue(UsageStatistics, options); } - if (Optional.IsDefined(ResourceStats)) + if (Optional.IsDefined(ResourceStatistics)) { writer.WritePropertyName("resourceStats"u8); - writer.WriteObjectValue(ResourceStats, options); + writer.WriteObjectValue(ResourceStatistics, options); } if (options.Format != "W" && _serializedAdditionalRawData != null) { @@ -87,7 +87,7 @@ internal static BatchPoolStatistics DeserializeBatchPoolStatistics(JsonElement e { return null; } - string url = default; + Uri url = default; DateTimeOffset startTime = default; DateTimeOffset lastUpdateTime = default; BatchPoolUsageStatistics usageStats = default; @@ -98,7 +98,7 @@ internal static BatchPoolStatistics DeserializeBatchPoolStatistics(JsonElement e { if (property.NameEquals("url"u8)) { - url = property.Value.GetString(); + url = new 
Uri(property.Value.GetString()); continue; } if (property.NameEquals("startTime"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.cs index ca9bdfab38e3..e4a56c702095 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolStatistics.cs @@ -46,33 +46,33 @@ public partial class BatchPoolStatistics private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// The URL for the statistics. + /// The URL for the statistics. /// The start time of the time range covered by the statistics. /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. - /// is null. - internal BatchPoolStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime) + /// is null. + internal BatchPoolStatistics(Uri uri, DateTimeOffset startTime, DateTimeOffset lastUpdateTime) { - Argument.AssertNotNull(url, nameof(url)); + Argument.AssertNotNull(uri, nameof(uri)); - Url = url; + Uri = uri; StartTime = startTime; LastUpdateTime = lastUpdateTime; } /// Initializes a new instance of . - /// The URL for the statistics. + /// The URL for the statistics. /// The start time of the time range covered by the statistics. /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. - /// Statistics related to Pool usage, such as the amount of core-time used. - /// Statistics related to resource consumption by Compute Nodes in the Pool. + /// Statistics related to Pool usage, such as the amount of core-time used. + /// Statistics related to resource consumption by Compute Nodes in the Pool. /// Keeps track of any properties unknown to the library. 
- internal BatchPoolStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, BatchPoolUsageStatistics usageStats, BatchPoolResourceStatistics resourceStats, IDictionary serializedAdditionalRawData) + internal BatchPoolStatistics(Uri uri, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, BatchPoolUsageStatistics usageStatistics, BatchPoolResourceStatistics resourceStatistics, IDictionary serializedAdditionalRawData) { - Url = url; + Uri = uri; StartTime = startTime; LastUpdateTime = lastUpdateTime; - UsageStats = usageStats; - ResourceStats = resourceStats; + UsageStatistics = usageStatistics; + ResourceStatistics = resourceStatistics; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -82,14 +82,14 @@ internal BatchPoolStatistics() } /// The URL for the statistics. - public string Url { get; } + public Uri Uri { get; } /// The start time of the time range covered by the statistics. public DateTimeOffset StartTime { get; } /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. public DateTimeOffset LastUpdateTime { get; } /// Statistics related to Pool usage, such as the amount of core-time used. - public BatchPoolUsageStatistics UsageStats { get; } + public BatchPoolUsageStatistics UsageStatistics { get; } /// Statistics related to resource consumption by Compute Nodes in the Pool. - public BatchPoolResourceStatistics ResourceStats { get; } + public BatchPoolResourceStatistics ResourceStatistics { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.Serialization.cs deleted file mode 100644 index 3fe96dfafbc2..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.Serialization.cs +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. 
-// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.ClientModel.Primitives; -using System.Collections.Generic; -using System.Text.Json; -using Azure.Core; - -namespace Azure.Compute.Batch -{ - public partial class BatchPoolUpdateContent : IUtf8JsonSerializable, IJsonModel - { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - writer.WriteStartObject(); - JsonModelWriteCore(writer, options); - writer.WriteEndObject(); - } - - /// The JSON writer. - /// The client options for reading and writing models. - protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(BatchPoolUpdateContent)} does not support writing '{format}' format."); - } - - if (Optional.IsDefined(DisplayName)) - { - writer.WritePropertyName("displayName"u8); - writer.WriteStringValue(DisplayName); - } - if (Optional.IsDefined(VmSize)) - { - writer.WritePropertyName("vmSize"u8); - writer.WriteStringValue(VmSize); - } - if (Optional.IsDefined(EnableInterNodeCommunication)) - { - writer.WritePropertyName("enableInterNodeCommunication"u8); - writer.WriteBooleanValue(EnableInterNodeCommunication.Value); - } - if (Optional.IsDefined(StartTask)) - { - writer.WritePropertyName("startTask"u8); - writer.WriteObjectValue(StartTask, options); - } - if (Optional.IsCollectionDefined(CertificateReferences)) - { - writer.WritePropertyName("certificateReferences"u8); - writer.WriteStartArray(); - foreach (var item in CertificateReferences) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } - if 
(Optional.IsCollectionDefined(ApplicationPackageReferences)) - { - writer.WritePropertyName("applicationPackageReferences"u8); - writer.WriteStartArray(); - foreach (var item in ApplicationPackageReferences) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } - if (Optional.IsCollectionDefined(Metadata)) - { - writer.WritePropertyName("metadata"u8); - writer.WriteStartArray(); - foreach (var item in Metadata) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } - if (Optional.IsDefined(VirtualMachineConfiguration)) - { - writer.WritePropertyName("virtualMachineConfiguration"u8); - writer.WriteObjectValue(VirtualMachineConfiguration, options); - } - if (Optional.IsDefined(TargetNodeCommunicationMode)) - { - writer.WritePropertyName("targetNodeCommunicationMode"u8); - writer.WriteStringValue(TargetNodeCommunicationMode.Value.ToString()); - } - if (Optional.IsDefined(TaskSlotsPerNode)) - { - writer.WritePropertyName("taskSlotsPerNode"u8); - writer.WriteNumberValue(TaskSlotsPerNode.Value); - } - if (Optional.IsDefined(TaskSchedulingPolicy)) - { - writer.WritePropertyName("taskSchedulingPolicy"u8); - writer.WriteObjectValue(TaskSchedulingPolicy, options); - } - if (Optional.IsDefined(NetworkConfiguration)) - { - writer.WritePropertyName("networkConfiguration"u8); - writer.WriteObjectValue(NetworkConfiguration, options); - } - if (Optional.IsCollectionDefined(ResourceTags)) - { - writer.WritePropertyName("resourceTags"u8); - writer.WriteStartObject(); - foreach (var item in ResourceTags) - { - writer.WritePropertyName(item.Key); - writer.WriteStringValue(item.Value); - } - writer.WriteEndObject(); - } - if (Optional.IsCollectionDefined(UserAccounts)) - { - writer.WritePropertyName("userAccounts"u8); - writer.WriteStartArray(); - foreach (var item in UserAccounts) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } - if (Optional.IsCollectionDefined(MountConfiguration)) - { - 
writer.WritePropertyName("mountConfiguration"u8); - writer.WriteStartArray(); - foreach (var item in MountConfiguration) - { - writer.WriteObjectValue(item, options); - } - writer.WriteEndArray(); - } - if (Optional.IsDefined(UpgradePolicy)) - { - writer.WritePropertyName("upgradePolicy"u8); - writer.WriteObjectValue(UpgradePolicy, options); - } - if (options.Format != "W" && _serializedAdditionalRawData != null) - { - foreach (var item in _serializedAdditionalRawData) - { - writer.WritePropertyName(item.Key); -#if NET6_0_OR_GREATER - writer.WriteRawValue(item.Value); -#else - using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions)) - { - JsonSerializer.Serialize(writer, document.RootElement); - } -#endif - } - } - } - - BatchPoolUpdateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - if (format != "J") - { - throw new FormatException($"The model {nameof(BatchPoolUpdateContent)} does not support reading '{format}' format."); - } - - using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchPoolUpdateContent(document.RootElement, options); - } - - internal static BatchPoolUpdateContent DeserializeBatchPoolUpdateContent(JsonElement element, ModelReaderWriterOptions options = null) - { - options ??= ModelSerializationExtensions.WireOptions; - - if (element.ValueKind == JsonValueKind.Null) - { - return null; - } - string displayName = default; - string vmSize = default; - bool? enableInterNodeCommunication = default; - BatchStartTask startTask = default; - IList certificateReferences = default; - IList applicationPackageReferences = default; - IList metadata = default; - VirtualMachineConfiguration virtualMachineConfiguration = default; - BatchNodeCommunicationMode? targetNodeCommunicationMode = default; - int? 
taskSlotsPerNode = default; - BatchTaskSchedulingPolicy taskSchedulingPolicy = default; - NetworkConfiguration networkConfiguration = default; - IDictionary resourceTags = default; - IList userAccounts = default; - IList mountConfiguration = default; - UpgradePolicy upgradePolicy = default; - IDictionary serializedAdditionalRawData = default; - Dictionary rawDataDictionary = new Dictionary(); - foreach (var property in element.EnumerateObject()) - { - if (property.NameEquals("displayName"u8)) - { - displayName = property.Value.GetString(); - continue; - } - if (property.NameEquals("vmSize"u8)) - { - vmSize = property.Value.GetString(); - continue; - } - if (property.NameEquals("enableInterNodeCommunication"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - enableInterNodeCommunication = property.Value.GetBoolean(); - continue; - } - if (property.NameEquals("startTask"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - startTask = BatchStartTask.DeserializeBatchStartTask(property.Value, options); - continue; - } - if (property.NameEquals("certificateReferences"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(BatchCertificateReference.DeserializeBatchCertificateReference(item, options)); - } - certificateReferences = array; - continue; - } - if (property.NameEquals("applicationPackageReferences"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(BatchApplicationPackageReference.DeserializeBatchApplicationPackageReference(item, options)); - } - applicationPackageReferences = array; - continue; - } - if (property.NameEquals("metadata"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - 
foreach (var item in property.Value.EnumerateArray()) - { - array.Add(MetadataItem.DeserializeMetadataItem(item, options)); - } - metadata = array; - continue; - } - if (property.NameEquals("virtualMachineConfiguration"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - virtualMachineConfiguration = VirtualMachineConfiguration.DeserializeVirtualMachineConfiguration(property.Value, options); - continue; - } - if (property.NameEquals("targetNodeCommunicationMode"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - targetNodeCommunicationMode = new BatchNodeCommunicationMode(property.Value.GetString()); - continue; - } - if (property.NameEquals("taskSlotsPerNode"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - taskSlotsPerNode = property.Value.GetInt32(); - continue; - } - if (property.NameEquals("taskSchedulingPolicy"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - taskSchedulingPolicy = BatchTaskSchedulingPolicy.DeserializeBatchTaskSchedulingPolicy(property.Value, options); - continue; - } - if (property.NameEquals("networkConfiguration"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - networkConfiguration = NetworkConfiguration.DeserializeNetworkConfiguration(property.Value, options); - continue; - } - if (property.NameEquals("resourceTags"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - Dictionary dictionary = new Dictionary(); - foreach (var property0 in property.Value.EnumerateObject()) - { - dictionary.Add(property0.Name, property0.Value.GetString()); - } - resourceTags = dictionary; - continue; - } - if (property.NameEquals("userAccounts"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(UserAccount.DeserializeUserAccount(item, 
options)); - } - userAccounts = array; - continue; - } - if (property.NameEquals("mountConfiguration"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - List array = new List(); - foreach (var item in property.Value.EnumerateArray()) - { - array.Add(Batch.MountConfiguration.DeserializeMountConfiguration(item, options)); - } - mountConfiguration = array; - continue; - } - if (property.NameEquals("upgradePolicy"u8)) - { - if (property.Value.ValueKind == JsonValueKind.Null) - { - continue; - } - upgradePolicy = UpgradePolicy.DeserializeUpgradePolicy(property.Value, options); - continue; - } - if (options.Format != "W") - { - rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText())); - } - } - serializedAdditionalRawData = rawDataDictionary; - return new BatchPoolUpdateContent( - displayName, - vmSize, - enableInterNodeCommunication, - startTask, - certificateReferences ?? new ChangeTrackingList(), - applicationPackageReferences ?? new ChangeTrackingList(), - metadata ?? new ChangeTrackingList(), - virtualMachineConfiguration, - targetNodeCommunicationMode, - taskSlotsPerNode, - taskSchedulingPolicy, - networkConfiguration, - resourceTags ?? new ChangeTrackingDictionary(), - userAccounts ?? new ChangeTrackingList(), - mountConfiguration ?? new ChangeTrackingList(), - upgradePolicy, - serializedAdditionalRawData); - } - - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); - default: - throw new FormatException($"The model {nameof(BatchPoolUpdateContent)} does not support writing '{options.Format}' format."); - } - } - - BatchPoolUpdateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) - { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; - - switch (format) - { - case "J": - { - using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchPoolUpdateContent(document.RootElement, options); - } - default: - throw new FormatException($"The model {nameof(BatchPoolUpdateContent)} does not support reading '{options.Format}' format."); - } - } - - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; - - /// Deserializes the model from a raw response. - /// The response to deserialize the model from. - internal static BatchPoolUpdateContent FromResponse(Response response) - { - using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchPoolUpdateContent(document.RootElement); - } - - /// Convert into a . - internal virtual RequestContent ToRequestContent() - { - var content = new Utf8JsonRequestContent(); - content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions); - return content; - } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.cs deleted file mode 100644 index f51418e24d54..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPoolUpdateContent.cs +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) Microsoft Corporation. 
All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.Collections.Generic; - -namespace Azure.Compute.Batch -{ - /// Parameters for updating an Azure Batch Pool. - public partial class BatchPoolUpdateContent - { - /// - /// Keeps track of any properties unknown to the library. - /// - /// To assign an object to the value of this property use . - /// - /// - /// To assign an already formatted json string to this property use . - /// - /// - /// Examples: - /// - /// - /// BinaryData.FromObjectAsJson("foo") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromString("\"foo\"") - /// Creates a payload of "foo". - /// - /// - /// BinaryData.FromObjectAsJson(new { key = "value" }) - /// Creates a payload of { "key": "value" }. - /// - /// - /// BinaryData.FromString("{\"key\": \"value\"}") - /// Creates a payload of { "key": "value" }. - /// - /// - /// - /// - private IDictionary _serializedAdditionalRawData; - - /// Initializes a new instance of . - public BatchPoolUpdateContent() - { - CertificateReferences = new ChangeTrackingList(); - ApplicationPackageReferences = new ChangeTrackingList(); - Metadata = new ChangeTrackingList(); - ResourceTags = new ChangeTrackingDictionary(); - UserAccounts = new ChangeTrackingList(); - MountConfiguration = new ChangeTrackingList(); - } - - /// Initializes a new instance of . - /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. This field can be updated only when the pool is empty. - /// The size of virtual machines in the Pool. For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes).<br /><br />This field can be updated only when the pool is empty. - /// Whether the Pool permits direct communication between Compute Nodes. 
Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false.<br /><br />This field can be updated only when the pool is empty. - /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. - /// - /// If this element is present, it replaces any existing Certificate references configured on the Pool. - /// If omitted, any existing Certificate references are left unchanged. - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. - /// - /// A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. 
- /// A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. - /// The virtual machine configuration for the Pool. This property must be specified.<br /><br />This field can be updated only when the pool is empty. - /// The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. - /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.<br /><br />This field can be updated only when the pool is empty. - /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.<br /><br />This field can be updated only when the pool is empty. - /// The network configuration for the Pool. This field can be updated only when the pool is empty. - /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'.<br /><br />This field can be updated only when the pool is empty. - /// The list of user Accounts to be created on each Compute Node in the Pool. This field can be updated only when the pool is empty. - /// Mount storage using specified file system for the entire lifetime of the pool. 
Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system.<br /><br />This field can be updated only when the pool is empty. - /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling.<br /><br />This field can be updated only when the pool is empty. - /// Keeps track of any properties unknown to the library. - internal BatchPoolUpdateContent(string displayName, string vmSize, bool? enableInterNodeCommunication, BatchStartTask startTask, IList certificateReferences, IList applicationPackageReferences, IList metadata, VirtualMachineConfiguration virtualMachineConfiguration, BatchNodeCommunicationMode? targetNodeCommunicationMode, int? taskSlotsPerNode, BatchTaskSchedulingPolicy taskSchedulingPolicy, NetworkConfiguration networkConfiguration, IDictionary resourceTags, IList userAccounts, IList mountConfiguration, UpgradePolicy upgradePolicy, IDictionary serializedAdditionalRawData) - { - DisplayName = displayName; - VmSize = vmSize; - EnableInterNodeCommunication = enableInterNodeCommunication; - StartTask = startTask; - CertificateReferences = certificateReferences; - ApplicationPackageReferences = applicationPackageReferences; - Metadata = metadata; - VirtualMachineConfiguration = virtualMachineConfiguration; - TargetNodeCommunicationMode = targetNodeCommunicationMode; - TaskSlotsPerNode = taskSlotsPerNode; - TaskSchedulingPolicy = taskSchedulingPolicy; - NetworkConfiguration = networkConfiguration; - ResourceTags = resourceTags; - UserAccounts = userAccounts; - MountConfiguration = mountConfiguration; - UpgradePolicy = upgradePolicy; - _serializedAdditionalRawData = serializedAdditionalRawData; - } - - /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. This field can be updated only when the pool is empty. - public string DisplayName { get; set; } - /// The size of virtual machines in the Pool. 
For information about available sizes of virtual machines in Pools, see Choose a VM size for Compute Nodes in an Azure Batch Pool (https://learn.microsoft.com/azure/batch/batch-pool-vm-sizes).<br /><br />This field can be updated only when the pool is empty. - public string VmSize { get; set; } - /// Whether the Pool permits direct communication between Compute Nodes. Enabling inter-node communication limits the maximum size of the Pool due to deployment restrictions on the Compute Nodes of the Pool. This may result in the Pool not reaching its desired size. The default value is false.<br /><br />This field can be updated only when the pool is empty. - public bool? EnableInterNodeCommunication { get; set; } - /// A Task to run on each Compute Node as it joins the Pool. The Task runs when the Compute Node is added to the Pool or when the Compute Node is restarted. If this element is present, it overwrites any existing StartTask. If omitted, any existing StartTask is left unchanged. - public BatchStartTask StartTask { get; set; } - /// - /// If this element is present, it replaces any existing Certificate references configured on the Pool. - /// If omitted, any existing Certificate references are left unchanged. - /// For Windows Nodes, the Batch service installs the Certificates to the specified Certificate store and location. - /// For Linux Compute Nodes, the Certificates are stored in a directory inside the Task working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is supplied to the Task to query for this location. - /// For Certificates with visibility of 'remoteUser', a 'certs' directory is created in the user's home directory (e.g., /home/{user-name}/certs) and Certificates are placed in that directory. - /// Warning: This property is deprecated and will be removed after February, 2024. Please use the [Azure KeyVault Extension](https://learn.microsoft.com/azure/batch/batch-certificate-migration-guide) instead. 
- /// - public IList CertificateReferences { get; } - /// A list of Packages to be installed on each Compute Node in the Pool. Changes to Package references affect all new Nodes joining the Pool, but do not affect Compute Nodes that are already in the Pool until they are rebooted or reimaged. If this element is present, it replaces any existing Package references. If you specify an empty collection, then all Package references are removed from the Pool. If omitted, any existing Package references are left unchanged. - public IList ApplicationPackageReferences { get; } - /// A list of name-value pairs associated with the Pool as metadata. If this element is present, it replaces any existing metadata configured on the Pool. If you specify an empty collection, any metadata is removed from the Pool. If omitted, any existing metadata is left unchanged. - public IList Metadata { get; } - /// The virtual machine configuration for the Pool. This property must be specified.<br /><br />This field can be updated only when the pool is empty. - public VirtualMachineConfiguration VirtualMachineConfiguration { get; set; } - /// The desired node communication mode for the pool. If this element is present, it replaces the existing targetNodeCommunicationMode configured on the Pool. If omitted, any existing metadata is left unchanged. - public BatchNodeCommunicationMode? TargetNodeCommunicationMode { get; set; } - /// The number of task slots that can be used to run concurrent tasks on a single compute node in the pool. The default value is 1. The maximum value is the smaller of 4 times the number of cores of the vmSize of the pool or 256.<br /><br />This field can be updated only when the pool is empty. - public int? TaskSlotsPerNode { get; set; } - /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread.<br /><br />This field can be updated only when the pool is empty. 
- public BatchTaskSchedulingPolicy TaskSchedulingPolicy { get; set; } - /// The network configuration for the Pool. This field can be updated only when the pool is empty. - public NetworkConfiguration NetworkConfiguration { get; set; } - /// The user-specified tags associated with the pool. The user-defined tags to be associated with the Azure Batch Pool. When specified, these tags are propagated to the backing Azure resources associated with the pool. This property can only be specified when the Batch account was created with the poolAllocationMode property set to 'UserSubscription'.<br /><br />This field can be updated only when the pool is empty. - public IDictionary ResourceTags { get; } - /// The list of user Accounts to be created on each Compute Node in the Pool. This field can be updated only when the pool is empty. - public IList UserAccounts { get; } - /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system.<br /><br />This field can be updated only when the pool is empty. - public IList MountConfiguration { get; } - /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling.<br /><br />This field can be updated only when the pool is empty. 
- public UpgradePolicy UpgradePolicy { get; set; } - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/PublicIpAddressConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.Serialization.cs similarity index 62% rename from sdk/batch/Azure.Compute.Batch/src/Generated/PublicIpAddressConfiguration.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.Serialization.cs index 75462850b7fa..cd9e285ce66c 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/PublicIpAddressConfiguration.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.Serialization.cs @@ -8,16 +8,17 @@ using System; using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Net; using System.Text.Json; using Azure.Core; namespace Azure.Compute.Batch { - public partial class PublicIpAddressConfiguration : IUtf8JsonSerializable, IJsonModel + public partial class BatchPublicIpAddressConfiguration : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +29,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, Model /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(PublicIpAddressConfiguration)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchPublicIpAddressConfiguration)} does not support writing '{format}' format."); } if (Optional.IsDefined(IpAddressProvisioningType)) @@ -45,7 +46,12 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WriteStartArray(); foreach (var item in IpAddressIds) { - writer.WriteStringValue(item); + if (item == null) + { + writer.WriteNullValue(); + continue; + } + writer.WriteStringValue(item.ToString()); } writer.WriteEndArray(); } @@ -66,19 +72,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - PublicIpAddressConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchPublicIpAddressConfiguration IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(PublicIpAddressConfiguration)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchPublicIpAddressConfiguration)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializePublicIpAddressConfiguration(document.RootElement, options); + return DeserializeBatchPublicIpAddressConfiguration(document.RootElement, options); } - internal static PublicIpAddressConfiguration DeserializePublicIpAddressConfiguration(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchPublicIpAddressConfiguration DeserializeBatchPublicIpAddressConfiguration(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -87,7 +93,7 @@ internal static PublicIpAddressConfiguration DeserializePublicIpAddressConfigura return null; } IpAddressProvisioningType? provision = default; - IList ipAddressIds = default; + IList ipAddressIds = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -107,10 +113,17 @@ internal static PublicIpAddressConfiguration DeserializePublicIpAddressConfigura { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(item.GetString()); + if (item.ValueKind == JsonValueKind.Null) + { + array.Add(null); + } + else + { + array.Add(IPAddress.Parse(item.GetString())); + } } ipAddressIds = array; continue; @@ -121,46 +134,46 @@ internal static PublicIpAddressConfiguration DeserializePublicIpAddressConfigura } } serializedAdditionalRawData = rawDataDictionary; - return new PublicIpAddressConfiguration(provision, ipAddressIds ?? 
new ChangeTrackingList(), serializedAdditionalRawData); + return new BatchPublicIpAddressConfiguration(provision, ipAddressIds ?? new ChangeTrackingList(), serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(PublicIpAddressConfiguration)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchPublicIpAddressConfiguration)} does not support writing '{options.Format}' format."); } } - PublicIpAddressConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchPublicIpAddressConfiguration IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializePublicIpAddressConfiguration(document.RootElement, options); + return DeserializeBatchPublicIpAddressConfiguration(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(PublicIpAddressConfiguration)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchPublicIpAddressConfiguration)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static PublicIpAddressConfiguration FromResponse(Response response) + internal static BatchPublicIpAddressConfiguration FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializePublicIpAddressConfiguration(document.RootElement); + return DeserializeBatchPublicIpAddressConfiguration(document.RootElement); } /// Convert into a . 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/PublicIpAddressConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.cs similarity index 83% rename from sdk/batch/Azure.Compute.Batch/src/Generated/PublicIpAddressConfiguration.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.cs index 3af8d5206e91..b723b00b9b32 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/PublicIpAddressConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchPublicIpAddressConfiguration.cs @@ -7,11 +7,12 @@ using System; using System.Collections.Generic; +using System.Net; namespace Azure.Compute.Batch { /// The public IP Address configuration of the networking configuration of a Pool. - public partial class PublicIpAddressConfiguration + public partial class BatchPublicIpAddressConfiguration { /// /// Keeps track of any properties unknown to the library. @@ -45,17 +46,17 @@ public partial class PublicIpAddressConfiguration /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public PublicIpAddressConfiguration() + /// Initializes a new instance of . + public BatchPublicIpAddressConfiguration() { - IpAddressIds = new ChangeTrackingList(); + IpAddressIds = new ChangeTrackingList(); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. /// The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. 
/// Keeps track of any properties unknown to the library. - internal PublicIpAddressConfiguration(IpAddressProvisioningType? ipAddressProvisioningType, IList ipAddressIds, IDictionary serializedAdditionalRawData) + internal BatchPublicIpAddressConfiguration(IpAddressProvisioningType? ipAddressProvisioningType, IList ipAddressIds, IDictionary serializedAdditionalRawData) { IpAddressProvisioningType = ipAddressProvisioningType; IpAddressIds = ipAddressIds; @@ -65,6 +66,6 @@ internal PublicIpAddressConfiguration(IpAddressProvisioningType? ipAddressProvis /// The provisioning type for Public IP Addresses for the Pool. The default value is BatchManaged. public IpAddressProvisioningType? IpAddressProvisioningType { get; set; } /// The list of public IPs which the Batch service will use when provisioning Compute Nodes. The number of IPs specified here limits the maximum size of the Pool - 100 dedicated nodes or 100 Spot/Low-priority nodes can be allocated for each public IP. For example, a pool needing 250 dedicated VMs would need at least 3 public IPs specified. Each element of this collection is of the form: /subscriptions/{subscription}/resourceGroups/{group}/providers/Microsoft.Network/publicIPAddresses/{ip}. 
- public IList IpAddressIds { get; } + public IList IpAddressIds { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.Serialization.cs index 77cf870bd6b7..484b96e9b688 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.Serialization.cs @@ -95,7 +95,7 @@ internal static BatchSupportedImage DeserializeBatchSupportedImage(JsonElement e return null; } string nodeAgentSKUId = default; - ImageReference imageReference = default; + BatchVmImageReference imageReference = default; OSType osType = default; IReadOnlyList capabilities = default; DateTimeOffset? batchSupportEndOfLife = default; @@ -111,7 +111,7 @@ internal static BatchSupportedImage DeserializeBatchSupportedImage(JsonElement e } if (property.NameEquals("imageReference"u8)) { - imageReference = ImageReference.DeserializeImageReference(property.Value, options); + imageReference = BatchVmImageReference.DeserializeBatchVmImageReference(property.Value, options); continue; } if (property.NameEquals("osType"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.cs index dcc24fa9d8e8..2b0dd2266a4b 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchSupportedImage.cs @@ -54,7 +54,7 @@ public partial class BatchSupportedImage /// The type of operating system (e.g. Windows or Linux) of the Image. /// Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. /// or is null. 
- internal BatchSupportedImage(string nodeAgentSkuId, ImageReference imageReference, OSType osType, ImageVerificationType verificationType) + internal BatchSupportedImage(string nodeAgentSkuId, BatchVmImageReference imageReference, OSType osType, ImageVerificationType verificationType) { Argument.AssertNotNull(nodeAgentSkuId, nameof(nodeAgentSkuId)); Argument.AssertNotNull(imageReference, nameof(imageReference)); @@ -74,7 +74,7 @@ internal BatchSupportedImage(string nodeAgentSkuId, ImageReference imageReferenc /// The time when the Azure Batch service will stop accepting create Pool requests for the Image. /// Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. /// Keeps track of any properties unknown to the library. - internal BatchSupportedImage(string nodeAgentSkuId, ImageReference imageReference, OSType osType, IReadOnlyList capabilities, DateTimeOffset? batchSupportEndOfLife, ImageVerificationType verificationType, IDictionary serializedAdditionalRawData) + internal BatchSupportedImage(string nodeAgentSkuId, BatchVmImageReference imageReference, OSType osType, IReadOnlyList capabilities, DateTimeOffset? batchSupportEndOfLife, ImageVerificationType verificationType, IDictionary serializedAdditionalRawData) { NodeAgentSkuId = nodeAgentSkuId; ImageReference = imageReference; @@ -93,7 +93,7 @@ internal BatchSupportedImage() /// The ID of the Compute Node agent SKU which the Image supports. public string NodeAgentSkuId { get; } /// The reference to the Azure Virtual Machine's Marketplace Image. - public ImageReference ImageReference { get; } + public BatchVmImageReference ImageReference { get; } /// The type of operating system (e.g. Windows or Linux) of the Image. public OSType OsType { get; } /// The capabilities or features which the Image supports. Not every capability of the Image is listed. 
Capabilities in this list are considered of special interest and are generally related to integration with other features in the Azure Batch service. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.Serialization.cs index f00756c89967..127c33151b4a 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.Serialization.cs @@ -44,15 +44,15 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("displayName"u8); writer.WriteStringValue(DisplayName); } - if (options.Format != "W" && Optional.IsDefined(Url)) + if (options.Format != "W" && Optional.IsDefined(Uri)) { writer.WritePropertyName("url"u8); - writer.WriteStringValue(Url); + writer.WriteStringValue(Uri.AbsoluteUri); } if (options.Format != "W" && Optional.IsDefined(ETag)) { writer.WritePropertyName("eTag"u8); - writer.WriteStringValue(ETag); + writer.WriteStringValue(ETag.Value.ToString()); } if (options.Format != "W" && Optional.IsDefined(LastModified)) { @@ -164,10 +164,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("multiInstanceSettings"u8); writer.WriteObjectValue(MultiInstanceSettings, options); } - if (options.Format != "W" && Optional.IsDefined(Stats)) + if (options.Format != "W" && Optional.IsDefined(TaskStatistics)) { writer.WritePropertyName("stats"u8); - writer.WriteObjectValue(Stats, options); + writer.WriteObjectValue(TaskStatistics, options); } if (options.Format != "W" && Optional.IsDefined(DependsOn)) { @@ -228,8 +228,8 @@ internal static BatchTask DeserializeBatchTask(JsonElement element, ModelReaderW } string id = default; string displayName = default; - string url = default; - string eTag = default; + Uri url = default; + ETag? eTag = default; DateTimeOffset? lastModified = default; DateTimeOffset? 
creationTime = default; ExitConditions exitConditions = default; @@ -242,7 +242,7 @@ internal static BatchTask DeserializeBatchTask(JsonElement element, ModelReaderW IReadOnlyList resourceFiles = default; IReadOnlyList outputFiles = default; IReadOnlyList environmentSettings = default; - AffinityInfo affinityInfo = default; + BatchAffinityInfo affinityInfo = default; BatchTaskConstraints constraints = default; int? requiredSlots = default; UserIdentity userIdentity = default; @@ -269,12 +269,20 @@ internal static BatchTask DeserializeBatchTask(JsonElement element, ModelReaderW } if (property.NameEquals("url"u8)) { - url = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + url = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("eTag"u8)) { - eTag = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + eTag = new ETag(property.Value.GetString()); continue; } if (property.NameEquals("lastModified"u8)) @@ -402,7 +410,7 @@ internal static BatchTask DeserializeBatchTask(JsonElement element, ModelReaderW { continue; } - affinityInfo = AffinityInfo.DeserializeAffinityInfo(property.Value, options); + affinityInfo = BatchAffinityInfo.DeserializeBatchAffinityInfo(property.Value, options); continue; } if (property.NameEquals("constraints"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.cs index db112fb7f7ae..c392682a2fe2 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTask.cs @@ -67,7 +67,7 @@ public BatchTask() /// Initializes a new instance of . /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. /// A display name for the Task. 
The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. - /// The URL of the Task. + /// The URL of the Task. /// The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can be pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime. /// The last modified time of the Task. /// The creation time of the Task. @@ -88,16 +88,16 @@ public BatchTask() /// Information about the execution of the Task. /// Information about the Compute Node on which the Task ran. /// An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. - /// Resource usage statistics for the Task. + /// Resource usage statistics for the Task. /// The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. /// A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. /// The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. 
The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. /// Keeps track of any properties unknown to the library. - internal BatchTask(string id, string displayName, string url, string eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, ExitConditions exitConditions, BatchTaskState? state, DateTimeOffset? stateTransitionTime, BatchTaskState? previousState, DateTimeOffset? previousStateTransitionTime, string commandLine, BatchTaskContainerSettings containerSettings, IReadOnlyList resourceFiles, IReadOnlyList outputFiles, IReadOnlyList environmentSettings, AffinityInfo affinityInfo, BatchTaskConstraints constraints, int? requiredSlots, UserIdentity userIdentity, BatchTaskExecutionInfo executionInfo, BatchNodeInfo nodeInfo, MultiInstanceSettings multiInstanceSettings, BatchTaskStatistics stats, BatchTaskDependencies dependsOn, IReadOnlyList applicationPackageReferences, AuthenticationTokenSettings authenticationTokenSettings, IDictionary serializedAdditionalRawData) + internal BatchTask(string id, string displayName, Uri uri, ETag? eTag, DateTimeOffset? lastModified, DateTimeOffset? creationTime, ExitConditions exitConditions, BatchTaskState? state, DateTimeOffset? stateTransitionTime, BatchTaskState? previousState, DateTimeOffset? previousStateTransitionTime, string commandLine, BatchTaskContainerSettings containerSettings, IReadOnlyList resourceFiles, IReadOnlyList outputFiles, IReadOnlyList environmentSettings, BatchAffinityInfo affinityInfo, BatchTaskConstraints constraints, int? 
requiredSlots, UserIdentity userIdentity, BatchTaskExecutionInfo executionInfo, BatchNodeInfo nodeInfo, MultiInstanceSettings multiInstanceSettings, BatchTaskStatistics taskStatistics, BatchTaskDependencies dependsOn, IReadOnlyList applicationPackageReferences, AuthenticationTokenSettings authenticationTokenSettings, IDictionary serializedAdditionalRawData) { Id = id; DisplayName = displayName; - Url = url; + Uri = uri; ETag = eTag; LastModified = lastModified; CreationTime = creationTime; @@ -118,7 +118,7 @@ internal BatchTask(string id, string displayName, string url, string eTag, DateT ExecutionInfo = executionInfo; NodeInfo = nodeInfo; MultiInstanceSettings = multiInstanceSettings; - Stats = stats; + TaskStatistics = taskStatistics; DependsOn = dependsOn; ApplicationPackageReferences = applicationPackageReferences; AuthenticationTokenSettings = authenticationTokenSettings; @@ -130,9 +130,9 @@ internal BatchTask(string id, string displayName, string url, string eTag, DateT /// A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. public string DisplayName { get; } /// The URL of the Task. - public string Url { get; } + public Uri Uri { get; } /// The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. In particular, you can be pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime. - public string ETag { get; } + public ETag? ETag { get; } /// The last modified time of the Task. public DateTimeOffset? LastModified { get; } /// The creation time of the Task. @@ -158,7 +158,7 @@ internal BatchTask(string id, string displayName, string url, string eTag, DateT /// A list of environment variable settings for the Task. 
public IReadOnlyList EnvironmentSettings { get; } /// A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. - public AffinityInfo AffinityInfo { get; } + public BatchAffinityInfo AffinityInfo { get; } /// The execution constraints that apply to this Task. public BatchTaskConstraints Constraints { get; set; } /// The number of scheduling slots that the Task requires to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. @@ -172,7 +172,7 @@ internal BatchTask(string id, string displayName, string url, string eTag, DateT /// An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. public MultiInstanceSettings MultiInstanceSettings { get; } /// Resource usage statistics for the Task. - public BatchTaskStatistics Stats { get; } + public BatchTaskStatistics TaskStatistics { get; } /// The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. public BatchTaskDependencies DependsOn { get; } /// A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateOptions.Serialization.cs similarity index 90% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateOptions.Serialization.cs index 08ae5c389d46..a1a4d14f6589 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchTaskCreateContent : IUtf8JsonSerializable, IJsonModel + public partial class BatchTaskCreateOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReader /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchTaskCreateContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchTaskCreateOptions)} does not support writing '{format}' format."); } writer.WritePropertyName("id"u8); @@ -145,19 +145,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchTaskCreateContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchTaskCreateOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchTaskCreateContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchTaskCreateOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchTaskCreateContent(document.RootElement, options); + return DeserializeBatchTaskCreateOptions(document.RootElement, options); } - internal static BatchTaskCreateContent DeserializeBatchTaskCreateContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchTaskCreateOptions DeserializeBatchTaskCreateOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -173,7 +173,7 @@ internal static BatchTaskCreateContent DeserializeBatchTaskCreateContent(JsonEle IList resourceFiles = default; IList outputFiles = default; IList environmentSettings = default; - AffinityInfo affinityInfo = default; + BatchAffinityInfo affinityInfo = 
default; BatchTaskConstraints constraints = default; int? requiredSlots = default; UserIdentity userIdentity = default; @@ -266,7 +266,7 @@ internal static BatchTaskCreateContent DeserializeBatchTaskCreateContent(JsonEle { continue; } - affinityInfo = AffinityInfo.DeserializeAffinityInfo(property.Value, options); + affinityInfo = BatchAffinityInfo.DeserializeBatchAffinityInfo(property.Value, options); continue; } if (property.NameEquals("constraints"u8)) @@ -343,7 +343,7 @@ internal static BatchTaskCreateContent DeserializeBatchTaskCreateContent(JsonEle } } serializedAdditionalRawData = rawDataDictionary; - return new BatchTaskCreateContent( + return new BatchTaskCreateOptions( id, displayName, exitConditions, @@ -363,43 +363,43 @@ internal static BatchTaskCreateContent DeserializeBatchTaskCreateContent(JsonEle serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchTaskCreateContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchTaskCreateOptions)} does not support writing '{options.Format}' format."); } } - BatchTaskCreateContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchTaskCreateOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchTaskCreateContent(document.RootElement, options); + return DeserializeBatchTaskCreateOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchTaskCreateContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchTaskCreateOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchTaskCreateContent FromResponse(Response response) + internal static BatchTaskCreateOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchTaskCreateContent(document.RootElement); + return DeserializeBatchTaskCreateOptions(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateOptions.cs similarity index 95% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateOptions.cs index 2f6b5bda02f0..32d8a513a2de 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Parameters for creating an Azure Batch Task. 
- public partial class BatchTaskCreateContent + public partial class BatchTaskCreateOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,11 +45,11 @@ public partial class BatchTaskCreateContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). /// The command line of the Task. For multi-instance Tasks, the command line is executed as the primary Task, after the primary Task and all subtasks have finished executing the coordination command line. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. If you want to take advantage of such features, you should invoke the shell in the command line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c MyCommand" in Linux. If the command line refers to file paths, it should use a relative path (relative to the Task working directory), or use the Batch provided environment variable (https://learn.microsoft.com/azure/batch/batch-compute-node-environment-variables). /// or is null. - public BatchTaskCreateContent(string id, string commandLine) + public BatchTaskCreateOptions(string id, string commandLine) { Argument.AssertNotNull(id, nameof(id)); Argument.AssertNotNull(commandLine, nameof(commandLine)); @@ -62,7 +62,7 @@ public BatchTaskCreateContent(string id, string commandLine) ApplicationPackageReferences = new ChangeTrackingList(); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// A string that uniquely identifies the Task within the Job. 
The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). /// A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// How the Batch service should respond when the Task completes. @@ -80,7 +80,7 @@ public BatchTaskCreateContent(string id, string commandLine) /// A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. /// The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. /// Keeps track of any properties unknown to the library. 
- internal BatchTaskCreateContent(string id, string displayName, ExitConditions exitConditions, string commandLine, BatchTaskContainerSettings containerSettings, IList resourceFiles, IList outputFiles, IList environmentSettings, AffinityInfo affinityInfo, BatchTaskConstraints constraints, int? requiredSlots, UserIdentity userIdentity, MultiInstanceSettings multiInstanceSettings, BatchTaskDependencies dependsOn, IList applicationPackageReferences, AuthenticationTokenSettings authenticationTokenSettings, IDictionary serializedAdditionalRawData) + internal BatchTaskCreateOptions(string id, string displayName, ExitConditions exitConditions, string commandLine, BatchTaskContainerSettings containerSettings, IList resourceFiles, IList outputFiles, IList environmentSettings, BatchAffinityInfo affinityInfo, BatchTaskConstraints constraints, int? requiredSlots, UserIdentity userIdentity, MultiInstanceSettings multiInstanceSettings, BatchTaskDependencies dependsOn, IList applicationPackageReferences, AuthenticationTokenSettings authenticationTokenSettings, IDictionary serializedAdditionalRawData) { Id = id; DisplayName = displayName; @@ -101,8 +101,8 @@ internal BatchTaskCreateContent(string id, string displayName, ExitConditions ex _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal BatchTaskCreateContent() + /// Initializes a new instance of for deserialization. + internal BatchTaskCreateOptions() { } @@ -123,7 +123,7 @@ internal BatchTaskCreateContent() /// A list of environment variable settings for the Task. public IList EnvironmentSettings { get; } /// A locality hint that can be used by the Batch service to select a Compute Node on which to start the new Task. - public AffinityInfo AffinityInfo { get; set; } + public BatchAffinityInfo AffinityInfo { get; set; } /// The execution constraints that apply to this Task. 
If you do not specify constraints, the maxTaskRetryCount is the maxTaskRetryCount specified for the Job, the maxWallClockTime is infinite, and the retentionTime is 7 days. public BatchTaskConstraints Constraints { get; set; } /// The number of scheduling slots that the Task required to run. The default is 1. A Task can only be scheduled to run on a compute node if the node has enough free scheduling slots available. For multi-instance Tasks, this must be 1. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddResult.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateResult.Serialization.cs similarity index 74% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddResult.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateResult.Serialization.cs index 223d36e03b15..67bb8ad7b54e 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddResult.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateResult.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class BatchTaskAddResult : IUtf8JsonSerializable, IJsonModel + public partial class BatchTaskCreateResult : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWrit /// The client options for reading and writing models. 
protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchTaskAddResult)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchTaskCreateResult)} does not support writing '{format}' format."); } writer.WritePropertyName("status"u8); @@ -41,7 +41,7 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit if (Optional.IsDefined(ETag)) { writer.WritePropertyName("eTag"u8); - writer.WriteStringValue(ETag); + writer.WriteStringValue(ETag.Value.ToString()); } if (Optional.IsDefined(LastModified)) { @@ -75,19 +75,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - BatchTaskAddResult IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchTaskCreateResult IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(BatchTaskAddResult)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchTaskCreateResult)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeBatchTaskAddResult(document.RootElement, options); + return DeserializeBatchTaskCreateResult(document.RootElement, options); } - internal static BatchTaskAddResult DeserializeBatchTaskAddResult(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchTaskCreateResult DeserializeBatchTaskCreateResult(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -97,7 +97,7 @@ internal static BatchTaskAddResult DeserializeBatchTaskAddResult(JsonElement ele } BatchTaskAddStatus status = default; string taskId = default; - string eTag = default; + ETag? eTag = default; DateTimeOffset? 
lastModified = default; string location = default; BatchError error = default; @@ -117,7 +117,11 @@ internal static BatchTaskAddResult DeserializeBatchTaskAddResult(JsonElement ele } if (property.NameEquals("eTag"u8)) { - eTag = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + eTag = new ETag(property.Value.GetString()); continue; } if (property.NameEquals("lastModified"u8)) @@ -149,7 +153,7 @@ internal static BatchTaskAddResult DeserializeBatchTaskAddResult(JsonElement ele } } serializedAdditionalRawData = rawDataDictionary; - return new BatchTaskAddResult( + return new BatchTaskCreateResult( status, taskId, eTag, @@ -159,43 +163,43 @@ internal static BatchTaskAddResult DeserializeBatchTaskAddResult(JsonElement ele serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(BatchTaskAddResult)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchTaskCreateResult)} does not support writing '{options.Format}' format."); } } - BatchTaskAddResult IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchTaskCreateResult IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchTaskAddResult(document.RootElement, options); + return DeserializeBatchTaskCreateResult(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(BatchTaskAddResult)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchTaskCreateResult)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static BatchTaskAddResult FromResponse(Response response) + internal static BatchTaskCreateResult FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeBatchTaskAddResult(document.RootElement); + return DeserializeBatchTaskCreateResult(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddResult.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateResult.cs similarity index 86% rename from sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddResult.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateResult.cs index 2ac298b53fe2..5489d7870b83 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskAddResult.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskCreateResult.cs @@ -10,8 +10,8 @@ namespace Azure.Compute.Batch { - /// Result for a single Task added as part of an add Task collection operation. 
- public partial class BatchTaskAddResult + /// Result for a single Task created as part of an add Task collection operation. + public partial class BatchTaskCreateResult { /// /// Keeps track of any properties unknown to the library. @@ -45,11 +45,11 @@ public partial class BatchTaskAddResult /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// The status of the add Task request. /// The ID of the Task for which this is the result. /// is null. - internal BatchTaskAddResult(BatchTaskAddStatus status, string taskId) + internal BatchTaskCreateResult(BatchTaskAddStatus status, string taskId) { Argument.AssertNotNull(taskId, nameof(taskId)); @@ -57,7 +57,7 @@ internal BatchTaskAddResult(BatchTaskAddStatus status, string taskId) TaskId = taskId; } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The status of the add Task request. /// The ID of the Task for which this is the result. /// The ETag of the Task, if the Task was successfully added. You can use this to detect whether the Task has changed between requests. In particular, you can be pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Job in the meantime. @@ -65,7 +65,7 @@ internal BatchTaskAddResult(BatchTaskAddStatus status, string taskId) /// The URL of the Task, if the Task was successfully added. /// The error encountered while attempting to add the Task. /// Keeps track of any properties unknown to the library. - internal BatchTaskAddResult(BatchTaskAddStatus status, string taskId, string eTag, DateTimeOffset? lastModified, string location, BatchError error, IDictionary serializedAdditionalRawData) + internal BatchTaskCreateResult(BatchTaskAddStatus status, string taskId, ETag? eTag, DateTimeOffset? 
lastModified, string location, BatchError error, IDictionary serializedAdditionalRawData) { Status = status; TaskId = taskId; @@ -76,8 +76,8 @@ internal BatchTaskAddResult(BatchTaskAddStatus status, string taskId, string eTa _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal BatchTaskAddResult() + /// Initializes a new instance of for deserialization. + internal BatchTaskCreateResult() { } @@ -86,7 +86,7 @@ internal BatchTaskAddResult() /// The ID of the Task for which this is the result. public string TaskId { get; } /// The ETag of the Task, if the Task was successfully added. You can use this to detect whether the Task has changed between requests. In particular, you can be pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Job in the meantime. - public string ETag { get; } + public ETag? ETag { get; } /// The last modified time of the Task. public DateTimeOffset? LastModified { get; } /// The URL of the Task, if the Task was successfully added. 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.Serialization.cs index f042192f8ee9..bf22475c001d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.Serialization.cs @@ -93,7 +93,7 @@ internal static BatchTaskFailureInfo DeserializeBatchTaskFailureInfo(JsonElement { return null; } - ErrorCategory category = default; + BatchErrorSourceCategory category = default; string code = default; string message = default; IReadOnlyList details = default; @@ -103,7 +103,7 @@ internal static BatchTaskFailureInfo DeserializeBatchTaskFailureInfo(JsonElement { if (property.NameEquals("category"u8)) { - category = new ErrorCategory(property.Value.GetString()); + category = new BatchErrorSourceCategory(property.Value.GetString()); continue; } if (property.NameEquals("code"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.cs index 870ca54a89eb..ed4fb0d66093 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureInfo.cs @@ -47,7 +47,7 @@ public partial class BatchTaskFailureInfo /// Initializes a new instance of . /// The category of the Task error. - internal BatchTaskFailureInfo(ErrorCategory category) + internal BatchTaskFailureInfo(BatchErrorSourceCategory category) { Category = category; Details = new ChangeTrackingList(); @@ -59,7 +59,7 @@ internal BatchTaskFailureInfo(ErrorCategory category) /// A message describing the Task error, intended to be suitable for display in a user interface. /// A list of additional details related to the error. /// Keeps track of any properties unknown to the library. 
- internal BatchTaskFailureInfo(ErrorCategory category, string code, string message, IReadOnlyList details, IDictionary serializedAdditionalRawData) + internal BatchTaskFailureInfo(BatchErrorSourceCategory category, string code, string message, IReadOnlyList details, IDictionary serializedAdditionalRawData) { Category = category; Code = code; @@ -74,7 +74,7 @@ internal BatchTaskFailureInfo() } /// The category of the Task error. - public ErrorCategory Category { get; } + public BatchErrorSourceCategory Category { get; } /// An identifier for the Task error. Codes are invariant and are intended to be consumed programmatically. public string Code { get; } /// A message describing the Task error, intended to be suitable for display in a user interface. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureMode.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureMode.cs new file mode 100644 index 000000000000..f2a41c1f8a33 --- /dev/null +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskFailureMode.cs @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// + +#nullable disable + +using System; +using System.ComponentModel; + +namespace Azure.Compute.Batch +{ + /// TaskFailure enums. + public readonly partial struct BatchTaskFailureMode : IEquatable + { + private readonly string _value; + + /// Initializes a new instance of . + /// is null. + public BatchTaskFailureMode(string value) + { + _value = value ?? throw new ArgumentNullException(nameof(value)); + } + + private const string NoActionValue = "noaction"; + private const string PerformExitOptionsJobActionValue = "performexitoptionsjobaction"; + + /// Do nothing. The Job remains active unless terminated or disabled by some other means. + public static BatchTaskFailureMode NoAction { get; } = new BatchTaskFailureMode(NoActionValue); + /// Terminate the Job. The Job's terminationReason is set to 'AllTasksComplete'. 
+ public static BatchTaskFailureMode PerformExitOptionsJobAction { get; } = new BatchTaskFailureMode(PerformExitOptionsJobActionValue); + /// Determines if two values are the same. + public static bool operator ==(BatchTaskFailureMode left, BatchTaskFailureMode right) => left.Equals(right); + /// Determines if two values are not the same. + public static bool operator !=(BatchTaskFailureMode left, BatchTaskFailureMode right) => !left.Equals(right); + /// Converts a to a . + public static implicit operator BatchTaskFailureMode(string value) => new BatchTaskFailureMode(value); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override bool Equals(object obj) => obj is BatchTaskFailureMode other && Equals(other); + /// + public bool Equals(BatchTaskFailureMode other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); + + /// + [EditorBrowsable(EditorBrowsableState.Never)] + public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; + /// + public override string ToString() => _value; + } +} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.Serialization.cs index 0e0366ab6f5c..7b7ced312f7f 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.Serialization.cs @@ -36,7 +36,7 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("value"u8); writer.WriteStartArray(); - foreach (var item in Value) + foreach (var item in Values) { writer.WriteObjectValue(item, options); } @@ -78,17 +78,17 @@ internal static BatchTaskGroup DeserializeBatchTaskGroup(JsonElement element, Mo { return null; } - IList value = default; + IList value = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary 
= new Dictionary(); foreach (var property in element.EnumerateObject()) { if (property.NameEquals("value"u8)) { - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - array.Add(BatchTaskCreateContent.DeserializeBatchTaskCreateContent(item, options)); + array.Add(BatchTaskCreateOptions.DeserializeBatchTaskCreateOptions(item, options)); } value = array; continue; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.cs index 952152f88850..8288b612dd46 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskGroup.cs @@ -47,21 +47,21 @@ public partial class BatchTaskGroup private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. - /// is null. - public BatchTaskGroup(IEnumerable value) + /// The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. + /// is null. + public BatchTaskGroup(IEnumerable values) { - Argument.AssertNotNull(value, nameof(value)); + Argument.AssertNotNull(values, nameof(values)); - Value = value.ToList(); + Values = values.ToList(); } /// Initializes a new instance of . - /// The collection of Tasks to add. The maximum count of Tasks is 100. 
The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. + /// The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. /// Keeps track of any properties unknown to the library. - internal BatchTaskGroup(IList value, IDictionary serializedAdditionalRawData) + internal BatchTaskGroup(IList values, IDictionary serializedAdditionalRawData) { - Value = value; + Values = values; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -71,6 +71,6 @@ internal BatchTaskGroup() } /// The collection of Tasks to add. The maximum count of Tasks is 100. The total serialized size of this collection must be less than 1MB. If it is greater than 1MB (for example if each Task has 100's of resource files or environment variables), the request will fail with code 'RequestBodyTooLarge' and should be retried again with fewer Tasks. 
- public IList Value { get; } + public IList Values { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.Serialization.cs index b273a4445c98..3d197a38eef2 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.Serialization.cs @@ -34,10 +34,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit throw new FormatException($"The model {nameof(BatchTaskInfo)} does not support writing '{format}' format."); } - if (Optional.IsDefined(TaskUrl)) + if (Optional.IsDefined(TaskUri)) { writer.WritePropertyName("taskUrl"u8); - writer.WriteStringValue(TaskUrl); + writer.WriteStringValue(TaskUri.AbsoluteUri); } if (Optional.IsDefined(JobId)) { @@ -98,7 +98,7 @@ internal static BatchTaskInfo DeserializeBatchTaskInfo(JsonElement element, Mode { return null; } - string taskUrl = default; + Uri taskUrl = default; string jobId = default; string taskId = default; int? subtaskId = default; @@ -110,7 +110,11 @@ internal static BatchTaskInfo DeserializeBatchTaskInfo(JsonElement element, Mode { if (property.NameEquals("taskUrl"u8)) { - taskUrl = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + taskUrl = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("jobId"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.cs index 1302e69a62a5..c7b04244cba3 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskInfo.cs @@ -53,16 +53,16 @@ internal BatchTaskInfo(BatchTaskState taskState) } /// Initializes a new instance of . - /// The URL of the Task. + /// The URL of the Task. /// The ID of the Job to which the Task belongs. 
/// The ID of the Task. /// The ID of the subtask if the Task is a multi-instance Task. /// The current state of the Task. /// Information about the execution of the Task. /// Keeps track of any properties unknown to the library. - internal BatchTaskInfo(string taskUrl, string jobId, string taskId, int? subtaskId, BatchTaskState taskState, BatchTaskExecutionInfo executionInfo, IDictionary serializedAdditionalRawData) + internal BatchTaskInfo(Uri taskUri, string jobId, string taskId, int? subtaskId, BatchTaskState taskState, BatchTaskExecutionInfo executionInfo, IDictionary serializedAdditionalRawData) { - TaskUrl = taskUrl; + TaskUri = taskUri; JobId = jobId; TaskId = taskId; SubtaskId = subtaskId; @@ -77,7 +77,7 @@ internal BatchTaskInfo() } /// The URL of the Task. - public string TaskUrl { get; } + public Uri TaskUri { get; } /// The ID of the Job to which the Task belongs. public string JobId { get; } /// The ID of the Task. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.Serialization.cs index 17e9ea906968..491623fd1a1c 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.Serialization.cs @@ -35,7 +35,7 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } writer.WritePropertyName("url"u8); - writer.WriteStringValue(Url); + writer.WriteStringValue(Uri.AbsoluteUri); writer.WritePropertyName("startTime"u8); writer.WriteStringValue(StartTime, "O"); writer.WritePropertyName("lastUpdateTime"u8); @@ -47,13 +47,13 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("wallClockTime"u8); writer.WriteStringValue(WallClockTime, "P"); writer.WritePropertyName("readIOps"u8); - writer.WriteStringValue(ReadIOps.ToString()); + 
writer.WriteStringValue(ReadIops.ToString()); writer.WritePropertyName("writeIOps"u8); - writer.WriteStringValue(WriteIOps.ToString()); + writer.WriteStringValue(WriteIops.ToString()); writer.WritePropertyName("readIOGiB"u8); - writer.WriteNumberValue(ReadIOGiB); + writer.WriteNumberValue(ReadIoGiB); writer.WritePropertyName("writeIOGiB"u8); - writer.WriteNumberValue(WriteIOGiB); + writer.WriteNumberValue(WriteIoGiB); writer.WritePropertyName("waitTime"u8); writer.WriteStringValue(WaitTime, "P"); if (options.Format != "W" && _serializedAdditionalRawData != null) @@ -93,7 +93,7 @@ internal static BatchTaskStatistics DeserializeBatchTaskStatistics(JsonElement e { return null; } - string url = default; + Uri url = default; DateTimeOffset startTime = default; DateTimeOffset lastUpdateTime = default; TimeSpan userCPUTime = default; @@ -110,7 +110,7 @@ internal static BatchTaskStatistics DeserializeBatchTaskStatistics(JsonElement e { if (property.NameEquals("url"u8)) { - url = property.Value.GetString(); + url = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("startTime"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.cs index 59520d11e905..47e2dc1cb4eb 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchTaskStatistics.cs @@ -46,60 +46,60 @@ public partial class BatchTaskStatistics private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// The URL of the statistics. + /// The URL of the statistics. /// The start time of the time range covered by the statistics. /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. 
/// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. /// The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. - /// The total number of disk read operations made by the Task. - /// The total number of disk write operations made by the Task. - /// The total gibibytes read from disk by the Task. - /// The total gibibytes written to disk by the Task. + /// The total number of disk read operations made by the Task. + /// The total number of disk write operations made by the Task. + /// The total gibibytes read from disk by the Task. + /// The total gibibytes written to disk by the Task. /// The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). - /// is null. - internal BatchTaskStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, TimeSpan waitTime) + /// is null. 
+ internal BatchTaskStatistics(Uri uri, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIops, long writeIops, float readIoGiB, float writeIoGiB, TimeSpan waitTime) { - Argument.AssertNotNull(url, nameof(url)); + Argument.AssertNotNull(uri, nameof(uri)); - Url = url; + Uri = uri; StartTime = startTime; LastUpdateTime = lastUpdateTime; UserCpuTime = userCpuTime; KernelCpuTime = kernelCpuTime; WallClockTime = wallClockTime; - ReadIOps = readIOps; - WriteIOps = writeIOps; - ReadIOGiB = readIOGiB; - WriteIOGiB = writeIOGiB; + ReadIops = readIops; + WriteIops = writeIops; + ReadIoGiB = readIoGiB; + WriteIoGiB = writeIoGiB; WaitTime = waitTime; } /// Initializes a new instance of . - /// The URL of the statistics. + /// The URL of the statistics. /// The start time of the time range covered by the statistics. /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. /// The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. - /// The total number of disk read operations made by the Task. - /// The total number of disk write operations made by the Task. - /// The total gibibytes read from disk by the Task. - /// The total gibibytes written to disk by the Task. + /// The total number of disk read operations made by the Task. + /// The total number of disk write operations made by the Task. + /// The total gibibytes read from disk by the Task. 
+ /// The total gibibytes written to disk by the Task. /// The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). /// Keeps track of any properties unknown to the library. - internal BatchTaskStatistics(string url, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIOps, long writeIOps, float readIOGiB, float writeIOGiB, TimeSpan waitTime, IDictionary serializedAdditionalRawData) + internal BatchTaskStatistics(Uri uri, DateTimeOffset startTime, DateTimeOffset lastUpdateTime, TimeSpan userCpuTime, TimeSpan kernelCpuTime, TimeSpan wallClockTime, long readIops, long writeIops, float readIoGiB, float writeIoGiB, TimeSpan waitTime, IDictionary serializedAdditionalRawData) { - Url = url; + Uri = uri; StartTime = startTime; LastUpdateTime = lastUpdateTime; UserCpuTime = userCpuTime; KernelCpuTime = kernelCpuTime; WallClockTime = wallClockTime; - ReadIOps = readIOps; - WriteIOps = writeIOps; - ReadIOGiB = readIOGiB; - WriteIOGiB = writeIOGiB; + ReadIops = readIops; + WriteIops = writeIops; + ReadIoGiB = readIoGiB; + WriteIoGiB = writeIoGiB; WaitTime = waitTime; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -110,7 +110,7 @@ internal BatchTaskStatistics() } /// The URL of the statistics. - public string Url { get; } + public Uri Uri { get; } /// The start time of the time range covered by the statistics. public DateTimeOffset StartTime { get; } /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. @@ -122,13 +122,13 @@ internal BatchTaskStatistics() /// The total wall clock time of the Task. 
The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. public TimeSpan WallClockTime { get; } /// The total number of disk read operations made by the Task. - public long ReadIOps { get; } + public long ReadIops { get; } /// The total number of disk write operations made by the Task. - public long WriteIOps { get; } + public long WriteIops { get; } /// The total gibibytes read from disk by the Task. - public float ReadIOGiB { get; } + public float ReadIoGiB { get; } /// The total gibibytes written to disk by the Task. - public float WriteIOGiB { get; } + public float WriteIoGiB { get; } /// The total wait time of the Task. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). 
public TimeSpan WaitTime { get; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UefiSettings.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchUefiSettings.Serialization.cs similarity index 71% rename from sdk/batch/Azure.Compute.Batch/src/Generated/UefiSettings.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchUefiSettings.Serialization.cs index 72c57da5fd86..e6591b3de2c4 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/UefiSettings.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchUefiSettings.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class UefiSettings : IUtf8JsonSerializable, IJsonModel + public partial class BatchUefiSettings : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOpti /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(UefiSettings)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchUefiSettings)} does not support writing '{format}' format."); } if (Optional.IsDefined(SecureBootEnabled)) @@ -61,19 +61,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - UefiSettings IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchUefiSettings IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(UefiSettings)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchUefiSettings)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeUefiSettings(document.RootElement, options); + return DeserializeBatchUefiSettings(document.RootElement, options); } - internal static UefiSettings DeserializeUefiSettings(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchUefiSettings DeserializeBatchUefiSettings(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -111,46 +111,46 @@ internal static UefiSettings DeserializeUefiSettings(JsonElement element, ModelR } } serializedAdditionalRawData = rawDataDictionary; - return new UefiSettings(secureBootEnabled, vTpmEnabled, serializedAdditionalRawData); + return new BatchUefiSettings(secureBootEnabled, vTpmEnabled, serializedAdditionalRawData); } - BinaryData 
IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(UefiSettings)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchUefiSettings)} does not support writing '{options.Format}' format."); } } - UefiSettings IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchUefiSettings IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeUefiSettings(document.RootElement, options); + return DeserializeBatchUefiSettings(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(UefiSettings)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchUefiSettings)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static UefiSettings FromResponse(Response response) + internal static BatchUefiSettings FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeUefiSettings(document.RootElement); + return DeserializeBatchUefiSettings(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UefiSettings.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchUefiSettings.cs similarity index 86% rename from sdk/batch/Azure.Compute.Batch/src/Generated/UefiSettings.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchUefiSettings.cs index bc8c7c1a7f49..fae15ebc0359 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/UefiSettings.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchUefiSettings.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Specifies the security settings like secure boot and vTPM used while creating the virtual machine. - public partial class UefiSettings + public partial class BatchUefiSettings { /// /// Keeps track of any properties unknown to the library. @@ -45,16 +45,16 @@ public partial class UefiSettings /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public UefiSettings() + /// Initializes a new instance of . + public BatchUefiSettings() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// Specifies whether secure boot should be enabled on the virtual machine. /// Specifies whether vTPM should be enabled on the virtual machine. /// Keeps track of any properties unknown to the library. - internal UefiSettings(bool? secureBootEnabled, bool? vTpmEnabled, IDictionary serializedAdditionalRawData) + internal BatchUefiSettings(bool? secureBootEnabled, bool? 
vTpmEnabled, IDictionary serializedAdditionalRawData) { SecureBootEnabled = secureBootEnabled; VTpmEnabled = vTpmEnabled; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UserAssignedIdentity.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchUserAssignedIdentity.Serialization.cs similarity index 67% rename from sdk/batch/Azure.Compute.Batch/src/Generated/UserAssignedIdentity.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchUserAssignedIdentity.Serialization.cs index 0edf504e9b09..d20d8dc65bc9 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/UserAssignedIdentity.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchUserAssignedIdentity.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class UserAssignedIdentity : IUtf8JsonSerializable, IJsonModel + public partial class BatchUserAssignedIdentity : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWr /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(UserAssignedIdentity)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchUserAssignedIdentity)} does not support writing '{format}' format."); } writer.WritePropertyName("resourceId"u8); @@ -63,19 +63,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - UserAssignedIdentity IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchUserAssignedIdentity IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(UserAssignedIdentity)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchUserAssignedIdentity)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeUserAssignedIdentity(document.RootElement, options); + return DeserializeBatchUserAssignedIdentity(document.RootElement, options); } - internal static UserAssignedIdentity DeserializeUserAssignedIdentity(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchUserAssignedIdentity DeserializeBatchUserAssignedIdentity(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -83,7 +83,7 @@ internal static UserAssignedIdentity DeserializeUserAssignedIdentity(JsonElement { return null; } - string resourceId = default; + ResourceIdentifier resourceId = default; string clientId = default; string principalId = default; IDictionary 
serializedAdditionalRawData = default; @@ -92,7 +92,7 @@ internal static UserAssignedIdentity DeserializeUserAssignedIdentity(JsonElement { if (property.NameEquals("resourceId"u8)) { - resourceId = property.Value.GetString(); + resourceId = new ResourceIdentifier(property.Value.GetString()); continue; } if (property.NameEquals("clientId"u8)) @@ -111,46 +111,46 @@ internal static UserAssignedIdentity DeserializeUserAssignedIdentity(JsonElement } } serializedAdditionalRawData = rawDataDictionary; - return new UserAssignedIdentity(resourceId, clientId, principalId, serializedAdditionalRawData); + return new BatchUserAssignedIdentity(resourceId, clientId, principalId, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(UserAssignedIdentity)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchUserAssignedIdentity)} does not support writing '{options.Format}' format."); } } - UserAssignedIdentity IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchUserAssignedIdentity IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeUserAssignedIdentity(document.RootElement, options); + return DeserializeBatchUserAssignedIdentity(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(UserAssignedIdentity)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchUserAssignedIdentity)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static UserAssignedIdentity FromResponse(Response response) + internal static BatchUserAssignedIdentity FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeUserAssignedIdentity(document.RootElement); + return DeserializeBatchUserAssignedIdentity(document.RootElement); } /// Convert into a . 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UserAssignedIdentity.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchUserAssignedIdentity.cs similarity index 79% rename from sdk/batch/Azure.Compute.Batch/src/Generated/UserAssignedIdentity.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchUserAssignedIdentity.cs index efdf060fdc91..91f17861422d 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/UserAssignedIdentity.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchUserAssignedIdentity.cs @@ -7,11 +7,12 @@ using System; using System.Collections.Generic; +using Azure.Core; namespace Azure.Compute.Batch { /// The user assigned Identity. - public partial class UserAssignedIdentity + public partial class BatchUserAssignedIdentity { /// /// Keeps track of any properties unknown to the library. @@ -45,22 +46,22 @@ public partial class UserAssignedIdentity /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// The ARM resource id of the user assigned identity. /// is null. - internal UserAssignedIdentity(string resourceId) + internal BatchUserAssignedIdentity(ResourceIdentifier resourceId) { Argument.AssertNotNull(resourceId, nameof(resourceId)); ResourceId = resourceId; } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The ARM resource id of the user assigned identity. /// The client id of the user assigned identity. /// The principal id of the user assigned identity. /// Keeps track of any properties unknown to the library. 
- internal UserAssignedIdentity(string resourceId, string clientId, string principalId, IDictionary serializedAdditionalRawData) + internal BatchUserAssignedIdentity(ResourceIdentifier resourceId, string clientId, string principalId, IDictionary serializedAdditionalRawData) { ResourceId = resourceId; ClientId = clientId; @@ -68,13 +69,13 @@ internal UserAssignedIdentity(string resourceId, string clientId, string princip _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal UserAssignedIdentity() + /// Initializes a new instance of for deserialization. + internal BatchUserAssignedIdentity() { } /// The ARM resource id of the user assigned identity. - public string ResourceId { get; } + public ResourceIdentifier ResourceId { get; } /// The client id of the user assigned identity. public string ClientId { get; } /// The principal id of the user assigned identity. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchVmImageReference.Serialization.cs similarity index 75% rename from sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchVmImageReference.Serialization.cs index 2090e0b9e51d..62738a2251e0 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchVmImageReference.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class ImageReference : IUtf8JsonSerializable, IJsonModel + public partial class BatchVmImageReference : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, 
ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOp /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(ImageReference)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(BatchVmImageReference)} does not support writing '{format}' format."); } if (Optional.IsDefined(Publisher)) @@ -91,19 +91,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - ImageReference IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + BatchVmImageReference IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(ImageReference)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(BatchVmImageReference)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeImageReference(document.RootElement, options); + return DeserializeBatchVmImageReference(document.RootElement, options); } - internal static ImageReference DeserializeImageReference(JsonElement element, ModelReaderWriterOptions options = null) + internal static BatchVmImageReference DeserializeBatchVmImageReference(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -115,7 +115,7 @@ internal static ImageReference DeserializeImageReference(JsonElement element, Mo string offer = default; string sku = default; string version = default; - string virtualMachineImageId = default; + ResourceIdentifier virtualMachineImageId = default; string exactVersion = default; string sharedGalleryImageId = default; string communityGalleryImageId = default; @@ -145,7 +145,11 @@ internal static ImageReference DeserializeImageReference(JsonElement element, Mo } if (property.NameEquals("virtualMachineImageId"u8)) { - virtualMachineImageId = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + virtualMachineImageId = new ResourceIdentifier(property.Value.GetString()); continue; } if (property.NameEquals("exactVersion"u8)) @@ -169,7 +173,7 @@ internal static ImageReference DeserializeImageReference(JsonElement element, Mo } } serializedAdditionalRawData = rawDataDictionary; - return new ImageReference( + return new BatchVmImageReference( publisher, offer, sku, @@ -181,43 +185,43 @@ internal static ImageReference DeserializeImageReference(JsonElement element, Mo 
serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(ImageReference)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchVmImageReference)} does not support writing '{options.Format}' format."); } } - ImageReference IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + BatchVmImageReference IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeImageReference(document.RootElement, options); + return DeserializeBatchVmImageReference(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(ImageReference)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(BatchVmImageReference)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. 
/// The response to deserialize the model from. - internal static ImageReference FromResponse(Response response) + internal static BatchVmImageReference FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeImageReference(document.RootElement); + return DeserializeBatchVmImageReference(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchVmImageReference.cs similarity index 92% rename from sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/BatchVmImageReference.cs index 67fa3216c0de..31e59b8d39fb 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ImageReference.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/BatchVmImageReference.cs @@ -7,6 +7,7 @@ using System; using System.Collections.Generic; +using Azure.Core; namespace Azure.Compute.Batch { @@ -15,7 +16,7 @@ namespace Azure.Compute.Batch /// To get the list of all Azure Marketplace Image references verified by Azure Batch, see the /// ' List Supported Images ' operation. /// - public partial class ImageReference + public partial class BatchVmImageReference { /// /// Keeps track of any properties unknown to the library. @@ -49,12 +50,12 @@ public partial class ImageReference /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public ImageReference() + /// Initializes a new instance of . + public BatchVmImageReference() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. /// The offer type of the Azure Virtual Machines Marketplace Image. For example, UbuntuServer or WindowsServer. 
/// The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. @@ -64,7 +65,7 @@ public ImageReference() /// The shared gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. /// The community gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from community gallery image GET call. /// Keeps track of any properties unknown to the library. - internal ImageReference(string publisher, string offer, string sku, string version, string virtualMachineImageId, string exactVersion, string sharedGalleryImageId, string communityGalleryImageId, IDictionary serializedAdditionalRawData) + internal BatchVmImageReference(string publisher, string offer, string sku, string version, ResourceIdentifier virtualMachineImageId, string exactVersion, string sharedGalleryImageId, string communityGalleryImageId, IDictionary serializedAdditionalRawData) { Publisher = publisher; Offer = offer; @@ -86,7 +87,7 @@ internal ImageReference(string publisher, string offer, string sku, string versi /// The version of the Azure Virtual Machines Marketplace Image. A value of 'latest' can be specified to select the latest version of an Image. If omitted, the default is 'latest'. public string Version { get; set; } /// The ARM resource identifier of the Azure Compute Gallery Image. Compute Nodes in the Pool will be created using this Image Id. This is of the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName}/versions/{VersionId} or /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageDefinitionName} for always defaulting to the latest image version. This property is mutually exclusive with other ImageReference properties. 
The Azure Compute Gallery Image must have replicas in the same region and must be in the same subscription as the Azure Batch account. If the image version is not specified in the imageId, the latest version will be used. For information about the firewall settings for the Batch Compute Node agent to communicate with the Batch service see https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration. - public string VirtualMachineImageId { get; set; } + public ResourceIdentifier VirtualMachineImageId { get; set; } /// The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. public string ExactVersion { get; } /// The shared gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs index 8e8562bc669e..624bc8ad9d99 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ComputeBatchModelFactory.cs @@ -8,6 +8,8 @@ using System; using System.Collections.Generic; using System.Linq; +using System.Net; +using Azure.Core; namespace Azure.Compute.Batch { @@ -74,7 +76,7 @@ public static BatchPoolUsageMetrics BatchPoolUsageMetrics(string poolId = null, serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. 
The ID is case-preserving and case-insensitive (that is, you may not have two Pool IDs within an Account that differ only by case). /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// The size of virtual machines in the Pool. All virtual machines in a Pool are the same size. For information about available VM sizes for Pools using Images from the Virtual Machines Marketplace (pools created with virtualMachineConfiguration), see Sizes for Virtual Machines in Azure (https://learn.microsoft.com/azure/virtual-machines/sizes/overview). Batch supports all Azure VM sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). @@ -103,17 +105,17 @@ public static BatchPoolUsageMetrics BatchPoolUsageMetrics(string poolId = null, /// Mount storage using specified file system for the entire lifetime of the pool. Mount the storage using Azure fileshare, NFS, CIFS or Blobfuse based file system. /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. - /// A new instance for mocking. - public static BatchPoolCreateContent BatchPoolCreateContent(string id = null, string displayName = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, IDictionary resourceTags = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable certificateReferences = null, IEnumerable applicationPackageReferences = null, int? 
taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, IEnumerable mountConfiguration = null, BatchNodeCommunicationMode? targetNodeCommunicationMode = null, UpgradePolicy upgradePolicy = null) + /// A new instance for mocking. + public static BatchPoolCreateOptions BatchPoolCreateOptions(string id = null, string displayName = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, IDictionary resourceTags = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable certificateReferences = null, IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, IEnumerable mountConfiguration = null, BatchNodeCommunicationMode? targetNodeCommunicationMode = null, UpgradePolicy upgradePolicy = null) { resourceTags ??= new Dictionary(); certificateReferences ??= new List(); applicationPackageReferences ??= new List(); userAccounts ??= new List(); - metadata ??= new List(); + metadata ??= new List(); mountConfiguration ??= new List(); - return new BatchPoolCreateContent( + return new BatchPoolCreateOptions( id, displayName, vmSize, @@ -140,7 +142,7 @@ public static BatchPoolCreateContent BatchPoolCreateContent(string id = null, st serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The publisher of the Azure Virtual Machines Marketplace Image. For example, Canonical or MicrosoftWindowsServer. /// The offer type of the Azure Virtual Machines Marketplace Image. 
For example, UbuntuServer or WindowsServer. /// The SKU of the Azure Virtual Machines Marketplace Image. For example, 18.04-LTS or 2019-Datacenter. @@ -149,10 +151,10 @@ public static BatchPoolCreateContent BatchPoolCreateContent(string id = null, st /// The specific version of the platform image or marketplace image used to create the node. This read-only field differs from 'version' only if the value specified for 'version' when the pool was created was 'latest'. /// The shared gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from shared gallery image GET call. /// The community gallery image unique identifier. This property is mutually exclusive with other properties and can be fetched from community gallery image GET call. - /// A new instance for mocking. - public static ImageReference ImageReference(string publisher = null, string offer = null, string sku = null, string version = null, string virtualMachineImageId = null, string exactVersion = null, string sharedGalleryImageId = null, string communityGalleryImageId = null) + /// A new instance for mocking. + public static BatchVmImageReference BatchVmImageReference(string publisher = null, string offer = null, string sku = null, string version = null, ResourceIdentifier virtualMachineImageId = null, string exactVersion = null, string sharedGalleryImageId = null, string communityGalleryImageId = null) { - return new ImageReference( + return new BatchVmImageReference( publisher, offer, sku, @@ -167,7 +169,7 @@ public static ImageReference ImageReference(string publisher = null, string offe /// Initializes a new instance of . /// A string that uniquely identifies the Pool within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. 
The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). /// The display name for the Pool. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. - /// The URL of the Pool. + /// The URL of the Pool. /// The ETag of the Pool. This is an opaque string. You can use it to detect whether the Pool has changed between requests. In particular, you can be pass the ETag when updating a Pool to specify that your changes should take effect only if nobody else has modified the Pool in the meantime. /// The last modified time of the Pool. This is the last time at which the Pool level data, such as the targetDedicatedNodes or enableAutoscale settings, changed. It does not factor in node-level changes such as a Compute Node changing state. /// The creation time of the Pool. @@ -202,27 +204,27 @@ public static ImageReference ImageReference(string publisher = null, string offe /// How Tasks are distributed across Compute Nodes in a Pool. If not specified, the default is spread. /// The list of user Accounts to be created on each Compute Node in the Pool. /// A list of name-value pairs associated with the Pool as metadata. - /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// Utilization and resource usage statistics for the entire lifetime of the Pool. This property is populated only if the BatchPool was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. 
/// A list of file systems to mount on each node in the pool. This supports Azure Files, NFS, CIFS/SMB, and Blobfuse. /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. /// The desired node communication mode for the pool. If omitted, the default value is Default. /// The current state of the pool communication mode. /// The upgrade policy for the Pool. Describes an upgrade policy - automatic, manual, or rolling. /// A new instance for mocking. - public static BatchPool BatchPool(string id = null, string displayName = null, string url = null, string eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchPoolState? state = null, DateTimeOffset? stateTransitionTime = null, AllocationState? allocationState = null, DateTimeOffset? allocationStateTransitionTime = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, IEnumerable resizeErrors = null, IReadOnlyDictionary resourceTags = null, int? currentDedicatedNodes = null, int? currentLowPriorityNodes = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, AutoScaleRun autoScaleRun = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable certificateReferences = null, IEnumerable applicationPackageReferences = null, int? 
taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, BatchPoolStatistics stats = null, IEnumerable mountConfiguration = null, BatchPoolIdentity identity = null, BatchNodeCommunicationMode? targetNodeCommunicationMode = null, BatchNodeCommunicationMode? currentNodeCommunicationMode = null, UpgradePolicy upgradePolicy = null) + public static BatchPool BatchPool(string id = null, string displayName = null, Uri uri = null, ETag? eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchPoolState? state = null, DateTimeOffset? stateTransitionTime = null, AllocationState? allocationState = null, DateTimeOffset? allocationStateTransitionTime = null, string vmSize = null, VirtualMachineConfiguration virtualMachineConfiguration = null, TimeSpan? resizeTimeout = null, IEnumerable resizeErrors = null, IReadOnlyDictionary resourceTags = null, int? currentDedicatedNodes = null, int? currentLowPriorityNodes = null, int? targetDedicatedNodes = null, int? targetLowPriorityNodes = null, bool? enableAutoScale = null, string autoScaleFormula = null, TimeSpan? autoScaleEvaluationInterval = null, AutoScaleRun autoScaleRun = null, bool? enableInterNodeCommunication = null, NetworkConfiguration networkConfiguration = null, BatchStartTask startTask = null, IEnumerable certificateReferences = null, IEnumerable applicationPackageReferences = null, int? taskSlotsPerNode = null, BatchTaskSchedulingPolicy taskSchedulingPolicy = null, IEnumerable userAccounts = null, IEnumerable metadata = null, BatchPoolStatistics poolStatistics = null, IEnumerable mountConfiguration = null, BatchPoolIdentity identity = null, BatchNodeCommunicationMode? targetNodeCommunicationMode = null, BatchNodeCommunicationMode? 
currentNodeCommunicationMode = null, UpgradePolicy upgradePolicy = null) { resizeErrors ??= new List(); resourceTags ??= new Dictionary(); certificateReferences ??= new List(); applicationPackageReferences ??= new List(); userAccounts ??= new List(); - metadata ??= new List(); + metadata ??= new List(); mountConfiguration ??= new List(); return new BatchPool( id, displayName, - url, + uri, eTag, lastModified, creationTime, @@ -252,7 +254,7 @@ public static BatchPool BatchPool(string id = null, string displayName = null, s taskSchedulingPolicy, userAccounts?.ToList(), metadata?.ToList(), - stats, + poolStatistics, mountConfiguration?.ToList(), identity, targetNodeCommunicationMode, @@ -305,20 +307,20 @@ public static AutoScaleRunError AutoScaleRunError(string code = null, string mes } /// Initializes a new instance of . - /// The URL for the statistics. + /// The URL for the statistics. /// The start time of the time range covered by the statistics. /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. - /// Statistics related to Pool usage, such as the amount of core-time used. - /// Statistics related to resource consumption by Compute Nodes in the Pool. + /// Statistics related to Pool usage, such as the amount of core-time used. + /// Statistics related to resource consumption by Compute Nodes in the Pool. /// A new instance for mocking. 
- public static BatchPoolStatistics BatchPoolStatistics(string url = null, DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, BatchPoolUsageStatistics usageStats = null, BatchPoolResourceStatistics resourceStats = null) + public static BatchPoolStatistics BatchPoolStatistics(Uri uri = null, DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, BatchPoolUsageStatistics usageStatistics = null, BatchPoolResourceStatistics resourceStatistics = null) { return new BatchPoolStatistics( - url, + uri, startTime, lastUpdateTime, - usageStats, - resourceStats, + usageStatistics, + resourceStatistics, serializedAdditionalRawData: null); } @@ -340,14 +342,14 @@ public static BatchPoolUsageStatistics BatchPoolUsageStatistics(DateTimeOffset s /// The peak memory usage in GiB across all Compute Nodes in the Pool. /// The average used disk space in GiB across all Compute Nodes in the Pool. /// The peak used disk space in GiB across all Compute Nodes in the Pool. - /// The total number of disk read operations across all Compute Nodes in the Pool. - /// The total number of disk write operations across all Compute Nodes in the Pool. + /// The total number of disk read operations across all Compute Nodes in the Pool. + /// The total number of disk write operations across all Compute Nodes in the Pool. /// The total amount of data in GiB of disk reads across all Compute Nodes in the Pool. /// The total amount of data in GiB of disk writes across all Compute Nodes in the Pool. /// The total amount of data in GiB of network reads across all Compute Nodes in the Pool. /// The total amount of data in GiB of network writes across all Compute Nodes in the Pool. /// A new instance for mocking. 
- public static BatchPoolResourceStatistics BatchPoolResourceStatistics(DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, float avgCpuPercentage = default, float avgMemoryGiB = default, float peakMemoryGiB = default, float avgDiskGiB = default, float peakDiskGiB = default, long diskReadIOps = default, long diskWriteIOps = default, float diskReadGiB = default, float diskWriteGiB = default, float networkReadGiB = default, float networkWriteGiB = default) + public static BatchPoolResourceStatistics BatchPoolResourceStatistics(DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, float avgCpuPercentage = default, float avgMemoryGiB = default, float peakMemoryGiB = default, float avgDiskGiB = default, float peakDiskGiB = default, long diskReadIops = default, long diskWriteIops = default, float diskReadGiB = default, float diskWriteGiB = default, float networkReadGiB = default, float networkWriteGiB = default) { return new BatchPoolResourceStatistics( startTime, @@ -357,8 +359,8 @@ public static BatchPoolResourceStatistics BatchPoolResourceStatistics(DateTimeOf peakMemoryGiB, avgDiskGiB, peakDiskGiB, - diskReadIOps, - diskWriteIOps, + diskReadIops, + diskWriteIops, diskReadGiB, diskWriteGiB, networkReadGiB, @@ -370,21 +372,21 @@ public static BatchPoolResourceStatistics BatchPoolResourceStatistics(DateTimeOf /// The identity of the Batch pool, if configured. The list of user identities associated with the Batch pool. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. /// The list of user identities associated with the Batch account. 
The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. /// A new instance for mocking. - public static BatchPoolIdentity BatchPoolIdentity(BatchPoolIdentityType type = default, IEnumerable userAssignedIdentities = null) + public static BatchPoolIdentity BatchPoolIdentity(BatchPoolIdentityType type = default, IEnumerable userAssignedIdentities = null) { - userAssignedIdentities ??= new List(); + userAssignedIdentities ??= new List(); return new BatchPoolIdentity(type, userAssignedIdentities?.ToList(), serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The ARM resource id of the user assigned identity. /// The client id of the user assigned identity. /// The principal id of the user assigned identity. - /// A new instance for mocking. - public static UserAssignedIdentity UserAssignedIdentity(string resourceId = null, string clientId = null, string principalId = null) + /// A new instance for mocking. + public static BatchUserAssignedIdentity BatchUserAssignedIdentity(ResourceIdentifier resourceId = null, string clientId = null, string principalId = null) { - return new UserAssignedIdentity(resourceId, clientId, principalId, serializedAdditionalRawData: null); + return new BatchUserAssignedIdentity(resourceId, clientId, principalId, serializedAdditionalRawData: null); } /// Initializes a new instance of . @@ -395,7 +397,7 @@ public static UserAssignedIdentity UserAssignedIdentity(string resourceId = null /// The time when the Azure Batch service will stop accepting create Pool requests for the Image. /// Whether the Azure Batch service actively verifies that the Image is compatible with the associated Compute Node agent SKU. /// A new instance for mocking. 
- public static BatchSupportedImage BatchSupportedImage(string nodeAgentSkuId = null, ImageReference imageReference = null, OSType osType = default, IEnumerable capabilities = null, DateTimeOffset? batchSupportEndOfLife = null, ImageVerificationType verificationType = default) + public static BatchSupportedImage BatchSupportedImage(string nodeAgentSkuId = null, BatchVmImageReference imageReference = null, OSType osType = default, IEnumerable capabilities = null, DateTimeOffset? batchSupportEndOfLife = null, ImageVerificationType verificationType = default) { capabilities ??= new List(); @@ -465,7 +467,7 @@ public static BatchNodeCounts BatchNodeCounts(int creating = default, int idle = /// A string that uniquely identifies the Job within the Account. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). /// The display name for the Job. /// Whether Tasks in the Job can define dependencies on each other. The default is false. - /// The URL of the Job. + /// The URL of the Job. /// The ETag of the Job. This is an opaque string. You can use it to detect whether the Job has changed between requests. In particular, you can be pass the ETag when updating a Job to specify that your changes should take effect only if nobody else has modified the Job in the meantime. /// The last modified time of the Job. This is the last time at which the Job level data, such as the Job state or priority, changed. It does not factor in task-level changes such as adding new Tasks or Tasks changing state. /// The creation time of the Job. @@ -482,23 +484,23 @@ public static BatchNodeCounts BatchNodeCounts(int creating = default, int idle = /// The Job Release Task. The Job Release Task is a special Task run at the end of the Job on each Compute Node that has run any other Task of the Job. /// The list of common environment variable settings. 
These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. /// The Pool settings associated with the Job. - /// The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. - /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + /// The action the Batch service should take when all Tasks in the Job are in the completed state. The default is noaction. + /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. /// The network configuration for the Job. /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. /// The execution information for the Job. - /// Resource usage statistics for the entire lifetime of the Job. This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// Resource usage statistics for the entire lifetime of the Job. 
This property is populated only if the BatchJob was retrieved with an expand clause including the 'stats' attribute; otherwise it is null. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. /// A new instance for mocking. - public static BatchJob BatchJob(string id = null, string displayName = null, bool? usesTaskDependencies = null, string url = null, string eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchJobState? state = null, DateTimeOffset? stateTransitionTime = null, BatchJobState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, int? priority = null, bool? allowTaskPreemption = null, int? maxParallelTasks = null, BatchJobConstraints constraints = null, BatchJobManagerTask jobManagerTask = null, BatchJobPreparationTask jobPreparationTask = null, BatchJobReleaseTask jobReleaseTask = null, IEnumerable commonEnvironmentSettings = null, BatchPoolInfo poolInfo = null, OnAllBatchTasksComplete? onAllTasksComplete = null, OnBatchTaskFailure? onTaskFailure = null, BatchJobNetworkConfiguration networkConfiguration = null, IEnumerable metadata = null, BatchJobExecutionInfo executionInfo = null, BatchJobStatistics stats = null) + public static BatchJob BatchJob(string id = null, string displayName = null, bool? usesTaskDependencies = null, Uri uri = null, ETag? eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchJobState? state = null, DateTimeOffset? stateTransitionTime = null, BatchJobState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, int? priority = null, bool? allowTaskPreemption = null, int? 
maxParallelTasks = null, BatchJobConstraints constraints = null, BatchJobManagerTask jobManagerTask = null, BatchJobPreparationTask jobPreparationTask = null, BatchJobReleaseTask jobReleaseTask = null, IEnumerable commonEnvironmentSettings = null, BatchPoolInfo poolInfo = null, BatchAllTasksCompleteMode? allTasksCompleteMode = null, BatchTaskFailureMode? taskFailureMode = null, BatchJobNetworkConfiguration networkConfiguration = null, IEnumerable metadata = null, BatchJobExecutionInfo executionInfo = null, BatchJobStatistics jobStatistics = null) { commonEnvironmentSettings ??= new List(); - metadata ??= new List(); + metadata ??= new List(); return new BatchJob( id, displayName, usesTaskDependencies, - url, + uri, eTag, lastModified, creationTime, @@ -515,12 +517,12 @@ public static BatchJob BatchJob(string id = null, string displayName = null, boo jobReleaseTask, commonEnvironmentSettings?.ToList(), poolInfo, - onAllTasksComplete, - onTaskFailure, + allTasksCompleteMode, + taskFailureMode, networkConfiguration, metadata?.ToList(), executionInfo, - stats, + jobStatistics, serializedAdditionalRawData: null); } @@ -548,7 +550,7 @@ public static BatchJobExecutionInfo BatchJobExecutionInfo(DateTimeOffset startTi /// A message describing the Job scheduling error, intended to be suitable for display in a user interface. /// A list of additional error details related to the scheduling error. /// A new instance for mocking. - public static BatchJobSchedulingError BatchJobSchedulingError(ErrorCategory category = default, string code = null, string message = null, IEnumerable details = null) + public static BatchJobSchedulingError BatchJobSchedulingError(BatchErrorSourceCategory category = default, string code = null, string message = null, IEnumerable details = null) { details ??= new List(); @@ -556,42 +558,42 @@ public static BatchJobSchedulingError BatchJobSchedulingError(ErrorCategory cate } /// Initializes a new instance of . - /// The URL of the statistics. 
+ /// The URL of the statistics. /// The start time of the time range covered by the statistics. /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in the Job. /// The total wall clock time of all Tasks in the Job. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. - /// The total number of disk read operations made by all Tasks in the Job. - /// The total number of disk write operations made by all Tasks in the Job. - /// The total amount of data in GiB read from disk by all Tasks in the Job. - /// The total amount of data in GiB written to disk by all Tasks in the Job. - /// The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. - /// The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. - /// The total number of retries on all the Tasks in the Job during the given time range. + /// The total number of disk read operations made by all Tasks in the Job. + /// The total number of disk write operations made by all Tasks in the Job. + /// The total amount of data in GiB read from disk by all Tasks in the Job. + /// The total amount of data in GiB written to disk by all Tasks in the Job. + /// The total number of Tasks successfully completed in the Job during the given time range. A Task completes successfully if it returns exit code 0. 
+ /// The total number of Tasks in the Job that failed during the given time range. A Task fails if it exhausts its maximum retry count without returning exit code 0. + /// The total number of retries on all the Tasks in the Job during the given time range. /// The total wait time of all Tasks in the Job. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.) This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. /// A new instance for mocking. - public static BatchJobStatistics BatchJobStatistics(string url = null, DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, TimeSpan userCpuTime = default, TimeSpan kernelCpuTime = default, TimeSpan wallClockTime = default, long readIOps = default, long writeIOps = default, float readIOGiB = default, float writeIOGiB = default, long numSucceededTasks = default, long numFailedTasks = default, long numTaskRetries = default, TimeSpan waitTime = default) + public static BatchJobStatistics BatchJobStatistics(Uri uri = null, DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, TimeSpan userCpuTime = default, TimeSpan kernelCpuTime = default, TimeSpan wallClockTime = default, long readIops = default, long writeIops = default, float readIoGiB = default, float writeIoGiB = default, long succeededTasksCount = default, long failedTasksCount = default, long taskRetriesCount = default, TimeSpan waitTime = default) { return new BatchJobStatistics( - url, + uri, startTime, lastUpdateTime, userCpuTime, kernelCpuTime, wallClockTime, - readIOps, - writeIOps, - readIOGiB, - writeIOGiB, - numSucceededTasks, - numFailedTasks, - numTaskRetries, + readIops, + writeIops, + readIoGiB, + writeIoGiB, + succeededTasksCount, + failedTasksCount, + taskRetriesCount, waitTime, 
serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// A string that uniquely identifies the Job within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). /// The display name for the Job. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// Whether Tasks in the Job can define dependencies on each other. The default is false. @@ -604,17 +606,17 @@ public static BatchJobStatistics BatchJobStatistics(string url = null, DateTimeO /// The Job Release Task. A Job Release Task cannot be specified without also specifying a Job Preparation Task for the Job. The Batch service runs the Job Release Task on the Nodes that have run the Job Preparation Task. The primary purpose of the Job Release Task is to undo changes to Compute Nodes made by the Job Preparation Task. Example activities include deleting local files, or shutting down services that were started as part of Job preparation. /// The list of common environment variable settings. These environment variables are set for all Tasks in the Job (including the Job Manager, Job Preparation and Job Release Tasks). Individual Tasks can override an environment setting specified here by specifying the same setting name with a different value. /// The Pool on which the Batch service runs the Job's Tasks. - /// The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. 
This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. - /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. + /// The action the Batch service should take when all Tasks in the Job are in the completed state. Note that if a Job contains no Tasks, then all Tasks are considered complete. This option is therefore most commonly used with a Job Manager task; if you want to use automatic Job termination without a Job Manager, you should initially set onAllTasksComplete to noaction and update the Job properties to set onAllTasksComplete to terminatejob once you have finished adding Tasks. The default is noaction. + /// The action the Batch service should take when any Task in the Job fails. A Task is considered to have failed if has a failureInfo. A failureInfo is set if the Task completes with a non-zero exit code after exhausting its retry count, or if there was an error starting the Task, for example due to a resource file download error. The default is noaction. /// The network configuration for the Job. /// A list of name-value pairs associated with the Job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - /// A new instance for mocking. - public static BatchJobCreateContent BatchJobCreateContent(string id = null, string displayName = null, bool? usesTaskDependencies = null, int? priority = null, bool? allowTaskPreemption = null, int? 
maxParallelTasks = null, BatchJobConstraints constraints = null, BatchJobManagerTask jobManagerTask = null, BatchJobPreparationTask jobPreparationTask = null, BatchJobReleaseTask jobReleaseTask = null, IEnumerable commonEnvironmentSettings = null, BatchPoolInfo poolInfo = null, OnAllBatchTasksComplete? onAllTasksComplete = null, OnBatchTaskFailure? onTaskFailure = null, BatchJobNetworkConfiguration networkConfiguration = null, IEnumerable metadata = null) + /// A new instance for mocking. + public static BatchJobCreateOptions BatchJobCreateOptions(string id = null, string displayName = null, bool? usesTaskDependencies = null, int? priority = null, bool? allowTaskPreemption = null, int? maxParallelTasks = null, BatchJobConstraints constraints = null, BatchJobManagerTask jobManagerTask = null, BatchJobPreparationTask jobPreparationTask = null, BatchJobReleaseTask jobReleaseTask = null, IEnumerable commonEnvironmentSettings = null, BatchPoolInfo poolInfo = null, BatchAllTasksCompleteMode? allTasksCompleteMode = null, BatchTaskFailureMode? taskFailureMode = null, BatchJobNetworkConfiguration networkConfiguration = null, IEnumerable metadata = null) { commonEnvironmentSettings ??= new List(); - metadata ??= new List(); + metadata ??= new List(); - return new BatchJobCreateContent( + return new BatchJobCreateOptions( id, displayName, usesTaskDependencies, @@ -627,8 +629,8 @@ public static BatchJobCreateContent BatchJobCreateContent(string id = null, stri jobReleaseTask, commonEnvironmentSettings?.ToList(), poolInfo, - onAllTasksComplete, - onTaskFailure, + allTasksCompleteMode, + taskFailureMode, networkConfiguration, metadata?.ToList(), serializedAdditionalRawData: null); @@ -637,16 +639,16 @@ public static BatchJobCreateContent BatchJobCreateContent(string id = null, stri /// Initializes a new instance of . /// The ID of the Pool containing the Compute Node to which this entry refers. /// The ID of the Compute Node to which this entry refers. 
- /// The URL of the Compute Node to which this entry refers. + /// The URL of the Compute Node to which this entry refers. /// Information about the execution status of the Job Preparation Task on this Compute Node. /// Information about the execution status of the Job Release Task on this Compute Node. This property is set only if the Job Release Task has run on the Compute Node. /// A new instance for mocking. - public static BatchJobPreparationAndReleaseTaskStatus BatchJobPreparationAndReleaseTaskStatus(string poolId = null, string nodeId = null, string nodeUrl = null, BatchJobPreparationTaskExecutionInfo jobPreparationTaskExecutionInfo = null, BatchJobReleaseTaskExecutionInfo jobReleaseTaskExecutionInfo = null) + public static BatchJobPreparationAndReleaseTaskStatus BatchJobPreparationAndReleaseTaskStatus(string poolId = null, string nodeId = null, Uri nodeUri = null, BatchJobPreparationTaskExecutionInfo jobPreparationTaskExecutionInfo = null, BatchJobReleaseTaskExecutionInfo jobReleaseTaskExecutionInfo = null) { return new BatchJobPreparationAndReleaseTaskStatus( poolId, nodeId, - nodeUrl, + nodeUri, jobPreparationTaskExecutionInfo, jobReleaseTaskExecutionInfo, serializedAdditionalRawData: null); @@ -657,7 +659,7 @@ public static BatchJobPreparationAndReleaseTaskStatus BatchJobPreparationAndRele /// The time at which the Job Preparation Task completed. This property is set only if the Task is in the Completed state. /// The current state of the Job Preparation Task on the Compute Node. /// The root directory of the Job Preparation Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. - /// The URL to the root directory of the Job Preparation Task on the Compute Node. + /// The URL to the root directory of the Job Preparation Task on the Compute Node. /// The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. 
The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. @@ -665,14 +667,14 @@ public static BatchJobPreparationAndReleaseTaskStatus BatchJobPreparationAndRele /// The most recent time at which a retry of the Job Preparation Task started running. This property is set only if the Task was retried (i.e. retryCount is nonzero). If present, this is typically the same as startTime, but may be different if the Task has been restarted for reasons other than retry; for example, if the Compute Node was rebooted during a retry, then the startTime is updated but the lastRetryTime is not. /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. /// A new instance for mocking. - public static BatchJobPreparationTaskExecutionInfo BatchJobPreparationTaskExecutionInfo(DateTimeOffset startTime = default, DateTimeOffset? endTime = null, BatchJobPreparationTaskState state = default, string taskRootDirectory = null, string taskRootDirectoryUrl = null, int? exitCode = null, BatchTaskContainerExecutionInfo containerInfo = null, BatchTaskFailureInfo failureInfo = null, int retryCount = default, DateTimeOffset? lastRetryTime = null, BatchTaskExecutionResult? 
result = null) + public static BatchJobPreparationTaskExecutionInfo BatchJobPreparationTaskExecutionInfo(DateTimeOffset startTime = default, DateTimeOffset? endTime = null, BatchJobPreparationTaskState state = default, string taskRootDirectory = null, Uri taskRootDirectoryUri = null, int? exitCode = null, BatchTaskContainerExecutionInfo containerInfo = null, BatchTaskFailureInfo failureInfo = null, int retryCount = default, DateTimeOffset? lastRetryTime = null, BatchTaskExecutionResult? result = null) { return new BatchJobPreparationTaskExecutionInfo( startTime, endTime, state, taskRootDirectory, - taskRootDirectoryUrl, + taskRootDirectoryUri, exitCode, containerInfo, failureInfo, @@ -698,7 +700,7 @@ public static BatchTaskContainerExecutionInfo BatchTaskContainerExecutionInfo(st /// A message describing the Task error, intended to be suitable for display in a user interface. /// A list of additional details related to the error. /// A new instance for mocking. - public static BatchTaskFailureInfo BatchTaskFailureInfo(ErrorCategory category = default, string code = null, string message = null, IEnumerable details = null) + public static BatchTaskFailureInfo BatchTaskFailureInfo(BatchErrorSourceCategory category = default, string code = null, string message = null, IEnumerable details = null) { details ??= new List(); @@ -710,20 +712,20 @@ public static BatchTaskFailureInfo BatchTaskFailureInfo(ErrorCategory category = /// The time at which the Job Release Task completed. This property is set only if the Task is in the Completed state. /// The current state of the Job Release Task on the Compute Node. /// The root directory of the Job Release Task on the Compute Node. You can use this path to retrieve files created by the Task, such as log files. - /// The URL to the root directory of the Job Release Task on the Compute Node. + /// The URL to the root directory of the Job Release Task on the Compute Node. 
/// The exit code of the program specified on the Task command line. This parameter is returned only if the Task is in the completed state. The exit code for a process reflects the specific convention implemented by the application developer for that process. If you use the exit code value to make decisions in your code, be sure that you know the exit code convention used by the application process. Note that the exit code may also be generated by the Compute Node operating system, such as when a process is forcibly terminated. /// Information about the container under which the Task is executing. This property is set only if the Task runs in a container context. /// Information describing the Task failure, if any. This property is set only if the Task is in the completed state and encountered a failure. /// The result of the Task execution. If the value is 'failed', then the details of the failure can be found in the failureInfo property. /// A new instance for mocking. - public static BatchJobReleaseTaskExecutionInfo BatchJobReleaseTaskExecutionInfo(DateTimeOffset startTime = default, DateTimeOffset? endTime = null, BatchJobReleaseTaskState state = default, string taskRootDirectory = null, string taskRootDirectoryUrl = null, int? exitCode = null, BatchTaskContainerExecutionInfo containerInfo = null, BatchTaskFailureInfo failureInfo = null, BatchTaskExecutionResult? result = null) + public static BatchJobReleaseTaskExecutionInfo BatchJobReleaseTaskExecutionInfo(DateTimeOffset startTime = default, DateTimeOffset? endTime = null, BatchJobReleaseTaskState state = default, string taskRootDirectory = null, Uri taskRootDirectoryUri = null, int? exitCode = null, BatchTaskContainerExecutionInfo containerInfo = null, BatchTaskFailureInfo failureInfo = null, BatchTaskExecutionResult? 
result = null) { return new BatchJobReleaseTaskExecutionInfo( startTime, endTime, state, taskRootDirectory, - taskRootDirectoryUrl, + taskRootDirectoryUri, exitCode, containerInfo, failureInfo, @@ -779,7 +781,7 @@ public static BatchTaskSlotCounts BatchTaskSlotCounts(int active = default, int /// Initializes a new instance of . /// The X.509 thumbprint of the Certificate. This is a sequence of up to 40 hex digits (it may include spaces but these are removed). /// The algorithm used to derive the thumbprint. This must be sha1. - /// The URL of the Certificate. + /// The URL of the Certificate. /// The state of the Certificate. /// The time at which the Certificate entered its current state. /// The previous state of the Certificate. This property is not set if the Certificate is in its initial active state. @@ -790,12 +792,12 @@ public static BatchTaskSlotCounts BatchTaskSlotCounts(int active = default, int /// The format of the Certificate data. /// The password to access the Certificate's private key. This must be omitted if the Certificate format is cer. /// A new instance for mocking. - public static BatchCertificate BatchCertificate(string thumbprint = null, string thumbprintAlgorithm = null, string url = null, BatchCertificateState? state = null, DateTimeOffset? stateTransitionTime = null, BatchCertificateState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, string publicData = null, DeleteBatchCertificateError deleteCertificateError = null, string data = null, BatchCertificateFormat? certificateFormat = null, string password = null) + public static BatchCertificate BatchCertificate(string thumbprint = null, string thumbprintAlgorithm = null, Uri uri = null, BatchCertificateState? state = null, DateTimeOffset? stateTransitionTime = null, BatchCertificateState? previousState = null, DateTimeOffset? 
previousStateTransitionTime = null, string publicData = null, BatchCertificateDeleteError deleteCertificateError = null, BinaryData data = null, BatchCertificateFormat? certificateFormat = null, string password = null) { return new BatchCertificate( thumbprint, thumbprintAlgorithm, - url, + uri, state, stateTransitionTime, previousState, @@ -808,22 +810,22 @@ public static BatchCertificate BatchCertificate(string thumbprint = null, string serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// An identifier for the Certificate deletion error. Codes are invariant and are intended to be consumed programmatically. /// A message describing the Certificate deletion error, intended to be suitable for display in a user interface. /// A list of additional error details related to the Certificate deletion error. This list includes details such as the active Pools and Compute Nodes referencing this Certificate. However, if a large number of resources reference the Certificate, the list contains only about the first hundred. - /// A new instance for mocking. - public static DeleteBatchCertificateError DeleteBatchCertificateError(string code = null, string message = null, IEnumerable values = null) + /// A new instance for mocking. + public static BatchCertificateDeleteError BatchCertificateDeleteError(string code = null, string message = null, IEnumerable values = null) { values ??= new List(); - return new DeleteBatchCertificateError(code, message, values?.ToList(), serializedAdditionalRawData: null); + return new BatchCertificateDeleteError(code, message, values?.ToList(), serializedAdditionalRawData: null); } /// Initializes a new instance of . /// A string that uniquely identifies the schedule within the Account. /// The display name for the schedule. - /// The URL of the Job Schedule. + /// The URL of the Job Schedule. /// The ETag of the Job Schedule. This is an opaque string. 
You can use it to detect whether the Job Schedule has changed between requests. In particular, you can be pass the ETag with an Update Job Schedule request to specify that your changes should take effect only if nobody else has modified the schedule in the meantime. /// The last modified time of the Job Schedule. This is the last time at which the schedule level data, such as the Job specification or recurrence information, changed. It does not factor in job-level changes such as new Jobs being created or Jobs changing state. /// The creation time of the Job Schedule. @@ -835,16 +837,16 @@ public static DeleteBatchCertificateError DeleteBatchCertificateError(string cod /// The details of the Jobs to be created on this schedule. /// Information about Jobs that have been and will be run under this schedule. /// A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - /// The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. + /// The lifetime resource usage statistics for the Job Schedule. The statistics may not be immediately available. The Batch service performs periodic roll-up of statistics. The typical delay is about 30 minutes. /// A new instance for mocking. - public static BatchJobSchedule BatchJobSchedule(string id = null, string displayName = null, string url = null, string eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchJobScheduleState? state = null, DateTimeOffset? stateTransitionTime = null, BatchJobScheduleState? previousState = null, DateTimeOffset? 
previousStateTransitionTime = null, BatchJobScheduleConfiguration schedule = null, BatchJobSpecification jobSpecification = null, BatchJobScheduleExecutionInfo executionInfo = null, IEnumerable metadata = null, BatchJobScheduleStatistics stats = null) + public static BatchJobSchedule BatchJobSchedule(string id = null, string displayName = null, Uri uri = null, ETag? eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, BatchJobScheduleState? state = null, DateTimeOffset? stateTransitionTime = null, BatchJobScheduleState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, BatchJobScheduleConfiguration schedule = null, BatchJobSpecification jobSpecification = null, BatchJobScheduleExecutionInfo executionInfo = null, IEnumerable metadata = null, BatchJobScheduleStatistics jobScheduleStatistics = null) { - metadata ??= new List(); + metadata ??= new List(); return new BatchJobSchedule( id, displayName, - url, + uri, eTag, lastModified, creationTime, @@ -856,7 +858,7 @@ public static BatchJobSchedule BatchJobSchedule(string id = null, string display jobSpecification, executionInfo, metadata?.ToList(), - stats, + jobScheduleStatistics, serializedAdditionalRawData: null); } @@ -872,61 +874,61 @@ public static BatchJobScheduleExecutionInfo BatchJobScheduleExecutionInfo(DateTi /// Initializes a new instance of . /// The ID of the Job. - /// The URL of the Job. + /// The URL of the Job. /// A new instance for mocking. - public static RecentBatchJob RecentBatchJob(string id = null, string url = null) + public static RecentBatchJob RecentBatchJob(string id = null, Uri uri = null) { - return new RecentBatchJob(id, url, serializedAdditionalRawData: null); + return new RecentBatchJob(id, uri, serializedAdditionalRawData: null); } /// Initializes a new instance of . - /// The URL of the statistics. + /// The URL of the statistics. /// The start time of the time range covered by the statistics. 
/// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by all Tasks in all Jobs created under the schedule. /// The total wall clock time of all the Tasks in all the Jobs created under the schedule. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If a Task was retried, this includes the wall clock time of all the Task retries. - /// The total number of disk read operations made by all Tasks in all Jobs created under the schedule. - /// The total number of disk write operations made by all Tasks in all Jobs created under the schedule. - /// The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. - /// The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. - /// The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. - /// The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. - /// The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. + /// The total number of disk read operations made by all Tasks in all Jobs created under the schedule. + /// The total number of disk write operations made by all Tasks in all Jobs created under the schedule. + /// The total gibibytes read from disk by all Tasks in all Jobs created under the schedule. 
+ /// The total gibibytes written to disk by all Tasks in all Jobs created under the schedule. + /// The total number of Tasks successfully completed during the given time range in Jobs created under the schedule. A Task completes successfully if it returns exit code 0. + /// The total number of Tasks that failed during the given time range in Jobs created under the schedule. A Task fails if it exhausts its maximum retry count without returning exit code 0. + /// The total number of retries during the given time range on all Tasks in all Jobs created under the schedule. /// The total wait time of all Tasks in all Jobs created under the schedule. The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). This value is only reported in the Account lifetime statistics; it is not included in the Job statistics. /// A new instance for mocking. 
- public static BatchJobScheduleStatistics BatchJobScheduleStatistics(string url = null, DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, TimeSpan userCpuTime = default, TimeSpan kernelCpuTime = default, TimeSpan wallClockTime = default, long readIOps = default, long writeIOps = default, float readIOGiB = default, float writeIOGiB = default, long numSucceededTasks = default, long numFailedTasks = default, long numTaskRetries = default, TimeSpan waitTime = default) + public static BatchJobScheduleStatistics BatchJobScheduleStatistics(Uri uri = null, DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, TimeSpan userCpuTime = default, TimeSpan kernelCpuTime = default, TimeSpan wallClockTime = default, long readIops = default, long writeIops = default, float readIoGiB = default, float writeIoGiB = default, long succeededTasksCount = default, long failedTasksCount = default, long taskRetriesCount = default, TimeSpan waitTime = default) { return new BatchJobScheduleStatistics( - url, + uri, startTime, lastUpdateTime, userCpuTime, kernelCpuTime, wallClockTime, - readIOps, - writeIOps, - readIOGiB, - writeIOGiB, - numSucceededTasks, - numFailedTasks, - numTaskRetries, + readIops, + writeIops, + readIoGiB, + writeIoGiB, + succeededTasksCount, + failedTasksCount, + taskRetriesCount, waitTime, serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// A string that uniquely identifies the schedule within the Account. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within an Account that differ only by case). /// The display name for the schedule. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. 
/// The schedule according to which Jobs will be created. All times are fixed respective to UTC and are not impacted by daylight saving time. /// The details of the Jobs to be created on this schedule. /// A list of name-value pairs associated with the schedule as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. - /// A new instance for mocking. - public static BatchJobScheduleCreateContent BatchJobScheduleCreateContent(string id = null, string displayName = null, BatchJobScheduleConfiguration schedule = null, BatchJobSpecification jobSpecification = null, IEnumerable metadata = null) + /// A new instance for mocking. + public static BatchJobScheduleCreateOptions BatchJobScheduleCreateOptions(string id = null, string displayName = null, BatchJobScheduleConfiguration schedule = null, BatchJobSpecification jobSpecification = null, IEnumerable metadata = null) { - metadata ??= new List(); + metadata ??= new List(); - return new BatchJobScheduleCreateContent( + return new BatchJobScheduleCreateOptions( id, displayName, schedule, @@ -935,7 +937,7 @@ public static BatchJobScheduleCreateContent BatchJobScheduleCreateContent(string serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. The ID is case-preserving and case-insensitive (that is, you may not have two IDs within a Job that differ only by case). /// A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. /// How the Batch service should respond when the Task completes. @@ -952,15 +954,15 @@ public static BatchJobScheduleCreateContent BatchJobScheduleCreateContent(string /// The Tasks that this Task depends on. 
This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. If the Job does not have usesTaskDependencies set to true, and this element is present, the request fails with error code TaskDependenciesNotSpecifiedOnJob. /// A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. /// The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. - /// A new instance for mocking. - public static BatchTaskCreateContent BatchTaskCreateContent(string id = null, string displayName = null, ExitConditions exitConditions = null, string commandLine = null, BatchTaskContainerSettings containerSettings = null, IEnumerable resourceFiles = null, IEnumerable outputFiles = null, IEnumerable environmentSettings = null, AffinityInfo affinityInfo = null, BatchTaskConstraints constraints = null, int? 
requiredSlots = null, UserIdentity userIdentity = null, MultiInstanceSettings multiInstanceSettings = null, BatchTaskDependencies dependsOn = null, IEnumerable applicationPackageReferences = null, AuthenticationTokenSettings authenticationTokenSettings = null) + /// A new instance for mocking. + public static BatchTaskCreateOptions BatchTaskCreateOptions(string id = null, string displayName = null, ExitConditions exitConditions = null, string commandLine = null, BatchTaskContainerSettings containerSettings = null, IEnumerable resourceFiles = null, IEnumerable outputFiles = null, IEnumerable environmentSettings = null, BatchAffinityInfo affinityInfo = null, BatchTaskConstraints constraints = null, int? requiredSlots = null, UserIdentity userIdentity = null, MultiInstanceSettings multiInstanceSettings = null, BatchTaskDependencies dependsOn = null, IEnumerable applicationPackageReferences = null, AuthenticationTokenSettings authenticationTokenSettings = null) { resourceFiles ??= new List(); outputFiles ??= new List(); environmentSettings ??= new List(); applicationPackageReferences ??= new List(); - return new BatchTaskCreateContent( + return new BatchTaskCreateOptions( id, displayName, exitConditions, @@ -983,7 +985,7 @@ public static BatchTaskCreateContent BatchTaskCreateContent(string id = null, st /// Initializes a new instance of . /// A string that uniquely identifies the Task within the Job. The ID can contain any combination of alphanumeric characters including hyphens and underscores, and cannot contain more than 64 characters. /// A display name for the Task. The display name need not be unique and can contain any Unicode characters up to a maximum length of 1024. - /// The URL of the Task. + /// The URL of the Task. /// The ETag of the Task. This is an opaque string. You can use it to detect whether the Task has changed between requests. 
In particular, you can be pass the ETag when updating a Task to specify that your changes should take effect only if nobody else has modified the Task in the meantime. /// The last modified time of the Task. /// The creation time of the Task. @@ -1004,12 +1006,12 @@ public static BatchTaskCreateContent BatchTaskCreateContent(string id = null, st /// Information about the execution of the Task. /// Information about the Compute Node on which the Task ran. /// An object that indicates that the Task is a multi-instance Task, and contains information about how to run the multi-instance Task. - /// Resource usage statistics for the Task. + /// Resource usage statistics for the Task. /// The Tasks that this Task depends on. This Task will not be scheduled until all Tasks that it depends on have completed successfully. If any of those Tasks fail and exhaust their retry counts, this Task will never be scheduled. /// A list of Packages that the Batch service will deploy to the Compute Node before running the command line. Application packages are downloaded and deployed to a shared directory, not the Task working directory. Therefore, if a referenced package is already on the Node, and is up to date, then it is not re-downloaded; the existing copy on the Compute Node is used. If a referenced Package cannot be installed, for example because the package has been deleted or because download failed, the Task fails. /// The settings for an authentication token that the Task can use to perform Batch service operations. If this property is set, the Batch service provides the Task with an authentication token which can be used to authenticate Batch service operations without requiring an Account access key. The token is provided via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations that the Task can carry out using the token depend on the settings. 
For example, a Task can request Job permissions in order to add other Tasks to the Job, or check the status of the Job or of other Tasks under the Job. /// A new instance for mocking. - public static BatchTask BatchTask(string id = null, string displayName = null, string url = null, string eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, ExitConditions exitConditions = null, BatchTaskState? state = null, DateTimeOffset? stateTransitionTime = null, BatchTaskState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, string commandLine = null, BatchTaskContainerSettings containerSettings = null, IEnumerable resourceFiles = null, IEnumerable outputFiles = null, IEnumerable environmentSettings = null, AffinityInfo affinityInfo = null, BatchTaskConstraints constraints = null, int? requiredSlots = null, UserIdentity userIdentity = null, BatchTaskExecutionInfo executionInfo = null, BatchNodeInfo nodeInfo = null, MultiInstanceSettings multiInstanceSettings = null, BatchTaskStatistics stats = null, BatchTaskDependencies dependsOn = null, IEnumerable applicationPackageReferences = null, AuthenticationTokenSettings authenticationTokenSettings = null) + public static BatchTask BatchTask(string id = null, string displayName = null, Uri uri = null, ETag? eTag = null, DateTimeOffset? lastModified = null, DateTimeOffset? creationTime = null, ExitConditions exitConditions = null, BatchTaskState? state = null, DateTimeOffset? stateTransitionTime = null, BatchTaskState? previousState = null, DateTimeOffset? previousStateTransitionTime = null, string commandLine = null, BatchTaskContainerSettings containerSettings = null, IEnumerable resourceFiles = null, IEnumerable outputFiles = null, IEnumerable environmentSettings = null, BatchAffinityInfo affinityInfo = null, BatchTaskConstraints constraints = null, int? 
requiredSlots = null, UserIdentity userIdentity = null, BatchTaskExecutionInfo executionInfo = null, BatchNodeInfo nodeInfo = null, MultiInstanceSettings multiInstanceSettings = null, BatchTaskStatistics taskStatistics = null, BatchTaskDependencies dependsOn = null, IEnumerable applicationPackageReferences = null, AuthenticationTokenSettings authenticationTokenSettings = null) { resourceFiles ??= new List(); outputFiles ??= new List(); @@ -1019,7 +1021,7 @@ public static BatchTask BatchTask(string id = null, string displayName = null, s return new BatchTask( id, displayName, - url, + uri, eTag, lastModified, creationTime, @@ -1040,7 +1042,7 @@ public static BatchTask BatchTask(string id = null, string displayName = null, s executionInfo, nodeInfo, multiInstanceSettings, - stats, + taskStatistics, dependsOn, applicationPackageReferences?.ToList(), authenticationTokenSettings, @@ -1077,75 +1079,75 @@ public static BatchTaskExecutionInfo BatchTaskExecutionInfo(DateTimeOffset? star /// Initializes a new instance of . /// An identifier for the Node on which the Task ran, which can be passed when adding a Task to request that the Task be scheduled on this Compute Node. - /// The URL of the Compute Node on which the Task ran. + /// The URL of the Compute Node on which the Task ran. /// The ID of the Pool on which the Task ran. /// The ID of the Compute Node on which the Task ran. /// The root directory of the Task on the Compute Node. - /// The URL to the root directory of the Task on the Compute Node. + /// The URL to the root directory of the Task on the Compute Node. /// A new instance for mocking. 
- public static BatchNodeInfo BatchNodeInfo(string affinityId = null, string nodeUrl = null, string poolId = null, string nodeId = null, string taskRootDirectory = null, string taskRootDirectoryUrl = null) + public static BatchNodeInfo BatchNodeInfo(string affinityId = null, Uri nodeUri = null, string poolId = null, string nodeId = null, string taskRootDirectory = null, Uri taskRootDirectoryUri = null) { return new BatchNodeInfo( affinityId, - nodeUrl, + nodeUri, poolId, nodeId, taskRootDirectory, - taskRootDirectoryUrl, + taskRootDirectoryUri, serializedAdditionalRawData: null); } /// Initializes a new instance of . - /// The URL of the statistics. + /// The URL of the statistics. /// The start time of the time range covered by the statistics. /// The time at which the statistics were last updated. All statistics are limited to the range between startTime and lastUpdateTime. /// The total user mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. /// The total kernel mode CPU time (summed across all cores and all Compute Nodes) consumed by the Task. /// The total wall clock time of the Task. The wall clock time is the elapsed time from when the Task started running on a Compute Node to when it finished (or to the last time the statistics were updated, if the Task had not finished by then). If the Task was retried, this includes the wall clock time of all the Task retries. - /// The total number of disk read operations made by the Task. - /// The total number of disk write operations made by the Task. - /// The total gibibytes read from disk by the Task. - /// The total gibibytes written to disk by the Task. + /// The total number of disk read operations made by the Task. + /// The total number of disk write operations made by the Task. + /// The total gibibytes read from disk by the Task. + /// The total gibibytes written to disk by the Task. /// The total wait time of the Task. 
The wait time for a Task is defined as the elapsed time between the creation of the Task and the start of Task execution. (If the Task is retried due to failures, the wait time is the time to the most recent Task execution.). /// A new instance for mocking. - public static BatchTaskStatistics BatchTaskStatistics(string url = null, DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, TimeSpan userCpuTime = default, TimeSpan kernelCpuTime = default, TimeSpan wallClockTime = default, long readIOps = default, long writeIOps = default, float readIOGiB = default, float writeIOGiB = default, TimeSpan waitTime = default) + public static BatchTaskStatistics BatchTaskStatistics(Uri uri = null, DateTimeOffset startTime = default, DateTimeOffset lastUpdateTime = default, TimeSpan userCpuTime = default, TimeSpan kernelCpuTime = default, TimeSpan wallClockTime = default, long readIops = default, long writeIops = default, float readIoGiB = default, float writeIoGiB = default, TimeSpan waitTime = default) { return new BatchTaskStatistics( - url, + uri, startTime, lastUpdateTime, userCpuTime, kernelCpuTime, wallClockTime, - readIOps, - writeIOps, - readIOGiB, - writeIOGiB, + readIops, + writeIops, + readIoGiB, + writeIoGiB, waitTime, serializedAdditionalRawData: null); } - /// Initializes a new instance of . - /// The results of the add Task collection operation. - /// A new instance for mocking. - public static BatchTaskAddCollectionResult BatchTaskAddCollectionResult(IEnumerable value = null) + /// Initializes a new instance of . + /// The results of the create Task collection operation. + /// A new instance for mocking. 
+ public static BatchCreateTaskCollectionResult BatchCreateTaskCollectionResult(IEnumerable values = null) { - value ??= new List(); + values ??= new List(); - return new BatchTaskAddCollectionResult(value?.ToList(), serializedAdditionalRawData: null); + return new BatchCreateTaskCollectionResult(values?.ToList(), serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The status of the add Task request. /// The ID of the Task for which this is the result. /// The ETag of the Task, if the Task was successfully added. You can use this to detect whether the Task has changed between requests. In particular, you can be pass the ETag with an Update Task request to specify that your changes should take effect only if nobody else has modified the Job in the meantime. /// The last modified time of the Task. /// The URL of the Task, if the Task was successfully added. /// The error encountered while attempting to add the Task. - /// A new instance for mocking. - public static BatchTaskAddResult BatchTaskAddResult(BatchTaskAddStatus status = default, string taskId = null, string eTag = null, DateTimeOffset? lastModified = null, string location = null, BatchError error = null) + /// A new instance for mocking. + public static BatchTaskCreateResult BatchTaskCreateResult(BatchTaskAddStatus status = default, string taskId = null, ETag? eTag = null, DateTimeOffset? lastModified = null, string location = null, BatchError error = null) { - return new BatchTaskAddResult( + return new BatchTaskCreateResult( status, taskId, eTag, @@ -1189,13 +1191,13 @@ public static BatchSubtask BatchSubtask(int? id = null, BatchNodeInfo nodeInfo = /// Initializes a new instance of . /// The file path. - /// The URL of the file. + /// The URL of the file. /// Whether the object represents a directory. /// The file properties. /// A new instance for mocking. 
- public static BatchNodeFile BatchNodeFile(string name = null, string url = null, bool? isDirectory = null, FileProperties properties = null) + public static BatchNodeFile BatchNodeFile(string name = null, Uri uri = null, bool? isDirectory = null, FileProperties properties = null) { - return new BatchNodeFile(name, url, isDirectory, properties, serializedAdditionalRawData: null); + return new BatchNodeFile(name, uri, isDirectory, properties, serializedAdditionalRawData: null); } /// Initializes a new instance of . @@ -1216,16 +1218,16 @@ public static FileProperties FileProperties(DateTimeOffset? creationTime = null, serializedAdditionalRawData: null); } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The user name of the Account. /// Whether the Account should be an administrator on the Compute Node. The default value is false. /// The time at which the Account should expire. If omitted, the default is 1 day from the current time. For Linux Compute Nodes, the expiryTime has a precision up to a day. /// The password of the Account. The password is required for Windows Compute Nodes. For Linux Compute Nodes, the password can optionally be specified along with the sshPublicKey property. /// The SSH public key that can be used for remote login to the Compute Node. The public key should be compatible with OpenSSH encoding and should be base 64 encoded. This property can be specified only for Linux Compute Nodes. If this is specified for a Windows Compute Node, then the Batch service rejects the request; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). - /// A new instance for mocking. - public static BatchNodeUserCreateContent BatchNodeUserCreateContent(string name = null, bool? isAdmin = null, DateTimeOffset? expiryTime = null, string password = null, string sshPublicKey = null) + /// A new instance for mocking. 
+ public static BatchNodeUserCreateOptions BatchNodeUserCreateOptions(string name = null, bool? isAdmin = null, DateTimeOffset? expiryTime = null, string password = null, string sshPublicKey = null) { - return new BatchNodeUserCreateContent( + return new BatchNodeUserCreateOptions( name, isAdmin, expiryTime, @@ -1236,7 +1238,7 @@ public static BatchNodeUserCreateContent BatchNodeUserCreateContent(string name /// Initializes a new instance of . /// The ID of the Compute Node. Every Compute Node that is added to a Pool is assigned a unique ID. Whenever a Compute Node is removed from a Pool, all of its local files are deleted, and the ID is reclaimed and could be reused for new Compute Nodes. - /// The URL of the Compute Node. + /// The URL of the Compute Node. /// The current state of the Compute Node. The Spot/Low-priority Compute Node has been preempted. Tasks which were running on the Compute Node when it was preempted will be rescheduled when another Compute Node becomes available. /// Whether the Compute Node is available for Task scheduling. /// The time at which the Compute Node entered its current state. @@ -1264,7 +1266,7 @@ public static BatchNodeUserCreateContent BatchNodeUserCreateContent(string name /// Information about the Compute Node agent version and the time the Compute Node upgraded to a new version. /// Info about the current state of the virtual machine. /// A new instance for mocking. - public static BatchNode BatchNode(string id = null, string url = null, BatchNodeState? state = null, SchedulingState? schedulingState = null, DateTimeOffset? stateTransitionTime = null, DateTimeOffset? lastBootTime = null, DateTimeOffset? allocationTime = null, string ipAddress = null, string affinityId = null, string vmSize = null, int? totalTasksRun = null, int? runningTasksCount = null, int? runningTaskSlotsCount = null, int? 
totalTasksSucceeded = null, IEnumerable recentTasks = null, BatchStartTask startTask = null, BatchStartTaskInfo startTaskInfo = null, IEnumerable certificateReferences = null, IEnumerable errors = null, bool? isDedicated = null, BatchNodeEndpointConfiguration endpointConfiguration = null, BatchNodeAgentInfo nodeAgentInfo = null, VirtualMachineInfo virtualMachineInfo = null) + public static BatchNode BatchNode(string id = null, Uri uri = null, BatchNodeState? state = null, SchedulingState? schedulingState = null, DateTimeOffset? stateTransitionTime = null, DateTimeOffset? lastBootTime = null, DateTimeOffset? allocationTime = null, IPAddress ipAddress = null, string affinityId = null, string vmSize = null, int? totalTasksRun = null, int? runningTasksCount = null, int? runningTaskSlotsCount = null, int? totalTasksSucceeded = null, IEnumerable recentTasks = null, BatchStartTask startTask = null, BatchStartTaskInfo startTaskInfo = null, IEnumerable certificateReferences = null, IEnumerable errors = null, bool? isDedicated = null, BatchNodeEndpointConfiguration endpointConfiguration = null, BatchNodeAgentInfo nodeAgentInfo = null, VirtualMachineInfo virtualMachineInfo = null) { recentTasks ??= new List(); certificateReferences ??= new List(); @@ -1272,7 +1274,7 @@ public static BatchNode BatchNode(string id = null, string url = null, BatchNode return new BatchNode( id, - url, + uri, state, schedulingState, stateTransitionTime, @@ -1298,17 +1300,17 @@ public static BatchNode BatchNode(string id = null, string url = null, BatchNode } /// Initializes a new instance of . - /// The URL of the Task. + /// The URL of the Task. /// The ID of the Job to which the Task belongs. /// The ID of the Task. /// The ID of the subtask if the Task is a multi-instance Task. /// The current state of the Task. /// Information about the execution of the Task. /// A new instance for mocking. 
- public static BatchTaskInfo BatchTaskInfo(string taskUrl = null, string jobId = null, string taskId = null, int? subtaskId = null, BatchTaskState taskState = default, BatchTaskExecutionInfo executionInfo = null) + public static BatchTaskInfo BatchTaskInfo(Uri taskUri = null, string jobId = null, string taskId = null, int? subtaskId = null, BatchTaskState taskState = default, BatchTaskExecutionInfo executionInfo = null) { return new BatchTaskInfo( - taskUrl, + taskUri, jobId, taskId, subtaskId, @@ -1373,7 +1375,7 @@ public static BatchNodeEndpointConfiguration BatchNodeEndpointConfiguration(IEnu /// The public port number of the endpoint. /// The backend port number of the endpoint. /// A new instance for mocking. - public static InboundEndpoint InboundEndpoint(string name = null, InboundEndpointProtocol protocol = default, string publicIpAddress = null, string publicFQDN = null, int frontendPort = default, int backendPort = default) + public static InboundEndpoint InboundEndpoint(string name = null, InboundEndpointProtocol protocol = default, IPAddress publicIpAddress = null, string publicFQDN = null, int frontendPort = default, int backendPort = default) { return new InboundEndpoint( name, @@ -1398,7 +1400,7 @@ public static BatchNodeAgentInfo BatchNodeAgentInfo(string version = null, DateT /// The reference to the Azure Virtual Machine's Marketplace Image. /// The resource ID of the Compute Node's current Virtual Machine Scale Set VM. Only defined if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. /// A new instance for mocking. 
- public static VirtualMachineInfo VirtualMachineInfo(ImageReference imageReference = null, string scaleSetVmResourceId = null) + public static VirtualMachineInfo VirtualMachineInfo(BatchVmImageReference imageReference = null, string scaleSetVmResourceId = null) { return new VirtualMachineInfo(imageReference, scaleSetVmResourceId, serializedAdditionalRawData: null); } @@ -1407,20 +1409,20 @@ public static VirtualMachineInfo VirtualMachineInfo(ImageReference imageReferenc /// The IP address used for remote login to the Compute Node. /// The port used for remote login to the Compute Node. /// A new instance for mocking. - public static BatchNodeRemoteLoginSettings BatchNodeRemoteLoginSettings(string remoteLoginIpAddress = null, int remoteLoginPort = default) + public static BatchNodeRemoteLoginSettings BatchNodeRemoteLoginSettings(IPAddress remoteLoginIpAddress = null, int remoteLoginPort = default) { return new BatchNodeRemoteLoginSettings(remoteLoginIpAddress, remoteLoginPort, serializedAdditionalRawData: null); } - /// Initializes a new instance of . - /// The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. + /// Initializes a new instance of . + /// The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. /// The start of the time range from which to upload Batch Service log file(s). 
Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. /// The end of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime. /// The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. - /// A new instance for mocking. - public static UploadBatchServiceLogsContent UploadBatchServiceLogsContent(string containerUrl = null, DateTimeOffset startTime = default, DateTimeOffset? endTime = null, BatchNodeIdentityReference identityReference = null) + /// A new instance for mocking. + public static UploadBatchServiceLogsOptions UploadBatchServiceLogsOptions(Uri containerUri = null, DateTimeOffset startTime = default, DateTimeOffset? endTime = null, BatchNodeIdentityReference identityReference = null) { - return new UploadBatchServiceLogsContent(containerUrl, startTime, endTime, identityReference, serializedAdditionalRawData: null); + return new UploadBatchServiceLogsOptions(containerUri, startTime, endTime, identityReference, serializedAdditionalRawData: null); } /// Initializes a new instance of . 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.Serialization.cs index f09a62f1a78c..e27c7af1f2d4 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.Serialization.cs @@ -44,10 +44,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("password"u8); writer.WriteStringValue(Password); } - if (Optional.IsDefined(RegistryServer)) + if (Optional.IsDefined(RegistryServerUri)) { writer.WritePropertyName("registryServer"u8); - writer.WriteStringValue(RegistryServer); + writer.WriteStringValue(RegistryServerUri.AbsoluteUri); } if (Optional.IsDefined(IdentityReference)) { @@ -93,7 +93,7 @@ internal static ContainerRegistryReference DeserializeContainerRegistryReference } string username = default; string password = default; - string registryServer = default; + Uri registryServer = default; BatchNodeIdentityReference identityReference = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -111,7 +111,11 @@ internal static ContainerRegistryReference DeserializeContainerRegistryReference } if (property.NameEquals("registryServer"u8)) { - registryServer = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + registryServer = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("identityReference"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.cs index 3330be8fa2fd..95fd8a357151 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.cs +++ 
b/sdk/batch/Azure.Compute.Batch/src/Generated/ContainerRegistryReference.cs @@ -53,14 +53,14 @@ public ContainerRegistryReference() /// Initializes a new instance of . /// The user name to log into the registry server. /// The password to log into the registry server. - /// The registry URL. If omitted, the default is "docker.io". + /// The registry URL. If omitted, the default is "docker.io". /// The reference to the user assigned identity to use to access an Azure Container Registry instead of username and password. /// Keeps track of any properties unknown to the library. - internal ContainerRegistryReference(string username, string password, string registryServer, BatchNodeIdentityReference identityReference, IDictionary serializedAdditionalRawData) + internal ContainerRegistryReference(string username, string password, Uri registryServerUri, BatchNodeIdentityReference identityReference, IDictionary serializedAdditionalRawData) { Username = username; Password = password; - RegistryServer = registryServer; + RegistryServerUri = registryServerUri; IdentityReference = identityReference; _serializedAdditionalRawData = serializedAdditionalRawData; } @@ -70,7 +70,7 @@ internal ContainerRegistryReference(string username, string password, string reg /// The password to log into the registry server. public string Password { get; set; } /// The registry URL. If omitted, the default is "docker.io". - public string RegistryServer { get; set; } + public Uri RegistryServerUri { get; set; } /// The reference to the user assigned identity to use to access an Azure Container Registry instead of username and password. 
public BatchNodeIdentityReference IdentityReference { get; set; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/DisableBatchJobOption.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/DisableBatchJobOption.cs index 23707ffb0cb2..eeb57a6c432b 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/DisableBatchJobOption.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/DisableBatchJobOption.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// DisableBatchJobOption enums. - public readonly partial struct DisableBatchJobOption : IEquatable + internal readonly partial struct DisableBatchJobOption : IEquatable { private readonly string _value; diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml b/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml index 8492238071b6..5c4843818588 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/Docs/BatchClient.xml @@ -1,7 +1,7 @@ - + This sample shows how to call GetApplicationAsync. response = await client.GetApplicationAsync("my_application_id"); ]]> - + This sample shows how to call GetApplication. response = client.GetApplication("my_application_id"); ]]> - + This sample shows how to call GetApplicationAsync and parse the result. - + This sample shows how to call GetApplication and parse the result. - + This sample shows how to call CreatePoolAsync. 
"); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool01", "Standard_D1_v2") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool01", "Standard_D1_v2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -86,9 +86,9 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "UbuntuServer", @@ -108,7 +108,7 @@ BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") }, }, new MountConfiguration { - AzureFileShareConfiguration = new AzureFileShareConfiguration("accountName", "https://myaccount.file.core.windows.net/fileshare", "accountKey", "filesharepath") + AzureFileShareConfiguration = new AzureFileShareConfiguration("accountName", "accountKey", new Uri("https://myaccount.file.core.windows.net/fileshare"), "filesharepath") { MountOptions = "mount options ver=1.0", }, @@ -134,18 +134,18 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "standard_d2s_v3") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "standard_d2s_v3") { - VirtualMachineConfiguration = new 
VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", Sku = "20_04-lts", }, "batch.node.ubuntu 20.04") { - OsDisk = new OSDisk + OsDisk = new BatchOsDisk { - EphemeralOSDiskSettings = new DiffDiskSettings + EphemeralOSDiskSettings = new BatchDiffDiskSettings { Placement = DiffDiskPlacement.CacheDisk, }, @@ -171,9 +171,9 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "STANDARD_DC2s_V2") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_DC2s_V2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "UbuntuServer", @@ -195,9 +195,9 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "STANDARD_DC2s_V2") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_DC2s_V2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "UbuntuServer", @@ -205,7 +205,7 @@ BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "STANDARD_ Version = "latest", }, "batch.node.ubuntu 18.04") { - SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new BatchUefiSettings { SecureBootEnabled = false, }), @@ -220,9 +220,9 @@ 
Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", @@ -235,7 +235,7 @@ BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") EnableInterNodeCommunication = true, TaskSlotsPerNode = 3, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, }; Response response = await client.CreatePoolAsync(pool); ]]> @@ -245,16 +245,16 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", Sku = "120_04-lts", }, "batch.node.ubuntu 20.04") { - ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + ContainerConfiguration = new BatchContainerConfiguration(ContainerType.DockerCompatible) { ContainerImageNames = { "busybox" }, }, @@ -274,9 +274,9 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); 
-BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", @@ -302,7 +302,7 @@ BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") EnableInterNodeCommunication = true, TaskSlotsPerNode = 3, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Simplified, }; Response response = await client.CreatePoolAsync(pool); @@ -313,9 +313,9 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool002", "Standard_A1_v2") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool002", "Standard_A1_v2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -334,7 +334,7 @@ BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool002", "Standard_ Response response = await client.CreatePoolAsync(pool); ]]> - + This sample shows how to call CreatePool. 
"); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool01", "Standard_D1_v2") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool01", "Standard_D1_v2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -365,9 +365,9 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "UbuntuServer", @@ -387,7 +387,7 @@ BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") }, }, new MountConfiguration { - AzureFileShareConfiguration = new AzureFileShareConfiguration("accountName", "https://myaccount.file.core.windows.net/fileshare", "accountKey", "filesharepath") + AzureFileShareConfiguration = new AzureFileShareConfiguration("accountName", "accountKey", new Uri("https://myaccount.file.core.windows.net/fileshare"), "filesharepath") { MountOptions = "mount options ver=1.0", }, @@ -413,18 +413,18 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "standard_d2s_v3") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "standard_d2s_v3") { - VirtualMachineConfiguration = new 
VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", Sku = "20_04-lts", }, "batch.node.ubuntu 20.04") { - OsDisk = new OSDisk + OsDisk = new BatchOsDisk { - EphemeralOSDiskSettings = new DiffDiskSettings + EphemeralOSDiskSettings = new BatchDiffDiskSettings { Placement = DiffDiskPlacement.CacheDisk, }, @@ -450,9 +450,9 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "STANDARD_DC2s_V2") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_DC2s_V2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "UbuntuServer", @@ -474,9 +474,9 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "STANDARD_DC2s_V2") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_DC2s_V2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "UbuntuServer", @@ -484,7 +484,7 @@ BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "STANDARD_ Version = "latest", }, "batch.node.ubuntu 18.04") { - SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new BatchUefiSettings { SecureBootEnabled = false, }), @@ -499,9 +499,9 @@ 
Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", @@ -514,7 +514,7 @@ BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") EnableInterNodeCommunication = true, TaskSlotsPerNode = 3, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, }; Response response = client.CreatePool(pool); ]]> @@ -524,16 +524,16 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", Sku = "120_04-lts", }, "batch.node.ubuntu 20.04") { - ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + ContainerConfiguration = new BatchContainerConfiguration(ContainerType.DockerCompatible) { ContainerImageNames = { "busybox" }, }, @@ -553,9 +553,9 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); 
-BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", @@ -581,7 +581,7 @@ BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") EnableInterNodeCommunication = true, TaskSlotsPerNode = 3, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Simplified, }; Response response = client.CreatePool(pool); @@ -592,9 +592,9 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool002", "Standard_A1_v2") +BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool002", "Standard_A1_v2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -613,7 +613,7 @@ BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool002", "Standard_ Response response = client.CreatePool(pool); ]]> - + This sample shows how to call CreatePoolAsync. - + This sample shows how to call CreatePool. - - -This sample shows how to call DeletePoolAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.DeletePoolAsync("poolId"); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call DeletePool. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.DeletePool("poolId"); - -Console.WriteLine(response.Status); -]]> - - + This sample shows how to call GetPoolAsync. response = await client.GetPoolAsync("pool"); ]]> - + This sample shows how to call GetPool. response = client.GetPool("pool"); ]]> - + This sample shows how to call GetPoolAsync and parse the result. - + This sample shows how to call GetPool and parse the result. - + This sample shows how to call UpdatePoolAsync. - + This sample shows how to call UpdatePool. - + This sample shows how to call DisablePoolAutoScaleAsync. - + This sample shows how to call DisablePoolAutoScale. - + This sample shows how to call EnablePoolAutoScaleAsync. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolEnableAutoScaleContent content = new BatchPoolEnableAutoScaleContent +BatchPoolAutoScaleEnableOptions enableAutoScaleOptions = new BatchPoolAutoScaleEnableOptions { AutoScaleFormula = "$TargetDedicated=0", AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT8M"), }; -Response response = await client.EnablePoolAutoScaleAsync("poolId", content); +Response response = await client.EnablePoolAutoScaleAsync("poolId", enableAutoScaleOptions); ]]> - + This sample shows how to call EnablePoolAutoScale. 
"); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolEnableAutoScaleContent content = new BatchPoolEnableAutoScaleContent +BatchPoolAutoScaleEnableOptions enableAutoScaleOptions = new BatchPoolAutoScaleEnableOptions { AutoScaleFormula = "$TargetDedicated=0", AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT8M"), }; -Response response = client.EnablePoolAutoScale("poolId", content); +Response response = client.EnablePoolAutoScale("poolId", enableAutoScaleOptions); ]]> - + This sample shows how to call EnablePoolAutoScaleAsync. - + This sample shows how to call EnablePoolAutoScale. - + This sample shows how to call EvaluatePoolAutoScaleAsync. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolEvaluateAutoScaleContent content = new BatchPoolEvaluateAutoScaleContent("$TargetDedicated=1"); -Response response = await client.EvaluatePoolAutoScaleAsync("poolId", content); +BatchPoolAutoScaleEvaluateOptions evaluateAutoScaleOptions = new BatchPoolAutoScaleEvaluateOptions("$TargetDedicated=1"); +Response response = await client.EvaluatePoolAutoScaleAsync("poolId", evaluateAutoScaleOptions); ]]> - + This sample shows how to call EvaluatePoolAutoScale. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolEvaluateAutoScaleContent content = new BatchPoolEvaluateAutoScaleContent("$TargetDedicated=1"); -Response response = client.EvaluatePoolAutoScale("poolId", content); +BatchPoolAutoScaleEvaluateOptions evaluateAutoScaleOptions = new BatchPoolAutoScaleEvaluateOptions("$TargetDedicated=1"); +Response response = client.EvaluatePoolAutoScale("poolId", evaluateAutoScaleOptions); ]]> - + This sample shows how to call EvaluatePoolAutoScaleAsync and parse the result. 
- + This sample shows how to call EvaluatePoolAutoScale and parse the result. - - -This sample shows how to call ResizePoolAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -BatchPoolResizeContent content = new BatchPoolResizeContent -{ - TargetDedicatedNodes = 1, - TargetLowPriorityNodes = 0, -}; -Response response = await client.ResizePoolAsync("resizePool", content); -]]> - - - -This sample shows how to call ResizePool. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -BatchPoolResizeContent content = new BatchPoolResizeContent -{ - TargetDedicatedNodes = 1, - TargetLowPriorityNodes = 0, -}; -Response response = client.ResizePool("resizePool", content); -]]> - - - -This sample shows how to call ResizePoolAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = RequestContent.Create(new -{ - targetDedicatedNodes = 1, - targetLowPriorityNodes = 0, -}); -Response response = await client.ResizePoolAsync("resizePool", content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call ResizePool. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = RequestContent.Create(new -{ - targetDedicatedNodes = 1, - targetLowPriorityNodes = 0, -}); -Response response = client.ResizePool("resizePool", content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call StopPoolResizeAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.StopPoolResizeAsync("poolId"); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call StopPoolResize. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.StopPoolResize("poolId"); - -Console.WriteLine(response.Status); -]]> - - + This sample shows how to call ReplacePoolPropertiesAsync. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolReplaceContent pool = new BatchPoolReplaceContent(Array.Empty(), Array.Empty(), Array.Empty()) +BatchPoolReplaceOptions pool = new BatchPoolReplaceOptions(Array.Empty(), Array.Empty(), Array.Empty()) { StartTask = new BatchStartTask("/bin/bash -c 'echo start task'"), }; Response response = await client.ReplacePoolPropertiesAsync("poolId", pool); ]]> - + This sample shows how to call ReplacePoolProperties. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchPoolReplaceContent pool = new BatchPoolReplaceContent(Array.Empty(), Array.Empty(), Array.Empty()) +BatchPoolReplaceOptions pool = new BatchPoolReplaceOptions(Array.Empty(), Array.Empty(), Array.Empty()) { StartTask = new BatchStartTask("/bin/bash -c 'echo start task'"), }; Response response = client.ReplacePoolProperties("poolId", pool); ]]> - + This sample shows how to call ReplacePoolPropertiesAsync. - + This sample shows how to call ReplacePoolProperties. - - -This sample shows how to call RemoveNodesAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { "tvm-1695681911_1-20161122t224741z", "tvm-1695681911_2-20161122t224741z" }); -Response response = await client.RemoveNodesAsync("poolId", content); -]]> - - - -This sample shows how to call RemoveNodes. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { "tvm-1695681911_1-20161122t224741z", "tvm-1695681911_2-20161122t224741z" }); -Response response = client.RemoveNodes("poolId", content); -]]> - - - -This sample shows how to call RemoveNodesAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = RequestContent.Create(new -{ - nodeList = new object[] - { - "tvm-1695681911_1-20161122t224741z", - "tvm-1695681911_2-20161122t224741z" - }, -}); -Response response = await client.RemoveNodesAsync("poolId", content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call RemoveNodes. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = RequestContent.Create(new -{ - nodeList = new object[] - { - "tvm-1695681911_1-20161122t224741z", - "tvm-1695681911_2-20161122t224741z" - }, -}); -Response response = client.RemoveNodes("poolId", content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call DeleteJobAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.DeleteJobAsync("jobId"); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call DeleteJob. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.DeleteJob("jobId"); - -Console.WriteLine(response.Status); -]]> - - + This sample shows how to call GetJobAsync. response = await client.GetJobAsync("jobId"); ]]> - + This sample shows how to call GetJob. response = client.GetJob("jobId"); ]]> - + This sample shows how to call GetJobAsync and parse the result. - + This sample shows how to call GetJob and parse the result. - + This sample shows how to call UpdateJobAsync. - + This sample shows how to call UpdateJob. - + This sample shows how to call ReplaceJobAsync. - + This sample shows how to call ReplaceJob. - + This sample shows how to call ReplaceJobAsync. - + This sample shows how to call ReplaceJob. - - -This sample shows how to call DisableJobAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Terminate); -Response response = await client.DisableJobAsync("jobId", content); -]]> - - - -This sample shows how to call DisableJob. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Terminate); -Response response = client.DisableJob("jobId", content); -]]> - - - -This sample shows how to call DisableJobAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = RequestContent.Create(new -{ - disableTasks = "terminate", -}); -Response response = await client.DisableJobAsync("jobId", content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call DisableJob. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = RequestContent.Create(new -{ - disableTasks = "terminate", -}); -Response response = client.DisableJob("jobId", content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call EnableJobAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.EnableJobAsync("jobId"); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call EnableJob. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.EnableJob("jobId"); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call TerminateJobAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.TerminateJobAsync("jobId"); -]]> - - - -This sample shows how to call TerminateJob. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.TerminateJob("jobId"); -]]> - - - -This sample shows how to call TerminateJobAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = null; -Response response = await client.TerminateJobAsync("jobId", content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call TerminateJob. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = null; -Response response = client.TerminateJob("jobId", content); - -Console.WriteLine(response.Status); -]]> - - + This sample shows how to call CreateJobAsync. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo +BatchJobCreateOptions job = new BatchJobCreateOptions("jobId", new BatchPoolInfo { PoolId = "poolId", }) @@ -2500,14 +2154,14 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo +BatchJobCreateOptions job = new BatchJobCreateOptions("jobId", new BatchPoolInfo { AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) { AutoPoolIdPrefix = "mypool", Pool = new BatchPoolSpecification("STANDARD_D2S_V3") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -2535,7 +2189,7 @@ BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo { ResourceFiles = {new ResourceFile { - HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas", + HttpUri = new 
Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas"), FilePath = "myprogram2.exe", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -2559,7 +2213,7 @@ BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo StoreName = "Root", Visibility = {BatchCertificateVisibility.Task}, }}, - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, @@ -2575,11 +2229,11 @@ BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo { ResourceFiles = {new ResourceFile { - HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas", + HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas"), FilePath = "myprogram.exe", }, new ResourceFile { - StorageContainerUrl = "http://mystorage1.blob.core.windows.net/data?sas", + StorageContainerUri = new Uri("http://mystorage1.blob.core.windows.net/data?sas"), FilePath = "datafolder", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -2604,12 +2258,12 @@ BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo }, RunExclusive = true, }, - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, }; Response response = await client.CreateJobAsync(job); ]]> - + This sample shows how to call CreateJob. 
"); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo +BatchJobCreateOptions job = new BatchJobCreateOptions("jobId", new BatchPoolInfo { PoolId = "poolId", }) @@ -2632,14 +2286,14 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo +BatchJobCreateOptions job = new BatchJobCreateOptions("jobId", new BatchPoolInfo { AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) { AutoPoolIdPrefix = "mypool", Pool = new BatchPoolSpecification("STANDARD_D2S_V3") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -2667,7 +2321,7 @@ BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo { ResourceFiles = {new ResourceFile { - HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas", + HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas"), FilePath = "myprogram2.exe", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -2691,7 +2345,7 @@ BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo StoreName = "Root", Visibility = {BatchCertificateVisibility.Task}, }}, - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, @@ -2707,11 +2361,11 @@ BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo { ResourceFiles = {new ResourceFile { - HttpUrl = 
"http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas", + HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas"), FilePath = "myprogram.exe", }, new ResourceFile { - StorageContainerUrl = "http://mystorage1.blob.core.windows.net/data?sas", + StorageContainerUri = new Uri("http://mystorage1.blob.core.windows.net/data?sas"), FilePath = "datafolder", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -2736,12 +2390,12 @@ BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo }, RunExclusive = true, }, - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, }; Response response = client.CreateJob(job); ]]> - + This sample shows how to call CreateJobAsync. - + This sample shows how to call CreateJob. - + This sample shows how to call GetJobTaskCountsAsync. response = await client.GetJobTaskCountsAsync("jobId"); ]]> - + This sample shows how to call GetJobTaskCounts. response = client.GetJobTaskCounts("jobId"); ]]> - + This sample shows how to call GetJobTaskCountsAsync and parse the result. - + This sample shows how to call GetJobTaskCounts and parse the result. - + This sample shows how to call CreateCertificateAsync. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", "#####...") +BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", BinaryData.FromObjectAsJson("U3dhZ2dlciByb2Nrcw==")) { CertificateFormat = BatchCertificateFormat.Pfx, Password = "", @@ -3199,7 +2853,7 @@ BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789a Response response = await client.CreateCertificateAsync(certificate); ]]> - + This sample shows how to call CreateCertificate. 
"); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", "#####...") +BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", BinaryData.FromObjectAsJson("U3dhZ2dlciByb2Nrcw==")) { CertificateFormat = BatchCertificateFormat.Pfx, Password = "", @@ -3215,7 +2869,7 @@ BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789a Response response = client.CreateCertificate(certificate); ]]> - + This sample shows how to call CreateCertificateAsync. ", }); @@ -3236,7 +2890,7 @@ Response response = await client.CreateCertificateAsync(content); Console.WriteLine(response.Status); ]]> - + This sample shows how to call CreateCertificate. ", }); @@ -3257,7 +2911,7 @@ Response response = client.CreateCertificate(content); Console.WriteLine(response.Status); ]]> - + This sample shows how to call CancelCertificateDeletionAsync. - + This sample shows how to call CancelCertificateDeletion. - - -This sample shows how to call DeleteCertificateAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.DeleteCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call DeleteCertificate. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.DeleteCertificate("sha1", "0123456789abcdef0123456789abcdef01234567"); - -Console.WriteLine(response.Status); -]]> - - + This sample shows how to call GetCertificateAsync. 
response = await client.GetCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); ]]> - + This sample shows how to call GetCertificate. response = client.GetCertificate("sha1", "0123456789abcdef0123456789abcdef01234567"); ]]> - + This sample shows how to call GetCertificateAsync and parse the result. - + This sample shows how to call GetCertificate and parse the result. - - -This sample shows how to call DeleteJobScheduleAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.DeleteJobScheduleAsync("jobScheduleId"); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call DeleteJobSchedule. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.DeleteJobSchedule("jobScheduleId"); - -Console.WriteLine(response.Status); -]]> - - + This sample shows how to call GetJobScheduleAsync. response = await client.GetJobScheduleAsync("jobScheduleId"); ]]> - + This sample shows how to call GetJobSchedule. response = client.GetJobSchedule("jobScheduleId"); ]]> - + This sample shows how to call GetJobScheduleAsync and parse the result. - + This sample shows how to call GetJobSchedule and parse the result. - + This sample shows how to call UpdateJobScheduleAsync. - + This sample shows how to call UpdateJobSchedule. - + This sample shows how to call ReplaceJobScheduleAsync. - + This sample shows how to call ReplaceJobSchedule. - + This sample shows how to call ReplaceJobScheduleAsync. - + This sample shows how to call ReplaceJobSchedule. - + This sample shows how to call DisableJobScheduleAsync. - + This sample shows how to call DisableJobSchedule. - + This sample shows how to call EnableJobScheduleAsync. - + This sample shows how to call EnableJobSchedule. - - -This sample shows how to call TerminateJobScheduleAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.TerminateJobScheduleAsync("jobScheduleId"); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call TerminateJobSchedule. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.TerminateJobSchedule("jobScheduleId"); - -Console.WriteLine(response.Status); -]]> - - + This sample shows how to call CreateJobScheduleAsync. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("jobScheduleId", new BatchJobScheduleConfiguration +BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("jobScheduleId", new BatchJobScheduleConfiguration { RecurrenceInterval = XmlConvert.ToTimeSpan("PT5M"), }, new BatchJobSpecification(new BatchPoolInfo @@ -3736,7 +3312,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("jobScheduleId", new BatchJobScheduleConfiguration +BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("jobScheduleId", new BatchJobScheduleConfiguration { DoNotRunUntil = DateTimeOffset.Parse("2014-09-10T02:30:00.000Z"), DoNotRunAfter = DateTimeOffset.Parse("2014-09-10T06:30:00.000Z"), @@ -3749,7 +3325,7 @@ BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("j AutoPoolIdPrefix = "mypool", Pool = new BatchPoolSpecification("STANDARD_D2S_V3") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new 
BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -3777,7 +3353,7 @@ BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("j { ResourceFiles = {new ResourceFile { - HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas", + HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas"), FilePath = "myprogram2.exe", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -3801,7 +3377,7 @@ BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("j StoreName = "Root", Visibility = {BatchCertificateVisibility.Task}, }}, - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, @@ -3817,11 +3393,11 @@ BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("j { ResourceFiles = {new ResourceFile { - HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas", + HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas"), FilePath = "myprogram.exe", }, new ResourceFile { - HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/test.txt?sas", + HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/test.txt?sas"), FilePath = "test.txt", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -3848,12 +3424,12 @@ BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("j }, }) { - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, }; Response response = await client.CreateJobScheduleAsync(jobSchedule); ]]> - + This sample shows how to call CreateJobSchedule. 
"); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("jobScheduleId", new BatchJobScheduleConfiguration +BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("jobScheduleId", new BatchJobScheduleConfiguration { RecurrenceInterval = XmlConvert.ToTimeSpan("PT5M"), }, new BatchJobSpecification(new BatchPoolInfo @@ -3876,7 +3452,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("jobScheduleId", new BatchJobScheduleConfiguration +BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("jobScheduleId", new BatchJobScheduleConfiguration { DoNotRunUntil = DateTimeOffset.Parse("2014-09-10T02:30:00.000Z"), DoNotRunAfter = DateTimeOffset.Parse("2014-09-10T06:30:00.000Z"), @@ -3889,7 +3465,7 @@ BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("j AutoPoolIdPrefix = "mypool", Pool = new BatchPoolSpecification("STANDARD_D2S_V3") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -3917,7 +3493,7 @@ BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("j { ResourceFiles = {new ResourceFile { - HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas", + HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas"), FilePath = "myprogram2.exe", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -3941,7 +3517,7 @@ BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("j StoreName = 
"Root", Visibility = {BatchCertificateVisibility.Task}, }}, - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, @@ -3957,11 +3533,11 @@ BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("j { ResourceFiles = {new ResourceFile { - HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas", + HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas"), FilePath = "myprogram.exe", }, new ResourceFile { - HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/test.txt?sas", + HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/test.txt?sas"), FilePath = "test.txt", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -3988,12 +3564,12 @@ BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("j }, }) { - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, }; Response response = client.CreateJobSchedule(jobSchedule); ]]> - + This sample shows how to call CreateJobScheduleAsync. - + This sample shows how to call CreateJobSchedule. - + This sample shows how to call CreateTaskAsync. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo task1"); +BatchTaskCreateOptions task = new BatchTaskCreateOptions("task1", "cmd /c echo task1"); Response response = await client.CreateTaskAsync("jobId", task); ]]> This sample shows how to call CreateTaskAsync. 
@@ -4416,7 +3992,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") +BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { @@ -4439,7 +4015,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") +BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { @@ -4466,7 +4042,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") +BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { @@ -4497,13 +4073,13 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "cmd /c exit 3") +BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "cmd /c exit 3") { ExitConditions = new ExitConditions { ExitCodeRanges = {new ExitCodeRangeMapping(2, 4, new ExitOptions { - JobAction = BatchJobAction.Terminate, + JobAction = BatchJobActionKind.Terminate, })}, }, UserIdentity = new UserIdentity @@ -4523,14 +4099,14 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); 
-BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo task1") +BatchTaskCreateOptions task = new BatchTaskCreateOptions("task1", "cmd /c echo task1") { RequiredSlots = 2, }; Response response = await client.CreateTaskAsync("jobId", task); ]]> - + This sample shows how to call CreateTask. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo task1"); +BatchTaskCreateOptions task = new BatchTaskCreateOptions("task1", "cmd /c echo task1"); Response response = client.CreateTask("jobId", task); ]]> This sample shows how to call CreateTask. @@ -4547,7 +4123,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") +BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { @@ -4570,7 +4146,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") +BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { @@ -4597,7 +4173,7 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") +BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { @@ -4628,13 +4204,13 @@ Uri 
endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "cmd /c exit 3") +BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "cmd /c exit 3") { ExitConditions = new ExitConditions { ExitCodeRanges = {new ExitCodeRangeMapping(2, 4, new ExitOptions { - JobAction = BatchJobAction.Terminate, + JobAction = BatchJobActionKind.Terminate, })}, }, UserIdentity = new UserIdentity @@ -4654,14 +4230,14 @@ Uri endpoint = new Uri(""); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo task1") +BatchTaskCreateOptions task = new BatchTaskCreateOptions("task1", "cmd /c echo task1") { RequiredSlots = 2, }; Response response = client.CreateTask("jobId", task); ]]> - + This sample shows how to call CreateTaskAsync. - + This sample shows how to call CreateTask. - + This sample shows how to call CreateTaskCollectionAsync. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] +BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateOptions[] { - new BatchTaskCreateContent("simple1", "cmd /c dir /s"), - new BatchTaskCreateContent("simple2", "cmd /c dir /s") + new BatchTaskCreateOptions("simple1", "cmd /c dir /s"), + new BatchTaskCreateOptions("simple2", "cmd /c dir /s") }); -Response response = await client.CreateTaskCollectionAsync("jobId", taskCollection); +Response response = await client.CreateTaskCollectionAsync("jobId", taskCollection); ]]> This sample shows how to call CreateTaskCollectionAsync. 
"); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] +BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateOptions[] { - new BatchTaskCreateContent("complex1", "cmd /c dir /s") + new BatchTaskCreateOptions("complex1", "cmd /c dir /s") { ResourceFiles = {new ResourceFile { @@ -5048,7 +4624,7 @@ BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] { Value = "value2", }}, - AffinityInfo = new AffinityInfo("affinityId"), + AffinityInfo = new BatchAffinityInfo("affinityId"), Constraints = new BatchTaskConstraints { MaxWallClockTime = XmlConvert.ToTimeSpan("P1D"), @@ -5061,17 +4637,17 @@ BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] NumberOfInstances = 3, CommonResourceFiles = {new ResourceFile { - HttpUrl = "https://common.blob.core.windows.net/", + HttpUri = new Uri("https://common.blob.core.windows.net/"), FilePath = "common.exe", }}, }, }, - new BatchTaskCreateContent("simple3", "cmd /c dir /s") + new BatchTaskCreateOptions("simple3", "cmd /c dir /s") }); -Response response = await client.CreateTaskCollectionAsync("jobId", taskCollection); +Response response = await client.CreateTaskCollectionAsync("jobId", taskCollection); ]]> - + This sample shows how to call CreateTaskCollection. 
"); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] +BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateOptions[] { - new BatchTaskCreateContent("simple1", "cmd /c dir /s"), - new BatchTaskCreateContent("simple2", "cmd /c dir /s") + new BatchTaskCreateOptions("simple1", "cmd /c dir /s"), + new BatchTaskCreateOptions("simple2", "cmd /c dir /s") }); -Response response = client.CreateTaskCollection("jobId", taskCollection); +Response response = client.CreateTaskCollection("jobId", taskCollection); ]]> This sample shows how to call CreateTaskCollection. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] +BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateOptions[] { - new BatchTaskCreateContent("complex1", "cmd /c dir /s") + new BatchTaskCreateOptions("complex1", "cmd /c dir /s") { ResourceFiles = {new ResourceFile { @@ -5108,7 +4684,7 @@ BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] { Value = "value2", }}, - AffinityInfo = new AffinityInfo("affinityId"), + AffinityInfo = new BatchAffinityInfo("affinityId"), Constraints = new BatchTaskConstraints { MaxWallClockTime = XmlConvert.ToTimeSpan("P1D"), @@ -5121,17 +4697,17 @@ BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] NumberOfInstances = 3, CommonResourceFiles = {new ResourceFile { - HttpUrl = "https://common.blob.core.windows.net/", + HttpUri = new Uri("https://common.blob.core.windows.net/"), FilePath = "common.exe", }}, }, }, - new BatchTaskCreateContent("simple3", "cmd /c dir /s") + new BatchTaskCreateOptions("simple3", "cmd /c dir /s") }); -Response response = client.CreateTaskCollection("jobId", taskCollection); 
+Response response = client.CreateTaskCollection("jobId", taskCollection); ]]> - + This sample shows how to call CreateTaskCollectionAsync and parse the result. - + This sample shows how to call CreateTaskCollection and parse the result. - + This sample shows how to call DeleteTaskAsync. - + This sample shows how to call DeleteTask. - + This sample shows how to call GetTaskAsync. response = await client.GetTaskAsync("jobId", "taskId"); ]]> - + This sample shows how to call GetTask. response = client.GetTask("jobId", "taskId"); ]]> - + This sample shows how to call GetTaskAsync and parse the result. - + This sample shows how to call GetTask and parse the result. - + This sample shows how to call ReplaceTaskAsync. - + This sample shows how to call ReplaceTask. - + This sample shows how to call ReplaceTaskAsync. - + This sample shows how to call ReplaceTask. - + This sample shows how to call TerminateTaskAsync. - + This sample shows how to call TerminateTask. - + This sample shows how to call ReactivateTaskAsync. - + This sample shows how to call ReactivateTask. - + This sample shows how to call DeleteTaskFileAsync. - + This sample shows how to call DeleteTaskFile. - + This sample shows how to call GetTaskFileAsync. response = await client.GetTaskFileAsync("jobId", "task1", "wd\\testFile.txt"); ]]> - + This sample shows how to call GetTaskFile. response = client.GetTaskFile("jobId", "task1", "wd\\testFile.txt"); ]]> - + This sample shows how to call GetTaskFileAsync and parse the result. - + This sample shows how to call GetTaskFile and parse the result. - + This sample shows how to call CreateNodeUserAsync. 
"); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchNodeUserCreateContent user = new BatchNodeUserCreateContent("userName") +BatchNodeUserCreateOptions user = new BatchNodeUserCreateOptions("userName") { IsAdmin = false, ExpiryTime = DateTimeOffset.Parse("2017-08-01T00:00:00Z"), @@ -5640,7 +5216,7 @@ BatchNodeUserCreateContent user = new BatchNodeUserCreateContent("userName") Response response = await client.CreateNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", user); ]]> - + This sample shows how to call CreateNodeUser. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchNodeUserCreateContent user = new BatchNodeUserCreateContent("userName") +BatchNodeUserCreateOptions user = new BatchNodeUserCreateOptions("userName") { IsAdmin = false, ExpiryTime = DateTimeOffset.Parse("2017-08-01T00:00:00Z"), @@ -5657,7 +5233,7 @@ BatchNodeUserCreateContent user = new BatchNodeUserCreateContent("userName") Response response = client.CreateNodeUser("poolId", "tvm-1695681911_1-20161121t182739z", user); ]]> - + This sample shows how to call CreateNodeUserAsync. - + This sample shows how to call CreateNodeUser. - + This sample shows how to call DeleteNodeUserAsync. - + This sample shows how to call DeleteNodeUser. - + This sample shows how to call ReplaceNodeUserAsync. 
"); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent +BatchNodeUserUpdateOptions updateOptions = new BatchNodeUserUpdateOptions { Password = "12345", ExpiryTime = DateTimeOffset.Parse("2016-11-27T00:45:48.7320857Z"), }; -Response response = await client.ReplaceNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", "userName", content); +Response response = await client.ReplaceNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", "userName", updateOptions); ]]> - + This sample shows how to call ReplaceNodeUser. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent +BatchNodeUserUpdateOptions updateOptions = new BatchNodeUserUpdateOptions { Password = "12345", ExpiryTime = DateTimeOffset.Parse("2016-11-27T00:45:48.7320857Z"), }; -Response response = client.ReplaceNodeUser("poolId", "tvm-1695681911_1-20161121t182739z", "userName", content); +Response response = client.ReplaceNodeUser("poolId", "tvm-1695681911_1-20161121t182739z", "userName", updateOptions); ]]> - + This sample shows how to call ReplaceNodeUserAsync. - + This sample shows how to call ReplaceNodeUser. - + This sample shows how to call GetNodeAsync. response = await client.GetNodeAsync("poolId", "tvm-1695681911_2-20161122t193202z"); ]]> - + This sample shows how to call GetNode. response = client.GetNode("poolId", "tvm-1695681911_2-20161122t193202z"); ]]> - + This sample shows how to call GetNodeAsync and parse the result. - + This sample shows how to call GetNode and parse the result. - - -This sample shows how to call RebootNodeAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.RebootNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); -]]> - - - -This sample shows how to call RebootNode. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.RebootNode("poolId", "tvm-1695681911_1-20161122t193202z"); -]]> - - - -This sample shows how to call RebootNodeAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = null; -Response response = await client.RebootNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z", content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call RebootNode. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = null; -Response response = client.RebootNode("poolId", "tvm-1695681911_1-20161122t193202z", content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call StartNodeAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.StartNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call StartNode. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.StartNode("poolId", "tvm-1695681911_1-20161122t193202z"); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call ReimageNodeAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.ReimageNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); -]]> - - - -This sample shows how to call ReimageNode. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.ReimageNode("poolId", "tvm-1695681911_1-20161122t193202z"); -]]> - - - -This sample shows how to call ReimageNodeAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = null; -Response response = await client.ReimageNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z", content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call ReimageNode. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = null; -Response response = client.ReimageNode("poolId", "tvm-1695681911_1-20161122t193202z", content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call DeallocateNodeAsync. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = await client.DeallocateNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); -]]> - - - -This sample shows how to call DeallocateNode. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -Response response = client.DeallocateNode("poolId", "tvm-1695681911_1-20161122t193202z"); -]]> - - - -This sample shows how to call DeallocateNodeAsync. 
-"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = null; -Response response = await client.DeallocateNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z", content); - -Console.WriteLine(response.Status); -]]> - - - -This sample shows how to call DeallocateNode. -"); -TokenCredential credential = new DefaultAzureCredential(); -BatchClient client = new BatchClient(endpoint, credential); - -using RequestContent content = null; -Response response = client.DeallocateNode("poolId", "tvm-1695681911_1-20161122t193202z", content); - -Console.WriteLine(response.Status); -]]> - - + This sample shows how to call DisableNodeSchedulingAsync. - + This sample shows how to call DisableNodeScheduling. - + This sample shows how to call DisableNodeSchedulingAsync. - + This sample shows how to call DisableNodeScheduling. - + This sample shows how to call EnableNodeSchedulingAsync. - + This sample shows how to call EnableNodeScheduling. - + This sample shows how to call GetNodeRemoteLoginSettingsAsync. response = await client.GetNodeRemoteLoginSettingsAsync("poolId", "tvm-1695681911_1-20161121t182739z"); ]]> - + This sample shows how to call GetNodeRemoteLoginSettings. response = client.GetNodeRemoteLoginSettings("poolId", "tvm-1695681911_1-20161121t182739z"); ]]> - + This sample shows how to call GetNodeRemoteLoginSettingsAsync and parse the result. - + This sample shows how to call GetNodeRemoteLoginSettings and parse the result. - + This sample shows how to call UploadNodeLogsAsync. 
"); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -UploadBatchServiceLogsContent content = new UploadBatchServiceLogsContent("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig", DateTimeOffset.Parse("2017-11-27T00:00:00Z")); -Response response = await client.UploadNodeLogsAsync("poolId", "tvm-1695681911_1-20161121t182739z", content); +UploadBatchServiceLogsOptions uploadOptions = new UploadBatchServiceLogsOptions(new Uri("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig"), DateTimeOffset.Parse("2017-11-27T00:00:00Z")); +Response response = await client.UploadNodeLogsAsync("poolId", "tvm-1695681911_1-20161121t182739z", uploadOptions); ]]> - + This sample shows how to call UploadNodeLogs. "); TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); -UploadBatchServiceLogsContent content = new UploadBatchServiceLogsContent("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig", DateTimeOffset.Parse("2017-11-27T00:00:00Z")); -Response response = client.UploadNodeLogs("poolId", "tvm-1695681911_1-20161121t182739z", content); +UploadBatchServiceLogsOptions uploadOptions = new UploadBatchServiceLogsOptions(new Uri("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig"), DateTimeOffset.Parse("2017-11-27T00:00:00Z")); +Response response = client.UploadNodeLogs("poolId", "tvm-1695681911_1-20161121t182739z", uploadOptions); ]]> - + This sample shows how to call UploadNodeLogsAsync and parse the result. - + This sample shows how to call UploadNodeLogs and parse the result. - + This sample shows how to call GetNodeExtensionAsync. 
response = await client.GetNodeExtensionAsync("poolId", "tvm-1695681911_2-20161122t193202z", "batchNodeExtension"); ]]> - + This sample shows how to call GetNodeExtension. response = client.GetNodeExtension("poolId", "tvm-1695681911_2-20161122t193202z", "batchNodeExtension"); ]]> - + This sample shows how to call GetNodeExtensionAsync and parse the result. - + This sample shows how to call GetNodeExtension and parse the result. - + This sample shows how to call DeleteNodeFileAsync. - + This sample shows how to call DeleteNodeFile. - + This sample shows how to call GetNodeFileAsync. response = await client.GetNodeFileAsync("poolId", "nodeId", "workitems\\jobId\\job-1\\task1\\wd\\testFile.txt"); ]]> - + This sample shows how to call GetNodeFile. response = client.GetNodeFile("poolId", "nodeId", "workitems\\jobId\\job-1\\task1\\wd\\testFile.txt"); ]]> - + This sample shows how to call GetNodeFileAsync and parse the result. - + This sample shows how to call GetNodeFile and parse the result. - + This sample shows how to call GetApplicationsAsync. - + This sample shows how to call GetApplications. - + This sample shows how to call GetApplicationsAsync and parse the result. - + This sample shows how to call GetApplications and parse the result. - + This sample shows how to call GetPoolUsageMetricsAsync. - + This sample shows how to call GetPoolUsageMetrics. - + This sample shows how to call GetPoolUsageMetricsAsync and parse the result. - + This sample shows how to call GetPoolUsageMetrics and parse the result. - + This sample shows how to call GetPoolsAsync. - + This sample shows how to call GetPools. - + This sample shows how to call GetPoolsAsync and parse the result. - + This sample shows how to call GetPools and parse the result. - + This sample shows how to call GetSupportedImagesAsync. - + This sample shows how to call GetSupportedImages. - + This sample shows how to call GetSupportedImagesAsync and parse the result. 
- + This sample shows how to call GetSupportedImages and parse the result. - + This sample shows how to call GetPoolNodeCountsAsync. - + This sample shows how to call GetPoolNodeCounts. - + This sample shows how to call GetPoolNodeCountsAsync and parse the result. - + This sample shows how to call GetPoolNodeCounts and parse the result. - + This sample shows how to call GetJobsAsync. - + This sample shows how to call GetJobs. - + This sample shows how to call GetJobsAsync and parse the result. - + This sample shows how to call GetJobs and parse the result. - + This sample shows how to call GetJobsFromSchedulesAsync. - + This sample shows how to call GetJobsFromSchedules. - + This sample shows how to call GetJobsFromSchedulesAsync and parse the result. - + This sample shows how to call GetJobsFromSchedules and parse the result. - + This sample shows how to call GetJobPreparationAndReleaseTaskStatusesAsync. - + This sample shows how to call GetJobPreparationAndReleaseTaskStatuses. - + This sample shows how to call GetJobPreparationAndReleaseTaskStatusesAsync and parse the result. - + This sample shows how to call GetJobPreparationAndReleaseTaskStatuses and parse the result. - + This sample shows how to call GetCertificatesAsync. - + This sample shows how to call GetCertificates. - + This sample shows how to call GetCertificatesAsync and parse the result. - + This sample shows how to call GetCertificates and parse the result. - + This sample shows how to call GetJobSchedulesAsync. - + This sample shows how to call GetJobSchedules. - + This sample shows how to call GetJobSchedulesAsync and parse the result. - + This sample shows how to call GetJobSchedules and parse the result. - + This sample shows how to call GetTasksAsync. - + This sample shows how to call GetTasks. - + This sample shows how to call GetTasksAsync and parse the result. - + This sample shows how to call GetTasks and parse the result. - + This sample shows how to call GetSubTasksAsync. 
- + This sample shows how to call GetSubTasks. - + This sample shows how to call GetSubTasksAsync and parse the result. - + This sample shows how to call GetSubTasks and parse the result. - + This sample shows how to call GetTaskFilesAsync. - + This sample shows how to call GetTaskFiles. - + This sample shows how to call GetTaskFilesAsync and parse the result. - + This sample shows how to call GetTaskFiles and parse the result. - + This sample shows how to call GetNodesAsync. - + This sample shows how to call GetNodes. - + This sample shows how to call GetNodesAsync and parse the result. - + This sample shows how to call GetNodes and parse the result. - + This sample shows how to call GetNodeExtensionsAsync. - + This sample shows how to call GetNodeExtensions. - + This sample shows how to call GetNodeExtensionsAsync and parse the result. - + This sample shows how to call GetNodeExtensions and parse the result. - + This sample shows how to call GetNodeFilesAsync. - + This sample shows how to call GetNodeFiles. - + This sample shows how to call GetNodeFilesAsync and parse the result. - + This sample shows how to call GetNodeFiles and parse the result. - -#nullable disable - -using System; -using System.ComponentModel; - -namespace Azure.Compute.Batch -{ - /// ErrorCategory enums. - public readonly partial struct ErrorCategory : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public ErrorCategory(string value) - { - _value = value ?? throw new ArgumentNullException(nameof(value)); - } - - private const string UserErrorValue = "usererror"; - private const string ServerErrorValue = "servererror"; - - /// The error is due to a user issue, such as misconfiguration. - public static ErrorCategory UserError { get; } = new ErrorCategory(UserErrorValue); - /// The error is due to an internal server issue. 
- public static ErrorCategory ServerError { get; } = new ErrorCategory(ServerErrorValue); - /// Determines if two values are the same. - public static bool operator ==(ErrorCategory left, ErrorCategory right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(ErrorCategory left, ErrorCategory right) => !left.Equals(right); - /// Converts a to a . - public static implicit operator ErrorCategory(string value) => new ErrorCategory(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is ErrorCategory other && Equals(other); - /// - public bool Equals(ErrorCategory other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - /// - public override string ToString() => _value; - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.Serialization.cs index 23f7deb4d6b5..797683607099 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.Serialization.cs @@ -81,7 +81,7 @@ internal static ExitOptions DeserializeExitOptions(JsonElement element, ModelRea { return null; } - BatchJobAction? jobAction = default; + BatchJobActionKind? jobAction = default; DependencyAction? 
dependencyAction = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -93,7 +93,7 @@ internal static ExitOptions DeserializeExitOptions(JsonElement element, ModelRea { continue; } - jobAction = new BatchJobAction(property.Value.GetString()); + jobAction = new BatchJobActionKind(property.Value.GetString()); continue; } if (property.NameEquals("dependencyAction"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.cs index 43cee79c97f7..3a71ddf05b62 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ExitOptions.cs @@ -54,7 +54,7 @@ public ExitOptions() /// An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). /// An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. /// Keeps track of any properties unknown to the library. - internal ExitOptions(BatchJobAction? jobAction, DependencyAction? dependencyAction, IDictionary serializedAdditionalRawData) + internal ExitOptions(BatchJobActionKind? jobAction, DependencyAction? dependencyAction, IDictionary serializedAdditionalRawData) { JobAction = jobAction; DependencyAction = dependencyAction; @@ -62,7 +62,7 @@ internal ExitOptions(BatchJobAction? jobAction, DependencyAction? 
dependencyActi } /// An action to take on the Job containing the Task, if the Task completes with the given exit condition and the Job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the Job's onTaskFailed property is noaction, then specifying this property returns an error and the add Task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). - public BatchJobAction? JobAction { get; set; } + public BatchJobActionKind? JobAction { get; set; } /// An action that the Batch service performs on Tasks that depend on this Task. Possible values are 'satisfy' (allowing dependent tasks to progress) and 'block' (dependent tasks continue to wait). Batch does not yet support cancellation of dependent tasks. public DependencyAction? DependencyAction { get; set; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.Serialization.cs index dc56d9c66ee9..d17b766b0b26 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.Serialization.cs @@ -8,6 +8,7 @@ using System; using System.ClientModel.Primitives; using System.Collections.Generic; +using System.Net; using System.Text.Json; using Azure.Core; @@ -39,7 +40,7 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("protocol"u8); writer.WriteStringValue(Protocol.ToString()); writer.WritePropertyName("publicIPAddress"u8); - writer.WriteStringValue(PublicIpAddress); + writer.WriteStringValue(PublicIpAddress.ToString()); writer.WritePropertyName("publicFQDN"u8); writer.WriteStringValue(PublicFQDN); writer.WritePropertyName("frontendPort"u8); @@ -85,7 +86,7 @@ internal static InboundEndpoint 
DeserializeInboundEndpoint(JsonElement element, } string name = default; InboundEndpointProtocol protocol = default; - string publicIPAddress = default; + IPAddress publicIPAddress = default; string publicFQDN = default; int frontendPort = default; int backendPort = default; @@ -105,7 +106,7 @@ internal static InboundEndpoint DeserializeInboundEndpoint(JsonElement element, } if (property.NameEquals("publicIPAddress"u8)) { - publicIPAddress = property.Value.GetString(); + publicIPAddress = IPAddress.Parse(property.Value.GetString()); continue; } if (property.NameEquals("publicFQDN"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.cs index 8febab5a4227..60e9e2f4b126 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/InboundEndpoint.cs @@ -7,6 +7,7 @@ using System; using System.Collections.Generic; +using System.Net; namespace Azure.Compute.Batch { @@ -53,7 +54,7 @@ public partial class InboundEndpoint /// The public port number of the endpoint. /// The backend port number of the endpoint. /// , or is null. - internal InboundEndpoint(string name, InboundEndpointProtocol protocol, string publicIpAddress, string publicFQDN, int frontendPort, int backendPort) + internal InboundEndpoint(string name, InboundEndpointProtocol protocol, IPAddress publicIpAddress, string publicFQDN, int frontendPort, int backendPort) { Argument.AssertNotNull(name, nameof(name)); Argument.AssertNotNull(publicIpAddress, nameof(publicIpAddress)); @@ -75,7 +76,7 @@ internal InboundEndpoint(string name, InboundEndpointProtocol protocol, string p /// The public port number of the endpoint. /// The backend port number of the endpoint. /// Keeps track of any properties unknown to the library. 
- internal InboundEndpoint(string name, InboundEndpointProtocol protocol, string publicIpAddress, string publicFQDN, int frontendPort, int backendPort, IDictionary serializedAdditionalRawData) + internal InboundEndpoint(string name, InboundEndpointProtocol protocol, IPAddress publicIpAddress, string publicFQDN, int frontendPort, int backendPort, IDictionary serializedAdditionalRawData) { Name = name; Protocol = protocol; @@ -96,7 +97,7 @@ internal InboundEndpoint() /// The protocol of the endpoint. public InboundEndpointProtocol Protocol { get; } /// The public IP address of the Compute Node. - public string PublicIpAddress { get; } + public IPAddress PublicIpAddress { get; } /// The public fully qualified domain name for the Compute Node. public string PublicFQDN { get; } /// The public port number of the endpoint. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs index 828cedf3a8fc..3649805d2fc2 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.Serialization.cs @@ -82,7 +82,7 @@ internal static ManagedDisk DeserializeManagedDisk(JsonElement element, ModelRea return null; } StorageAccountType? 
storageAccountType = default; - VMDiskSecurityProfile securityProfile = default; + VmDiskSecurityProfile securityProfile = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -102,7 +102,7 @@ internal static ManagedDisk DeserializeManagedDisk(JsonElement element, ModelRea { continue; } - securityProfile = VMDiskSecurityProfile.DeserializeVMDiskSecurityProfile(property.Value, options); + securityProfile = VmDiskSecurityProfile.DeserializeVmDiskSecurityProfile(property.Value, options); continue; } if (options.Format != "W") diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs index 32a2f0e6cc65..46e3a2ec2f33 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ManagedDisk.cs @@ -54,7 +54,7 @@ public ManagedDisk() /// The storage account type for managed disk. /// Specifies the security profile settings for the managed disk. /// Keeps track of any properties unknown to the library. - internal ManagedDisk(StorageAccountType? storageAccountType, VMDiskSecurityProfile securityProfile, IDictionary serializedAdditionalRawData) + internal ManagedDisk(StorageAccountType? storageAccountType, VmDiskSecurityProfile securityProfile, IDictionary serializedAdditionalRawData) { StorageAccountType = storageAccountType; SecurityProfile = securityProfile; @@ -64,6 +64,6 @@ internal ManagedDisk(StorageAccountType? storageAccountType, VMDiskSecurityProfi /// The storage account type for managed disk. public StorageAccountType? StorageAccountType { get; set; } /// Specifies the security profile settings for the managed disk. 
- public VMDiskSecurityProfile SecurityProfile { get; set; } + public VmDiskSecurityProfile SecurityProfile { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.Serialization.cs index 1b05a3760cdf..e1994341ed05 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.Serialization.cs @@ -39,10 +39,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("subnetId"u8); writer.WriteStringValue(SubnetId); } - if (Optional.IsDefined(DynamicVNetAssignmentScope)) + if (Optional.IsDefined(DynamicVnetAssignmentScope)) { writer.WritePropertyName("dynamicVNetAssignmentScope"u8); - writer.WriteStringValue(DynamicVNetAssignmentScope.Value.ToString()); + writer.WriteStringValue(DynamicVnetAssignmentScope.Value.ToString()); } if (Optional.IsDefined(EndpointConfiguration)) { @@ -99,7 +99,7 @@ internal static NetworkConfiguration DeserializeNetworkConfiguration(JsonElement string subnetId = default; DynamicVNetAssignmentScope? dynamicVNetAssignmentScope = default; BatchPoolEndpointConfiguration endpointConfiguration = default; - PublicIpAddressConfiguration publicIPAddressConfiguration = default; + BatchPublicIpAddressConfiguration publicIPAddressConfiguration = default; bool? 
enableAcceleratedNetworking = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -134,7 +134,7 @@ internal static NetworkConfiguration DeserializeNetworkConfiguration(JsonElement { continue; } - publicIPAddressConfiguration = PublicIpAddressConfiguration.DeserializePublicIpAddressConfiguration(property.Value, options); + publicIPAddressConfiguration = BatchPublicIpAddressConfiguration.DeserializeBatchPublicIpAddressConfiguration(property.Value, options); continue; } if (property.NameEquals("enableAcceleratedNetworking"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.cs index 90847b622aee..11bfe637733e 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/NetworkConfiguration.cs @@ -52,15 +52,15 @@ public NetworkConfiguration() /// Initializes a new instance of . /// The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). 
If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication, including ports 29876 and 29877. Also enable outbound connections to Azure Storage on port 443. For more details see: https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration. - /// The scope of dynamic vnet assignment. + /// The scope of dynamic vnet assignment. /// The configuration for endpoints on Compute Nodes in the Batch Pool. /// The Public IPAddress configuration for Compute Nodes in the Batch Pool. /// Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. /// Keeps track of any properties unknown to the library. - internal NetworkConfiguration(string subnetId, DynamicVNetAssignmentScope? dynamicVNetAssignmentScope, BatchPoolEndpointConfiguration endpointConfiguration, PublicIpAddressConfiguration publicIpAddressConfiguration, bool? enableAcceleratedNetworking, IDictionary serializedAdditionalRawData) + internal NetworkConfiguration(string subnetId, DynamicVNetAssignmentScope? dynamicVnetAssignmentScope, BatchPoolEndpointConfiguration endpointConfiguration, BatchPublicIpAddressConfiguration publicIpAddressConfiguration, bool? 
enableAcceleratedNetworking, IDictionary serializedAdditionalRawData) { SubnetId = subnetId; - DynamicVNetAssignmentScope = dynamicVNetAssignmentScope; + DynamicVnetAssignmentScope = dynamicVnetAssignmentScope; EndpointConfiguration = endpointConfiguration; PublicIpAddressConfiguration = publicIpAddressConfiguration; EnableAcceleratedNetworking = enableAcceleratedNetworking; @@ -70,11 +70,11 @@ internal NetworkConfiguration(string subnetId, DynamicVNetAssignmentScope? dynam /// The ARM resource identifier of the virtual network subnet which the Compute Nodes of the Pool will join. This is of the form /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. The virtual network must be in the same region and subscription as the Azure Batch Account. The specified subnet should have enough free IP addresses to accommodate the number of Compute Nodes in the Pool. If the subnet doesn't have enough free IP addresses, the Pool will partially allocate Nodes and a resize error will occur. The 'MicrosoftAzureBatch' service principal must have the 'Classic Virtual Machine Contributor' Role-Based Access Control (RBAC) role for the specified VNet. The specified subnet must allow communication from the Azure Batch service to be able to schedule Tasks on the Nodes. This can be verified by checking if the specified VNet has any associated Network Security Groups (NSG). If communication to the Nodes in the specified subnet is denied by an NSG, then the Batch service will set the state of the Compute Nodes to unusable. Only ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported. If the specified VNet has any associated Network Security Groups (NSG), then a few reserved system ports must be enabled for inbound communication, including ports 29876 and 29877. Also enable outbound connections to Azure Storage on port 443. 
For more details see: https://learn.microsoft.com/azure/batch/nodes-and-pools#virtual-network-vnet-and-firewall-configuration. public string SubnetId { get; set; } /// The scope of dynamic vnet assignment. - public DynamicVNetAssignmentScope? DynamicVNetAssignmentScope { get; set; } + public DynamicVNetAssignmentScope? DynamicVnetAssignmentScope { get; set; } /// The configuration for endpoints on Compute Nodes in the Batch Pool. public BatchPoolEndpointConfiguration EndpointConfiguration { get; set; } /// The Public IPAddress configuration for Compute Nodes in the Batch Pool. - public PublicIpAddressConfiguration PublicIpAddressConfiguration { get; set; } + public BatchPublicIpAddressConfiguration PublicIpAddressConfiguration { get; set; } /// Whether this pool should enable accelerated networking. Accelerated networking enables single root I/O virtualization (SR-IOV) to a VM, which may lead to improved networking performance. For more details, see: https://learn.microsoft.com/azure/virtual-network/accelerated-networking-overview. public bool? EnableAcceleratedNetworking { get; set; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OnBatchTaskFailure.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OnBatchTaskFailure.cs deleted file mode 100644 index 27ada4fb3c3b..000000000000 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/OnBatchTaskFailure.cs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// - -#nullable disable - -using System; -using System.ComponentModel; - -namespace Azure.Compute.Batch -{ - /// OnTaskFailure enums. - public readonly partial struct OnBatchTaskFailure : IEquatable - { - private readonly string _value; - - /// Initializes a new instance of . - /// is null. - public OnBatchTaskFailure(string value) - { - _value = value ?? 
throw new ArgumentNullException(nameof(value)); - } - - private const string NoActionValue = "noaction"; - private const string PerformExitOptionsJobActionValue = "performexitoptionsjobaction"; - - /// Do nothing. The Job remains active unless terminated or disabled by some other means. - public static OnBatchTaskFailure NoAction { get; } = new OnBatchTaskFailure(NoActionValue); - /// Terminate the Job. The Job's terminationReason is set to 'AllTasksComplete'. - public static OnBatchTaskFailure PerformExitOptionsJobAction { get; } = new OnBatchTaskFailure(PerformExitOptionsJobActionValue); - /// Determines if two values are the same. - public static bool operator ==(OnBatchTaskFailure left, OnBatchTaskFailure right) => left.Equals(right); - /// Determines if two values are not the same. - public static bool operator !=(OnBatchTaskFailure left, OnBatchTaskFailure right) => !left.Equals(right); - /// Converts a to a . - public static implicit operator OnBatchTaskFailure(string value) => new OnBatchTaskFailure(value); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override bool Equals(object obj) => obj is OnBatchTaskFailure other && Equals(other); - /// - public bool Equals(OnBatchTaskFailure other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase); - - /// - [EditorBrowsable(EditorBrowsableState.Never)] - public override int GetHashCode() => _value != null ? 
StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0; - /// - public override string ToString() => _value; - } -} diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.Serialization.cs index 7e72526b2af4..31b25439c96a 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.Serialization.cs @@ -40,7 +40,7 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WriteStringValue(Path); } writer.WritePropertyName("containerUrl"u8); - writer.WriteStringValue(ContainerUrl); + writer.WriteStringValue(ContainerUri.AbsoluteUri); if (Optional.IsDefined(IdentityReference)) { writer.WritePropertyName("identityReference"u8); @@ -94,9 +94,9 @@ internal static OutputFileBlobContainerDestination DeserializeOutputFileBlobCont return null; } string path = default; - string containerUrl = default; + Uri containerUrl = default; BatchNodeIdentityReference identityReference = default; - IList uploadHeaders = default; + IList uploadHeaders = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -108,7 +108,7 @@ internal static OutputFileBlobContainerDestination DeserializeOutputFileBlobCont } if (property.NameEquals("containerUrl"u8)) { - containerUrl = property.Value.GetString(); + containerUrl = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("identityReference"u8)) @@ -126,10 +126,10 @@ internal static OutputFileBlobContainerDestination DeserializeOutputFileBlobCont { continue; } - List array = new List(); + List array = new List(); foreach (var item in property.Value.EnumerateArray()) { - 
array.Add(HttpHeader.DeserializeHttpHeader(item, options)); + array.Add(OutputFileUploadHeader.DeserializeOutputFileUploadHeader(item, options)); } uploadHeaders = array; continue; @@ -140,7 +140,7 @@ internal static OutputFileBlobContainerDestination DeserializeOutputFileBlobCont } } serializedAdditionalRawData = rawDataDictionary; - return new OutputFileBlobContainerDestination(path, containerUrl, identityReference, uploadHeaders ?? new ChangeTrackingList(), serializedAdditionalRawData); + return new OutputFileBlobContainerDestination(path, containerUrl, identityReference, uploadHeaders ?? new ChangeTrackingList(), serializedAdditionalRawData); } BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.cs index 2271ff331361..8fd26b168f5f 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileBlobContainerDestination.cs @@ -46,26 +46,26 @@ public partial class OutputFileBlobContainerDestination private IDictionary _serializedAdditionalRawData; /// Initializes a new instance of . - /// The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. - /// is null. - public OutputFileBlobContainerDestination(string containerUrl) + /// The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. + /// is null. 
+ public OutputFileBlobContainerDestination(Uri containerUri) { - Argument.AssertNotNull(containerUrl, nameof(containerUrl)); + Argument.AssertNotNull(containerUri, nameof(containerUri)); - ContainerUrl = containerUrl; - UploadHeaders = new ChangeTrackingList(); + ContainerUri = containerUri; + UploadHeaders = new ChangeTrackingList(); } /// Initializes a new instance of . /// The destination blob or virtual directory within the Azure Storage container. If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name. - /// The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. + /// The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. /// The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. /// A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. Official document on allowed headers when uploading blobs: https://learn.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. /// Keeps track of any properties unknown to the library. 
- internal OutputFileBlobContainerDestination(string path, string containerUrl, BatchNodeIdentityReference identityReference, IList uploadHeaders, IDictionary serializedAdditionalRawData) + internal OutputFileBlobContainerDestination(string path, Uri containerUri, BatchNodeIdentityReference identityReference, IList uploadHeaders, IDictionary serializedAdditionalRawData) { Path = path; - ContainerUrl = containerUrl; + ContainerUri = containerUri; IdentityReference = identityReference; UploadHeaders = uploadHeaders; _serializedAdditionalRawData = serializedAdditionalRawData; @@ -79,10 +79,10 @@ internal OutputFileBlobContainerDestination() /// The destination blob or virtual directory within the Azure Storage container. If filePattern refers to a specific file (i.e. contains no wildcards), then path is the name of the blob to which to upload that file. If filePattern contains one or more wildcards (and therefore may match multiple files), then path is the name of the blob virtual directory (which is prepended to each blob name) to which to upload the file(s). If omitted, file(s) are uploaded to the root of the container with a blob name matching their file name. public string Path { get; set; } /// The URL of the container within Azure Blob Storage to which to upload the file(s). If not using a managed identity, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. - public string ContainerUrl { get; set; } + public Uri ContainerUri { get; set; } /// The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. public BatchNodeIdentityReference IdentityReference { get; set; } /// A list of name-value pairs for headers to be used in uploading output files. These headers will be specified when uploading files to Azure Storage. 
Official document on allowed headers when uploading blobs: https://learn.microsoft.com/rest/api/storageservices/put-blob#request-headers-all-blob-types. - public IList UploadHeaders { get; } + public IList UploadHeaders { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/HttpHeader.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadHeader.Serialization.cs similarity index 68% rename from sdk/batch/Azure.Compute.Batch/src/Generated/HttpHeader.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadHeader.Serialization.cs index f992e1a1d363..5ddf5bab515f 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/HttpHeader.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadHeader.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class HttpHeader : IUtf8JsonSerializable, IJsonModel + public partial class OutputFileUploadHeader : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOption /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(HttpHeader)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(OutputFileUploadHeader)} does not support writing '{format}' format."); } writer.WritePropertyName("name"u8); @@ -58,19 +58,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - HttpHeader IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + OutputFileUploadHeader IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(HttpHeader)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(OutputFileUploadHeader)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeHttpHeader(document.RootElement, options); + return DeserializeOutputFileUploadHeader(document.RootElement, options); } - internal static HttpHeader DeserializeHttpHeader(JsonElement element, ModelReaderWriterOptions options = null) + internal static OutputFileUploadHeader DeserializeOutputFileUploadHeader(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -100,46 +100,46 @@ internal static HttpHeader DeserializeHttpHeader(JsonElement element, ModelReade } } serializedAdditionalRawData = rawDataDictionary; - return new HttpHeader(name, value, serializedAdditionalRawData); + return new OutputFileUploadHeader(name, value, serializedAdditionalRawData); } - BinaryData 
IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(HttpHeader)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(OutputFileUploadHeader)} does not support writing '{options.Format}' format."); } } - HttpHeader IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + OutputFileUploadHeader IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeHttpHeader(document.RootElement, options); + return DeserializeOutputFileUploadHeader(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(HttpHeader)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(OutputFileUploadHeader)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static HttpHeader FromResponse(Response response) + internal static OutputFileUploadHeader FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeHttpHeader(document.RootElement); + return DeserializeOutputFileUploadHeader(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/HttpHeader.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadHeader.cs similarity index 82% rename from sdk/batch/Azure.Compute.Batch/src/Generated/HttpHeader.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadHeader.cs index 90e588da517e..1f8229b2127e 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/HttpHeader.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/OutputFileUploadHeader.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// An HTTP header name-value pair. - public partial class HttpHeader + public partial class OutputFileUploadHeader { /// /// Keeps track of any properties unknown to the library. @@ -45,29 +45,29 @@ public partial class HttpHeader /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . + /// Initializes a new instance of . /// The case-insensitive name of the header to be used while uploading output files. /// is null. - public HttpHeader(string name) + public OutputFileUploadHeader(string name) { Argument.AssertNotNull(name, nameof(name)); Name = name; } - /// Initializes a new instance of . + /// Initializes a new instance of . /// The case-insensitive name of the header to be used while uploading output files. /// The value of the header to be used while uploading output files. /// Keeps track of any properties unknown to the library. 
- internal HttpHeader(string name, string value, IDictionary serializedAdditionalRawData) + internal OutputFileUploadHeader(string name, string value, IDictionary serializedAdditionalRawData) { Name = name; Value = value; _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal HttpHeader() + /// Initializes a new instance of for deserialization. + internal OutputFileUploadHeader() { } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.Serialization.cs index 2c7d33811d88..758b9ef6b909 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.Serialization.cs @@ -39,10 +39,10 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("id"u8); writer.WriteStringValue(Id); } - if (Optional.IsDefined(Url)) + if (Optional.IsDefined(Uri)) { writer.WritePropertyName("url"u8); - writer.WriteStringValue(Url); + writer.WriteStringValue(Uri.AbsoluteUri); } if (options.Format != "W" && _serializedAdditionalRawData != null) { @@ -82,7 +82,7 @@ internal static RecentBatchJob DeserializeRecentBatchJob(JsonElement element, Mo return null; } string id = default; - string url = default; + Uri url = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -94,7 +94,11 @@ internal static RecentBatchJob DeserializeRecentBatchJob(JsonElement element, Mo } if (property.NameEquals("url"u8)) { - url = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + url = new Uri(property.Value.GetString()); continue; } if (options.Format != "W") diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.cs 
b/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.cs index 5d8d951d5f6f..ec41bf2b8cc6 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/RecentBatchJob.cs @@ -52,18 +52,18 @@ internal RecentBatchJob() /// Initializes a new instance of . /// The ID of the Job. - /// The URL of the Job. + /// The URL of the Job. /// Keeps track of any properties unknown to the library. - internal RecentBatchJob(string id, string url, IDictionary serializedAdditionalRawData) + internal RecentBatchJob(string id, Uri uri, IDictionary serializedAdditionalRawData) { Id = id; - Url = url; + Uri = uri; _serializedAdditionalRawData = serializedAdditionalRawData; } /// The ID of the Job. public string Id { get; } /// The URL of the Job. - public string Url { get; } + public Uri Uri { get; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.Serialization.cs index 1e9f3b879348..44e0aa3f033c 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.Serialization.cs @@ -39,15 +39,15 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit writer.WritePropertyName("autoStorageContainerName"u8); writer.WriteStringValue(AutoStorageContainerName); } - if (Optional.IsDefined(StorageContainerUrl)) + if (Optional.IsDefined(StorageContainerUri)) { writer.WritePropertyName("storageContainerUrl"u8); - writer.WriteStringValue(StorageContainerUrl); + writer.WriteStringValue(StorageContainerUri.AbsoluteUri); } - if (Optional.IsDefined(HttpUrl)) + if (Optional.IsDefined(HttpUri)) { writer.WritePropertyName("httpUrl"u8); - writer.WriteStringValue(HttpUrl); + writer.WriteStringValue(HttpUri.AbsoluteUri); } if (Optional.IsDefined(BlobPrefix)) { @@ -107,8 +107,8 @@ internal static ResourceFile 
DeserializeResourceFile(JsonElement element, ModelR return null; } string autoStorageContainerName = default; - string storageContainerUrl = default; - string httpUrl = default; + Uri storageContainerUrl = default; + Uri httpUrl = default; string blobPrefix = default; string filePath = default; string fileMode = default; @@ -124,12 +124,20 @@ internal static ResourceFile DeserializeResourceFile(JsonElement element, ModelR } if (property.NameEquals("storageContainerUrl"u8)) { - storageContainerUrl = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + storageContainerUrl = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("httpUrl"u8)) { - httpUrl = property.Value.GetString(); + if (property.Value.ValueKind == JsonValueKind.Null) + { + continue; + } + httpUrl = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("blobPrefix"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.cs index b40425910042..1b27cb0b1c6c 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/ResourceFile.cs @@ -52,18 +52,18 @@ public ResourceFile() /// Initializes a new instance of . /// The storage container name in the auto storage Account. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. - /// The URL of the blob container within Azure Blob Storage. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable from compute nodes. 
There are three ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, use a managed identity with read and list permissions, or set the ACL for the container to allow public access. - /// The URL of the file to download. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable from compute nodes. There are three ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, use a managed identity with read permission, or set the ACL for the blob or its container to allow public access. + /// The URL of the blob container within Azure Blob Storage. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable from compute nodes. There are three ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, use a managed identity with read and list permissions, or set the ACL for the container to allow public access. + /// The URL of the file to download. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable from compute nodes. There are three ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, use a managed identity with read permission, or set the ACL for the blob or its container to allow public access. /// The blob prefix to use when downloading blobs from an Azure Storage container. Only the blobs whose names begin with the specified prefix will be downloaded. 
The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded. /// The location on the Compute Node to which to download the file(s), relative to the Task's working directory. If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the Task's working directory (for example by using '..'). /// The file permission mode attribute in octal format. This property applies only to files being downloaded to Linux Compute Nodes. It will be ignored if it is specified for a resourceFile which will be downloaded to a Windows Compute Node. If this property is not specified for a Linux Compute Node, then a default value of 0770 is applied to the file. /// The reference to the user assigned identity to use to access Azure Blob Storage specified by storageContainerUrl or httpUrl. /// Keeps track of any properties unknown to the library. 
- internal ResourceFile(string autoStorageContainerName, string storageContainerUrl, string httpUrl, string blobPrefix, string filePath, string fileMode, BatchNodeIdentityReference identityReference, IDictionary serializedAdditionalRawData) + internal ResourceFile(string autoStorageContainerName, Uri storageContainerUri, Uri httpUri, string blobPrefix, string filePath, string fileMode, BatchNodeIdentityReference identityReference, IDictionary serializedAdditionalRawData) { AutoStorageContainerName = autoStorageContainerName; - StorageContainerUrl = storageContainerUrl; - HttpUrl = httpUrl; + StorageContainerUri = storageContainerUri; + HttpUri = httpUri; BlobPrefix = blobPrefix; FilePath = filePath; FileMode = fileMode; @@ -74,9 +74,9 @@ internal ResourceFile(string autoStorageContainerName, string storageContainerUr /// The storage container name in the auto storage Account. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. public string AutoStorageContainerName { get; set; } /// The URL of the blob container within Azure Blob Storage. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable from compute nodes. There are three ways to get such a URL for a container in Azure storage: include a Shared Access Signature (SAS) granting read and list permissions on the container, use a managed identity with read and list permissions, or set the ACL for the container to allow public access. - public string StorageContainerUrl { get; set; } + public Uri StorageContainerUri { get; set; } /// The URL of the file to download. The autoStorageContainerName, storageContainerUrl and httpUrl properties are mutually exclusive and one of them must be specified. If the URL points to Azure Blob Storage, it must be readable from compute nodes. 
There are three ways to get such a URL for a blob in Azure storage: include a Shared Access Signature (SAS) granting read permissions on the blob, use a managed identity with read permission, or set the ACL for the blob or its container to allow public access. - public string HttpUrl { get; set; } + public Uri HttpUri { get; set; } /// The blob prefix to use when downloading blobs from an Azure Storage container. Only the blobs whose names begin with the specified prefix will be downloaded. The property is valid only when autoStorageContainerName or storageContainerUrl is used. This prefix can be a partial filename or a subdirectory. If a prefix is not specified, all the files in the container will be downloaded. public string BlobPrefix { get; set; } /// The location on the Compute Node to which to download the file(s), relative to the Task's working directory. If the httpUrl property is specified, the filePath is required and describes the path which the file will be downloaded to, including the filename. Otherwise, if the autoStorageContainerName or storageContainerUrl property is specified, filePath is optional and is the directory to download the files to. In the case where filePath is used as a directory, any directory structure already associated with the input data will be retained in full and appended to the specified filePath directory. The specified relative path cannot break out of the Task's working directory (for example by using '..'). 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.Serialization.cs index 31e125ef8ddc..aa021679a5a2 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.Serialization.cs @@ -79,7 +79,7 @@ internal static SecurityProfile DeserializeSecurityProfile(JsonElement element, } bool encryptionAtHost = default; SecurityTypes securityType = default; - UefiSettings uefiSettings = default; + BatchUefiSettings uefiSettings = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); foreach (var property in element.EnumerateObject()) @@ -96,7 +96,7 @@ internal static SecurityProfile DeserializeSecurityProfile(JsonElement element, } if (property.NameEquals("uefiSettings"u8)) { - uefiSettings = UefiSettings.DeserializeUefiSettings(property.Value, options); + uefiSettings = BatchUefiSettings.DeserializeBatchUefiSettings(property.Value, options); continue; } if (options.Format != "W") diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs index d34a776e1368..98ec816033f1 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/SecurityProfile.cs @@ -50,7 +50,7 @@ public partial class SecurityProfile /// Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. /// Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. /// is null. 
- public SecurityProfile(bool encryptionAtHost, SecurityTypes securityType, UefiSettings uefiSettings) + public SecurityProfile(bool encryptionAtHost, SecurityTypes securityType, BatchUefiSettings uefiSettings) { Argument.AssertNotNull(uefiSettings, nameof(uefiSettings)); @@ -64,7 +64,7 @@ public SecurityProfile(bool encryptionAtHost, SecurityTypes securityType, UefiSe /// Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. /// Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. /// Keeps track of any properties unknown to the library. - internal SecurityProfile(bool encryptionAtHost, SecurityTypes securityType, UefiSettings uefiSettings, IDictionary serializedAdditionalRawData) + internal SecurityProfile(bool encryptionAtHost, SecurityTypes securityType, BatchUefiSettings uefiSettings, IDictionary serializedAdditionalRawData) { EncryptionAtHost = encryptionAtHost; SecurityType = securityType; @@ -82,6 +82,6 @@ internal SecurityProfile() /// Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. public SecurityTypes SecurityType { get; set; } /// Specifies the security settings like secure boot and vTPM used while creating the virtual machine. Specifies the security settings like secure boot and vTPM used while creating the virtual machine. 
- public UefiSettings UefiSettings { get; set; } + public BatchUefiSettings UefiSettings { get; set; } } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsContent.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsOptions.Serialization.cs similarity index 78% rename from sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsContent.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsOptions.Serialization.cs index ebd4305c6706..60388607fbd6 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsContent.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsOptions.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class UploadBatchServiceLogsContent : IUtf8JsonSerializable, IJsonModel + public partial class UploadBatchServiceLogsOptions : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,14 +28,14 @@ void IJsonModel.Write(Utf8JsonWriter writer, Mode /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(UploadBatchServiceLogsContent)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(UploadBatchServiceLogsOptions)} does not support writing '{format}' format."); } writer.WritePropertyName("containerUrl"u8); - writer.WriteStringValue(ContainerUrl); + writer.WriteStringValue(ContainerUri.AbsoluteUri); writer.WritePropertyName("startTime"u8); writer.WriteStringValue(StartTime, "O"); if (Optional.IsDefined(EndTime)) @@ -65,19 +65,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - UploadBatchServiceLogsContent IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + UploadBatchServiceLogsOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(UploadBatchServiceLogsContent)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(UploadBatchServiceLogsOptions)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeUploadBatchServiceLogsContent(document.RootElement, options); + return DeserializeUploadBatchServiceLogsOptions(document.RootElement, options); } - internal static UploadBatchServiceLogsContent DeserializeUploadBatchServiceLogsContent(JsonElement element, ModelReaderWriterOptions options = null) + internal static UploadBatchServiceLogsOptions DeserializeUploadBatchServiceLogsOptions(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -85,7 +85,7 @@ internal static UploadBatchServiceLogsContent DeserializeUploadBatchServiceLogsC { return null; } - string containerUrl = default; + Uri containerUrl = default; DateTimeOffset startTime = default; DateTimeOffset? 
endTime = default; BatchNodeIdentityReference identityReference = default; @@ -95,7 +95,7 @@ internal static UploadBatchServiceLogsContent DeserializeUploadBatchServiceLogsC { if (property.NameEquals("containerUrl"u8)) { - containerUrl = property.Value.GetString(); + containerUrl = new Uri(property.Value.GetString()); continue; } if (property.NameEquals("startTime"u8)) @@ -127,46 +127,46 @@ internal static UploadBatchServiceLogsContent DeserializeUploadBatchServiceLogsC } } serializedAdditionalRawData = rawDataDictionary; - return new UploadBatchServiceLogsContent(containerUrl, startTime, endTime, identityReference, serializedAdditionalRawData); + return new UploadBatchServiceLogsOptions(containerUrl, startTime, endTime, identityReference, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(UploadBatchServiceLogsContent)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(UploadBatchServiceLogsOptions)} does not support writing '{options.Format}' format."); } } - UploadBatchServiceLogsContent IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + UploadBatchServiceLogsOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeUploadBatchServiceLogsContent(document.RootElement, options); + return DeserializeUploadBatchServiceLogsOptions(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(UploadBatchServiceLogsContent)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(UploadBatchServiceLogsOptions)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. - internal static UploadBatchServiceLogsContent FromResponse(Response response) + internal static UploadBatchServiceLogsOptions FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeUploadBatchServiceLogsContent(document.RootElement); + return DeserializeUploadBatchServiceLogsOptions(document.RootElement); } /// Convert into a . 
diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsContent.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsOptions.cs similarity index 86% rename from sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsContent.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsOptions.cs index e5283b9180cf..8b0bea51387a 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsContent.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/UploadBatchServiceLogsOptions.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// The Azure Batch service log files upload parameters for a Compute Node. - public partial class UploadBatchServiceLogsContent + public partial class UploadBatchServiceLogsOptions { /// /// Keeps track of any properties unknown to the library. @@ -45,40 +45,40 @@ public partial class UploadBatchServiceLogsContent /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - /// The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. + /// Initializes a new instance of . + /// The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. /// The start of the time range from which to upload Batch Service log file(s). 
Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. - /// is null. - public UploadBatchServiceLogsContent(string containerUrl, DateTimeOffset startTime) + /// is null. + public UploadBatchServiceLogsOptions(Uri containerUri, DateTimeOffset startTime) { - Argument.AssertNotNull(containerUrl, nameof(containerUrl)); + Argument.AssertNotNull(containerUri, nameof(containerUri)); - ContainerUrl = containerUrl; + ContainerUri = containerUri; StartTime = startTime; } - /// Initializes a new instance of . - /// The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. + /// Initializes a new instance of . + /// The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. /// The start of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. /// The end of the time range from which to upload Batch Service log file(s). 
Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime. /// The reference to the user assigned identity to use to access Azure Blob Storage specified by containerUrl. The identity must have write access to the Azure Blob Storage container. /// Keeps track of any properties unknown to the library. - internal UploadBatchServiceLogsContent(string containerUrl, DateTimeOffset startTime, DateTimeOffset? endTime, BatchNodeIdentityReference identityReference, IDictionary serializedAdditionalRawData) + internal UploadBatchServiceLogsOptions(Uri containerUri, DateTimeOffset startTime, DateTimeOffset? endTime, BatchNodeIdentityReference identityReference, IDictionary serializedAdditionalRawData) { - ContainerUrl = containerUrl; + ContainerUri = containerUri; StartTime = startTime; EndTime = endTime; IdentityReference = identityReference; _serializedAdditionalRawData = serializedAdditionalRawData; } - /// Initializes a new instance of for deserialization. - internal UploadBatchServiceLogsContent() + /// Initializes a new instance of for deserialization. + internal UploadBatchServiceLogsOptions() { } /// The URL of the container within Azure Blob Storage to which to upload the Batch Service log file(s). If a user assigned managed identity is not being used, the URL must include a Shared Access Signature (SAS) granting write permissions to the container. The SAS duration must allow enough time for the upload to finish. The start time for SAS is optional and recommended to not be specified. - public string ContainerUrl { get; } + public Uri ContainerUri { get; } /// The start of the time range from which to upload Batch Service log file(s). 
Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. public DateTimeOffset StartTime { get; } /// The end of the time range from which to upload Batch Service log file(s). Any log file containing a log message in the time range will be uploaded. This means that the operation might retrieve more logs than have been requested since the entire log file is always uploaded, but the operation should not retrieve fewer logs than have been requested. If omitted, the default is to upload all logs available after the startTime. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.Serialization.cs index e9a2f1f4eddc..73beebcc2475 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.Serialization.cs @@ -135,16 +135,16 @@ internal static VirtualMachineConfiguration DeserializeVirtualMachineConfigurati { return null; } - ImageReference imageReference = default; + BatchVmImageReference imageReference = default; string nodeAgentSKUId = default; WindowsConfiguration windowsConfiguration = default; IList dataDisks = default; string licenseType = default; - ContainerConfiguration containerConfiguration = default; + BatchContainerConfiguration containerConfiguration = default; DiskEncryptionConfiguration diskEncryptionConfiguration = default; BatchNodePlacementConfiguration nodePlacementConfiguration = default; IList extensions = default; - OSDisk osDisk = default; + BatchOsDisk osDisk = default; SecurityProfile securityProfile = default; ServiceArtifactReference serviceArtifactReference = default; IDictionary 
serializedAdditionalRawData = default; @@ -153,7 +153,7 @@ internal static VirtualMachineConfiguration DeserializeVirtualMachineConfigurati { if (property.NameEquals("imageReference"u8)) { - imageReference = ImageReference.DeserializeImageReference(property.Value, options); + imageReference = BatchVmImageReference.DeserializeBatchVmImageReference(property.Value, options); continue; } if (property.NameEquals("nodeAgentSKUId"u8)) @@ -195,7 +195,7 @@ internal static VirtualMachineConfiguration DeserializeVirtualMachineConfigurati { continue; } - containerConfiguration = ContainerConfiguration.DeserializeContainerConfiguration(property.Value, options); + containerConfiguration = BatchContainerConfiguration.DeserializeBatchContainerConfiguration(property.Value, options); continue; } if (property.NameEquals("diskEncryptionConfiguration"u8)) @@ -236,7 +236,7 @@ internal static VirtualMachineConfiguration DeserializeVirtualMachineConfigurati { continue; } - osDisk = OSDisk.DeserializeOSDisk(property.Value, options); + osDisk = BatchOsDisk.DeserializeBatchOsDisk(property.Value, options); continue; } if (property.NameEquals("securityProfile"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.cs index ea1946689c31..8bd08c4d6556 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineConfiguration.cs @@ -52,7 +52,7 @@ public partial class VirtualMachineConfiguration /// A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. /// The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. 
There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. /// or is null. - public VirtualMachineConfiguration(ImageReference imageReference, string nodeAgentSkuId) + public VirtualMachineConfiguration(BatchVmImageReference imageReference, string nodeAgentSkuId) { Argument.AssertNotNull(imageReference, nameof(imageReference)); Argument.AssertNotNull(nodeAgentSkuId, nameof(nodeAgentSkuId)); @@ -87,7 +87,7 @@ public VirtualMachineConfiguration(ImageReference imageReference, string nodeAge /// Specifies the security profile settings for the virtual machine or virtual machine scale set. /// Specifies the service artifact reference id used to set same image version for all virtual machines in the scale set when using 'latest' image version. The service artifact reference id in the form of /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. /// Keeps track of any properties unknown to the library. 
- internal VirtualMachineConfiguration(ImageReference imageReference, string nodeAgentSkuId, WindowsConfiguration windowsConfiguration, IList dataDisks, string licenseType, ContainerConfiguration containerConfiguration, DiskEncryptionConfiguration diskEncryptionConfiguration, BatchNodePlacementConfiguration nodePlacementConfiguration, IList extensions, OSDisk osDisk, SecurityProfile securityProfile, ServiceArtifactReference serviceArtifactReference, IDictionary serializedAdditionalRawData) + internal VirtualMachineConfiguration(BatchVmImageReference imageReference, string nodeAgentSkuId, WindowsConfiguration windowsConfiguration, IList dataDisks, string licenseType, BatchContainerConfiguration containerConfiguration, DiskEncryptionConfiguration diskEncryptionConfiguration, BatchNodePlacementConfiguration nodePlacementConfiguration, IList extensions, BatchOsDisk osDisk, SecurityProfile securityProfile, ServiceArtifactReference serviceArtifactReference, IDictionary serializedAdditionalRawData) { ImageReference = imageReference; NodeAgentSkuId = nodeAgentSkuId; @@ -110,7 +110,7 @@ internal VirtualMachineConfiguration() } /// A reference to the Azure Virtual Machines Marketplace Image or the custom Virtual Machine Image to use. - public ImageReference ImageReference { get; set; } + public BatchVmImageReference ImageReference { get; set; } /// The SKU of the Batch Compute Node agent to be provisioned on Compute Nodes in the Pool. The Batch Compute Node agent is a program that runs on each Compute Node in the Pool, and provides the command-and-control interface between the Compute Node and the Batch service. There are different implementations of the Compute Node agent, known as SKUs, for different operating systems. You must specify a Compute Node agent SKU which matches the selected Image reference. To get the list of supported Compute Node agent SKUs along with their list of verified Image references, see the 'List supported Compute Node agent SKUs' operation. 
public string NodeAgentSkuId { get; set; } /// Windows operating system settings on the virtual machine. This property must not be specified if the imageReference property specifies a Linux OS Image. @@ -130,7 +130,7 @@ internal VirtualMachineConfiguration() /// public string LicenseType { get; set; } /// The container configuration for the Pool. If specified, setup is performed on each Compute Node in the Pool to allow Tasks to run in containers. All regular Tasks and Job manager Tasks run on this Pool must specify the containerSettings property, and all other Tasks may specify it. - public ContainerConfiguration ContainerConfiguration { get; set; } + public BatchContainerConfiguration ContainerConfiguration { get; set; } /// The disk encryption configuration for the pool. If specified, encryption is performed on each node in the pool during node provisioning. public DiskEncryptionConfiguration DiskEncryptionConfiguration { get; set; } /// The node placement configuration for the pool. This configuration will specify rules on how nodes in the pool will be physically allocated. @@ -138,7 +138,7 @@ internal VirtualMachineConfiguration() /// The virtual machine extension for the pool. If specified, the extensions mentioned in this configuration will be installed on each node. public IList Extensions { get; } /// Settings for the operating system disk of the Virtual Machine. - public OSDisk OsDisk { get; set; } + public BatchOsDisk OsDisk { get; set; } /// Specifies the security profile settings for the virtual machine or virtual machine scale set. public SecurityProfile SecurityProfile { get; set; } /// Specifies the service artifact reference id used to set same image version for all virtual machines in the scale set when using 'latest' image version. 
The service artifact reference id in the form of /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/galleries/{galleryName}/serviceArtifacts/{serviceArtifactName}/vmArtifactsProfiles/{vmArtifactsProfilesName}. diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.Serialization.cs index 0674087394d1..5a62b68c50f7 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.Serialization.cs @@ -81,7 +81,7 @@ internal static VirtualMachineInfo DeserializeVirtualMachineInfo(JsonElement ele { return null; } - ImageReference imageReference = default; + BatchVmImageReference imageReference = default; string scaleSetVmResourceId = default; IDictionary serializedAdditionalRawData = default; Dictionary rawDataDictionary = new Dictionary(); @@ -93,7 +93,7 @@ internal static VirtualMachineInfo DeserializeVirtualMachineInfo(JsonElement ele { continue; } - imageReference = ImageReference.DeserializeImageReference(property.Value, options); + imageReference = BatchVmImageReference.DeserializeBatchVmImageReference(property.Value, options); continue; } if (property.NameEquals("scaleSetVmResourceId"u8)) diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.cs index 7df368ae5ef7..b26471f2cb7a 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VirtualMachineInfo.cs @@ -54,7 +54,7 @@ internal VirtualMachineInfo() /// The reference to the Azure Virtual Machine's Marketplace Image. /// The resource ID of the Compute Node's current Virtual Machine Scale Set VM. Only defined if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. 
/// Keeps track of any properties unknown to the library. - internal VirtualMachineInfo(ImageReference imageReference, string scaleSetVmResourceId, IDictionary serializedAdditionalRawData) + internal VirtualMachineInfo(BatchVmImageReference imageReference, string scaleSetVmResourceId, IDictionary serializedAdditionalRawData) { ImageReference = imageReference; ScaleSetVmResourceId = scaleSetVmResourceId; @@ -62,7 +62,7 @@ internal VirtualMachineInfo(ImageReference imageReference, string scaleSetVmReso } /// The reference to the Azure Virtual Machine's Marketplace Image. - public ImageReference ImageReference { get; } + public BatchVmImageReference ImageReference { get; } /// The resource ID of the Compute Node's current Virtual Machine Scale Set VM. Only defined if the Batch Account was created with its poolAllocationMode property set to 'UserSubscription'. public string ScaleSetVmResourceId { get; } } diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.Serialization.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VmDiskSecurityProfile.Serialization.cs similarity index 79% rename from sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.Serialization.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/VmDiskSecurityProfile.Serialization.cs index 28a4e671790c..6d3022827a20 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.Serialization.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VmDiskSecurityProfile.Serialization.cs @@ -13,11 +13,11 @@ namespace Azure.Compute.Batch { - public partial class VMDiskSecurityProfile : IUtf8JsonSerializable, IJsonModel + public partial class VmDiskSecurityProfile : IUtf8JsonSerializable, IJsonModel { - void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions); + void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, 
ModelSerializationExtensions.WireOptions); - void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) + void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options) { writer.WriteStartObject(); JsonModelWriteCore(writer, options); @@ -28,10 +28,10 @@ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderW /// The client options for reading and writing models. protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(VMDiskSecurityProfile)} does not support writing '{format}' format."); + throw new FormatException($"The model {nameof(VmDiskSecurityProfile)} does not support writing '{format}' format."); } if (Optional.IsDefined(SecurityEncryptionType)) @@ -56,19 +56,19 @@ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWrit } } - VMDiskSecurityProfile IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) + VmDiskSecurityProfile IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; if (format != "J") { - throw new FormatException($"The model {nameof(VMDiskSecurityProfile)} does not support reading '{format}' format."); + throw new FormatException($"The model {nameof(VmDiskSecurityProfile)} does not support reading '{format}' format."); } using JsonDocument document = JsonDocument.ParseValue(ref reader); - return DeserializeVMDiskSecurityProfile(document.RootElement, options); + return DeserializeVmDiskSecurityProfile(document.RootElement, options); } - internal static VMDiskSecurityProfile DeserializeVMDiskSecurityProfile(JsonElement element, ModelReaderWriterOptions options = null) + internal static VmDiskSecurityProfile DeserializeVmDiskSecurityProfile(JsonElement element, ModelReaderWriterOptions options = null) { options ??= ModelSerializationExtensions.WireOptions; @@ -96,46 +96,46 @@ internal static VMDiskSecurityProfile DeserializeVMDiskSecurityProfile(JsonEleme } } serializedAdditionalRawData = rawDataDictionary; - return new VMDiskSecurityProfile(securityEncryptionType, serializedAdditionalRawData); + return new VmDiskSecurityProfile(securityEncryptionType, serializedAdditionalRawData); } - BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) + BinaryData IPersistableModel.Write(ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? 
((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": return ModelReaderWriter.Write(this, options, AzureComputeBatchContext.Default); default: - throw new FormatException($"The model {nameof(VMDiskSecurityProfile)} does not support writing '{options.Format}' format."); + throw new FormatException($"The model {nameof(VmDiskSecurityProfile)} does not support writing '{options.Format}' format."); } } - VMDiskSecurityProfile IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) + VmDiskSecurityProfile IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options) { - var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; + var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format; switch (format) { case "J": { using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeVMDiskSecurityProfile(document.RootElement, options); + return DeserializeVmDiskSecurityProfile(document.RootElement, options); } default: - throw new FormatException($"The model {nameof(VMDiskSecurityProfile)} does not support reading '{options.Format}' format."); + throw new FormatException($"The model {nameof(VmDiskSecurityProfile)} does not support reading '{options.Format}' format."); } } - string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; + string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J"; /// Deserializes the model from a raw response. /// The response to deserialize the model from. 
- internal static VMDiskSecurityProfile FromResponse(Response response) + internal static VmDiskSecurityProfile FromResponse(Response response) { using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions); - return DeserializeVMDiskSecurityProfile(document.RootElement); + return DeserializeVmDiskSecurityProfile(document.RootElement); } /// Convert into a . diff --git a/sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.cs b/sdk/batch/Azure.Compute.Batch/src/Generated/VmDiskSecurityProfile.cs similarity index 93% rename from sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.cs rename to sdk/batch/Azure.Compute.Batch/src/Generated/VmDiskSecurityProfile.cs index 2405e8506ee0..b6c35fa86187 100644 --- a/sdk/batch/Azure.Compute.Batch/src/Generated/VMDiskSecurityProfile.cs +++ b/sdk/batch/Azure.Compute.Batch/src/Generated/VmDiskSecurityProfile.cs @@ -11,7 +11,7 @@ namespace Azure.Compute.Batch { /// Specifies the security profile settings for the managed disk. **Note**: It can only be set for Confidential VMs and required when using Confidential VMs. - public partial class VMDiskSecurityProfile + public partial class VmDiskSecurityProfile { /// /// Keeps track of any properties unknown to the library. @@ -45,15 +45,15 @@ public partial class VMDiskSecurityProfile /// private IDictionary _serializedAdditionalRawData; - /// Initializes a new instance of . - public VMDiskSecurityProfile() + /// Initializes a new instance of . + public VmDiskSecurityProfile() { } - /// Initializes a new instance of . + /// Initializes a new instance of . /// Specifies the EncryptionType of the managed disk. It is set to VMGuestStateOnly for encryption of just the VMGuestState blob, and NonPersistedTPM for not persisting firmware state in the VMGuestState blob. **Note**: It can be set for only Confidential VMs and is required when using Confidential VMs. /// Keeps track of any properties unknown to the library. 
- internal VMDiskSecurityProfile(SecurityEncryptionTypes? securityEncryptionType, IDictionary serializedAdditionalRawData) + internal VmDiskSecurityProfile(SecurityEncryptionTypes? securityEncryptionType, IDictionary serializedAdditionalRawData) { SecurityEncryptionType = securityEncryptionType; _serializedAdditionalRawData = serializedAdditionalRawData; diff --git a/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs b/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs index ce1e76220ee4..30f21fb1de12 100644 --- a/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs +++ b/sdk/batch/Azure.Compute.Batch/tests/Generated/Samples/Samples_BatchClient.cs @@ -148,9 +148,9 @@ public void Example_BatchClient_CreatePool_CreatesAPoolWithAcceleratedNetworking TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool01", "Standard_D1_v2") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool01", "Standard_D1_v2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -174,9 +174,9 @@ public async Task Example_BatchClient_CreatePool_CreatesAPoolWithAcceleratedNetw TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool01", "Standard_D1_v2") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool01", "Standard_D1_v2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = 
"WindowsServer", @@ -362,9 +362,9 @@ public void Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpecified_C TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "UbuntuServer", @@ -384,7 +384,7 @@ public void Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpecified_C }, }, new MountConfiguration { -AzureFileShareConfiguration = new AzureFileShareConfiguration("accountName", "https://myaccount.file.core.windows.net/fileshare", "accountKey", "filesharepath") +AzureFileShareConfiguration = new AzureFileShareConfiguration("accountName", "accountKey", new Uri("https://myaccount.file.core.windows.net/fileshare"), "filesharepath") { MountOptions = "mount options ver=1.0", }, @@ -413,9 +413,9 @@ public async Task Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpeci TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "UbuntuServer", @@ -435,7 +435,7 @@ public async Task Example_BatchClient_CreatePool_CreatesAPoolWithMountDriveSpeci }, }, new MountConfiguration { -AzureFileShareConfiguration = new AzureFileShareConfiguration("accountName", 
"https://myaccount.file.core.windows.net/fileshare", "accountKey", "filesharepath") +AzureFileShareConfiguration = new AzureFileShareConfiguration("accountName", "accountKey", new Uri("https://myaccount.file.core.windows.net/fileshare"), "filesharepath") { MountOptions = "mount options ver=1.0", }, @@ -556,18 +556,18 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "standard_d2s_v3") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "standard_d2s_v3") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", Sku = "20_04-lts", }, "batch.node.ubuntu 20.04") { - OsDisk = new OSDisk + OsDisk = new BatchOsDisk { - EphemeralOSDiskSettings = new DiffDiskSettings + EphemeralOSDiskSettings = new BatchDiffDiskSettings { Placement = DiffDiskPlacement.CacheDisk, }, @@ -596,18 +596,18 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "standard_d2s_v3") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "standard_d2s_v3") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", Sku = "20_04-lts", }, "batch.node.ubuntu 20.04") { - OsDisk = new OSDisk + OsDisk = new BatchOsDisk { - EphemeralOSDiskSettings = new DiffDiskSettings + 
EphemeralOSDiskSettings = new BatchDiffDiskSettings { Placement = DiffDiskPlacement.CacheDisk, }, @@ -706,9 +706,9 @@ public void Example_BatchClient_CreatePool_CreatesASimplePoolWithResourceTags_Co TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "STANDARD_DC2s_V2") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_DC2s_V2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "UbuntuServer", @@ -733,9 +733,9 @@ public async Task Example_BatchClient_CreatePool_CreatesASimplePoolWithResourceT TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "STANDARD_DC2s_V2") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_DC2s_V2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "UbuntuServer", @@ -838,9 +838,9 @@ public void Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile_Conve TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "STANDARD_DC2s_V2") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_DC2s_V2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "UbuntuServer", @@ -848,7 
+848,7 @@ public void Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile_Conve Version = "latest", }, "batch.node.ubuntu 18.04") { - SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new BatchUefiSettings { SecureBootEnabled = false, }), @@ -866,9 +866,9 @@ public async Task Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool001", "STANDARD_DC2s_V2") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool001", "STANDARD_DC2s_V2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "UbuntuServer", @@ -876,7 +876,7 @@ public async Task Example_BatchClient_CreatePool_CreatesAPoolWithSecurityProfile Version = "latest", }, "batch.node.ubuntu 18.04") { - SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new UefiSettings + SecurityProfile = new SecurityProfile(true, SecurityTypes.TrustedLaunch, new BatchUefiSettings { SecureBootEnabled = false, }), @@ -986,9 +986,9 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", 
@@ -1001,7 +1001,7 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo EnableInterNodeCommunication = true, TaskSlotsPerNode = 3, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, }; Response response = client.CreatePool(pool); } @@ -1014,9 +1014,9 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", @@ -1029,7 +1029,7 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura EnableInterNodeCommunication = true, TaskSlotsPerNode = 3, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, }; Response response = await client.CreatePoolAsync(pool); } @@ -1132,16 +1132,16 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new 
VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", Sku = "120_04-lts", }, "batch.node.ubuntu 20.04") { - ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + ContainerConfiguration = new BatchContainerConfiguration(ContainerType.DockerCompatible) { ContainerImageNames = { "busybox" }, }, @@ -1164,16 +1164,16 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", Sku = "120_04-lts", }, "batch.node.ubuntu 20.04") { - ContainerConfiguration = new ContainerConfiguration(ContainerType.DockerCompatible) + ContainerConfiguration = new BatchContainerConfiguration(ContainerType.DockerCompatible) { ContainerImageNames = { "busybox" }, }, @@ -1324,9 +1324,9 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", @@ -1352,7 +1352,7 @@ public void 
Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo EnableInterNodeCommunication = true, TaskSlotsPerNode = 3, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Simplified, }; Response response = client.CreatePool(pool); @@ -1366,9 +1366,9 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("pool2", "standard_a1") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("pool2", "standard_a1") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "Canonical", Offer = "0001-com-ubuntu-server-focal", @@ -1394,7 +1394,7 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura EnableInterNodeCommunication = true, TaskSlotsPerNode = 3, TaskSchedulingPolicy = new BatchTaskSchedulingPolicy(BatchNodeFillType.Spread), - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Simplified, }; Response response = await client.CreatePoolAsync(pool); @@ -1484,9 +1484,9 @@ public void Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigurationPo TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool002", "Standard_A1_v2") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool002", "Standard_A1_v2") { - 
VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -1513,9 +1513,9 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolCreateContent pool = new BatchPoolCreateContent("mypool002", "Standard_A1_v2") + BatchPoolCreateOptions pool = new BatchPoolCreateOptions("mypool002", "Standard_A1_v2") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -1534,32 +1534,6 @@ public async Task Example_BatchClient_CreatePool_CreatesAVirtualMachineConfigura Response response = await client.CreatePoolAsync(pool); } - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_DeletePool_PoolDelete() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.DeletePool("poolId"); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_DeletePool_PoolDelete_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.DeletePoolAsync("poolId"); - - Console.WriteLine(response.Status); - } - [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_GetPool_GetAPoolWithAcceleratedNetworking() @@ -1970,12 +1944,12 @@ public void 
Example_BatchClient_EnablePoolAutoScale_PoolEnableAutoscale_Convenie TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolEnableAutoScaleContent content = new BatchPoolEnableAutoScaleContent + BatchPoolAutoScaleEnableOptions enableAutoScaleOptions = new BatchPoolAutoScaleEnableOptions { AutoScaleFormula = "$TargetDedicated=0", AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT8M"), }; - Response response = client.EnablePoolAutoScale("poolId", content); + Response response = client.EnablePoolAutoScale("poolId", enableAutoScaleOptions); } [Test] @@ -1986,12 +1960,12 @@ public async Task Example_BatchClient_EnablePoolAutoScale_PoolEnableAutoscale_Co TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolEnableAutoScaleContent content = new BatchPoolEnableAutoScaleContent + BatchPoolAutoScaleEnableOptions enableAutoScaleOptions = new BatchPoolAutoScaleEnableOptions { AutoScaleFormula = "$TargetDedicated=0", AutoScaleEvaluationInterval = XmlConvert.ToTimeSpan("PT8M"), }; - Response response = await client.EnablePoolAutoScaleAsync("poolId", content); + Response response = await client.EnablePoolAutoScaleAsync("poolId", enableAutoScaleOptions); } [Test] @@ -2038,8 +2012,8 @@ public void Example_BatchClient_EvaluatePoolAutoScale_PoolEvaluateAutoscale_Conv TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolEvaluateAutoScaleContent content = new BatchPoolEvaluateAutoScaleContent("$TargetDedicated=1"); - Response response = client.EvaluatePoolAutoScale("poolId", content); + BatchPoolAutoScaleEvaluateOptions evaluateAutoScaleOptions = new BatchPoolAutoScaleEvaluateOptions("$TargetDedicated=1"); + Response response = client.EvaluatePoolAutoScale("poolId", evaluateAutoScaleOptions); } [Test] @@ -2050,102 +2024,8 @@ public async Task 
Example_BatchClient_EvaluatePoolAutoScale_PoolEvaluateAutoscal TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolEvaluateAutoScaleContent content = new BatchPoolEvaluateAutoScaleContent("$TargetDedicated=1"); - Response response = await client.EvaluatePoolAutoScaleAsync("poolId", content); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_ResizePool_PoolResize() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = RequestContent.Create(new - { - targetDedicatedNodes = 1, - targetLowPriorityNodes = 0, - }); - Response response = client.ResizePool("resizePool", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_ResizePool_PoolResize_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = RequestContent.Create(new - { - targetDedicatedNodes = 1, - targetLowPriorityNodes = 0, - }); - Response response = await client.ResizePoolAsync("resizePool", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_ResizePool_PoolResize_Convenience() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - BatchPoolResizeContent content = new BatchPoolResizeContent - { - TargetDedicatedNodes = 1, - TargetLowPriorityNodes = 0, - }; - Response response = client.ResizePool("resizePool", content); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public 
async Task Example_BatchClient_ResizePool_PoolResize_Convenience_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - BatchPoolResizeContent content = new BatchPoolResizeContent - { - TargetDedicatedNodes = 1, - TargetLowPriorityNodes = 0, - }; - Response response = await client.ResizePoolAsync("resizePool", content); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_StopPoolResize_PoolStopResize() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.StopPoolResize("poolId"); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_StopPoolResize_PoolStopResize_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.StopPoolResizeAsync("poolId"); - - Console.WriteLine(response.Status); + BatchPoolAutoScaleEvaluateOptions evaluateAutoScaleOptions = new BatchPoolAutoScaleEvaluateOptions("$TargetDedicated=1"); + Response response = await client.EvaluatePoolAutoScaleAsync("poolId", evaluateAutoScaleOptions); } [Test] @@ -2202,7 +2082,7 @@ public void Example_BatchClient_ReplacePoolProperties_PoolUpdate_Convenience() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolReplaceContent pool = new BatchPoolReplaceContent(Array.Empty(), Array.Empty(), Array.Empty()) + BatchPoolReplaceOptions pool = new BatchPoolReplaceOptions(Array.Empty(), Array.Empty(), Array.Empty()) { StartTask = new BatchStartTask("/bin/bash -c 'echo start task'"), }; @@ -2217,105 +2097,13 @@ 
public async Task Example_BatchClient_ReplacePoolProperties_PoolUpdate_Convenien TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchPoolReplaceContent pool = new BatchPoolReplaceContent(Array.Empty(), Array.Empty(), Array.Empty()) + BatchPoolReplaceOptions pool = new BatchPoolReplaceOptions(Array.Empty(), Array.Empty(), Array.Empty()) { StartTask = new BatchStartTask("/bin/bash -c 'echo start task'"), }; Response response = await client.ReplacePoolPropertiesAsync("poolId", pool); } - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_RemoveNodes_PoolRemoveNodes() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = RequestContent.Create(new - { - nodeList = new object[] - { -"tvm-1695681911_1-20161122t224741z", -"tvm-1695681911_2-20161122t224741z" - }, - }); - Response response = client.RemoveNodes("poolId", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_RemoveNodes_PoolRemoveNodes_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = RequestContent.Create(new - { - nodeList = new object[] - { -"tvm-1695681911_1-20161122t224741z", -"tvm-1695681911_2-20161122t224741z" - }, - }); - Response response = await client.RemoveNodesAsync("poolId", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_RemoveNodes_PoolRemoveNodes_Convenience() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new 
BatchClient(endpoint, credential); - - BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { "tvm-1695681911_1-20161122t224741z", "tvm-1695681911_2-20161122t224741z" }); - Response response = client.RemoveNodes("poolId", content); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_RemoveNodes_PoolRemoveNodes_Convenience_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - BatchNodeRemoveContent content = new BatchNodeRemoveContent(new string[] { "tvm-1695681911_1-20161122t224741z", "tvm-1695681911_2-20161122t224741z" }); - Response response = await client.RemoveNodesAsync("poolId", content); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_DeleteJob_DeleteJob() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.DeleteJob("jobId"); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_DeleteJob_DeleteJob_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.DeleteJobAsync("jobId"); - - Console.WriteLine(response.Status); - } - [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_GetJob_JobGet() @@ -2516,140 +2304,6 @@ public async Task Example_BatchClient_ReplaceJob_JobPatch_Convenience_Async() Response response = await client.ReplaceJobAsync("jobId", job); } - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_DisableJob_JobDisable() - { - Uri endpoint = new Uri(""); 
- TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = RequestContent.Create(new - { - disableTasks = "terminate", - }); - Response response = client.DisableJob("jobId", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_DisableJob_JobDisable_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = RequestContent.Create(new - { - disableTasks = "terminate", - }); - Response response = await client.DisableJobAsync("jobId", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_DisableJob_JobDisable_Convenience() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Terminate); - Response response = client.DisableJob("jobId", content); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_DisableJob_JobDisable_Convenience_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - BatchJobDisableContent content = new BatchJobDisableContent(DisableBatchJobOption.Terminate); - Response response = await client.DisableJobAsync("jobId", content); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_EnableJob_JobEnable() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client 
= new BatchClient(endpoint, credential); - - Response response = client.EnableJob("jobId"); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_EnableJob_JobEnable_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.EnableJobAsync("jobId"); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_TerminateJob_JobTerminate() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = null; - Response response = client.TerminateJob("jobId", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_TerminateJob_JobTerminate_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = null; - Response response = await client.TerminateJobAsync("jobId", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_TerminateJob_JobTerminate_Convenience() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.TerminateJob("jobId"); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_TerminateJob_JobTerminate_Convenience_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new 
DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.TerminateJobAsync("jobId"); - } - [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_CreateJob_CreatesABasicJob() @@ -2702,7 +2356,7 @@ public void Example_BatchClient_CreateJob_CreatesABasicJob_Convenience() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo + BatchJobCreateOptions job = new BatchJobCreateOptions("jobId", new BatchPoolInfo { PoolId = "poolId", }) @@ -2720,7 +2374,7 @@ public async Task Example_BatchClient_CreateJob_CreatesABasicJob_Convenience_Asy TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo + BatchJobCreateOptions job = new BatchJobCreateOptions("jobId", new BatchPoolInfo { PoolId = "poolId", }) @@ -3074,14 +2728,14 @@ public void Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo + BatchJobCreateOptions job = new BatchJobCreateOptions("jobId", new BatchPoolInfo { AutoPoolSpecification = new BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) { AutoPoolIdPrefix = "mypool", Pool = new BatchPoolSpecification("STANDARD_D2S_V3") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -3109,7 +2763,7 @@ public void Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience() { 
ResourceFiles = {new ResourceFile { -HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas", +HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas"), FilePath = "myprogram2.exe", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -3133,7 +2787,7 @@ public void Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience() StoreName = "Root", Visibility = {BatchCertificateVisibility.Task}, }}, - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, @@ -3149,11 +2803,11 @@ public void Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience() { ResourceFiles = {new ResourceFile { -HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas", +HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas"), FilePath = "myprogram.exe", }, new ResourceFile { -StorageContainerUrl = "http://mystorage1.blob.core.windows.net/data?sas", +StorageContainerUri = new Uri("http://mystorage1.blob.core.windows.net/data?sas"), FilePath = "datafolder", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -3178,7 +2832,7 @@ public void Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience() }, RunExclusive = true, }, - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, }; Response response = client.CreateJob(job); } @@ -3191,14 +2845,14 @@ public async Task Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience_A TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchJobCreateContent job = new BatchJobCreateContent("jobId", new BatchPoolInfo + BatchJobCreateOptions job = new BatchJobCreateOptions("jobId", new BatchPoolInfo { AutoPoolSpecification = new 
BatchAutoPoolSpecification(BatchPoolLifetimeOption.Job) { AutoPoolIdPrefix = "mypool", Pool = new BatchPoolSpecification("STANDARD_D2S_V3") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -3226,7 +2880,7 @@ public async Task Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience_A { ResourceFiles = {new ResourceFile { -HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas", +HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas"), FilePath = "myprogram2.exe", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -3250,7 +2904,7 @@ public async Task Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience_A StoreName = "Root", Visibility = {BatchCertificateVisibility.Task}, }}, - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, @@ -3266,11 +2920,11 @@ public async Task Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience_A { ResourceFiles = {new ResourceFile { -HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas", +HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas"), FilePath = "myprogram.exe", }, new ResourceFile { -StorageContainerUrl = "http://mystorage1.blob.core.windows.net/data?sas", +StorageContainerUri = new Uri("http://mystorage1.blob.core.windows.net/data?sas"), FilePath = "datafolder", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -3295,7 +2949,7 @@ public async Task Example_BatchClient_CreateJob_CreatesAComplexJob_Convenience_A }, RunExclusive = true, }, - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new 
BatchMetadataItem("myproperty", "myvalue") }, }; Response response = await client.CreateJobAsync(job); } @@ -3380,7 +3034,7 @@ public void Example_BatchClient_CreateCertificate_CertificateCreate() { thumbprintAlgorithm = "sha1", thumbprint = "0123456789abcdef0123456789abcdef01234567", - data = "#####...", + data = "U3dhZ2dlciByb2Nrcw==", certificateFormat = "pfx", password = "", }); @@ -3401,7 +3055,7 @@ public async Task Example_BatchClient_CreateCertificate_CertificateCreate_Async( { thumbprintAlgorithm = "sha1", thumbprint = "0123456789abcdef0123456789abcdef01234567", - data = "#####...", + data = "U3dhZ2dlciByb2Nrcw==", certificateFormat = "pfx", password = "", }); @@ -3418,7 +3072,7 @@ public void Example_BatchClient_CreateCertificate_CertificateCreate_Convenience( TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", "#####...") + BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", BinaryData.FromObjectAsJson("U3dhZ2dlciByb2Nrcw==")) { CertificateFormat = BatchCertificateFormat.Pfx, Password = "", @@ -3434,7 +3088,7 @@ public async Task Example_BatchClient_CreateCertificate_CertificateCreate_Conven TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", "#####...") + BatchCertificate certificate = new BatchCertificate("0123456789abcdef0123456789abcdef01234567", "sha1", BinaryData.FromObjectAsJson("U3dhZ2dlciByb2Nrcw==")) { CertificateFormat = BatchCertificateFormat.Pfx, Password = "", @@ -3468,32 +3122,6 @@ public async Task Example_BatchClient_CancelCertificateDeletion_CertificateCance Console.WriteLine(response.Status); } - [Test] - [Ignore("Only validating compilation of 
examples")] - public void Example_BatchClient_DeleteCertificate_CertificateDelete() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.DeleteCertificate("sha1", "0123456789abcdef0123456789abcdef01234567"); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_DeleteCertificate_CertificateDelete_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.DeleteCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); - - Console.WriteLine(response.Status); - } - [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_GetCertificate_CertificateGet() @@ -3548,32 +3176,6 @@ public async Task Example_BatchClient_GetCertificate_CertificateGet_Convenience_ Response response = await client.GetCertificateAsync("sha1", "0123456789abcdef0123456789abcdef01234567"); } - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_DeleteJobSchedule_JobScheduleDelete() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.DeleteJobSchedule("jobScheduleId"); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_DeleteJobSchedule_JobScheduleDelete_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.DeleteJobScheduleAsync("jobScheduleId"); - - 
Console.WriteLine(response.Status); - } - [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_GetJobSchedule_JobScheduleGet() @@ -3872,32 +3474,6 @@ public async Task Example_BatchClient_EnableJobSchedule_JobScheduleEnable_Async( Console.WriteLine(response.Status); } - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_TerminateJobSchedule_JobScheduleTerminate() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.TerminateJobSchedule("jobScheduleId"); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_TerminateJobSchedule_JobScheduleTerminate_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.TerminateJobScheduleAsync("jobScheduleId"); - - Console.WriteLine(response.Status); - } - [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_CreateJobSchedule_CreatesABasicJobSchedule() @@ -3962,7 +3538,7 @@ public void Example_BatchClient_CreateJobSchedule_CreatesABasicJobSchedule_Conve TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("jobScheduleId", new BatchJobScheduleConfiguration + BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("jobScheduleId", new BatchJobScheduleConfiguration { RecurrenceInterval = XmlConvert.ToTimeSpan("PT5M"), }, new BatchJobSpecification(new BatchPoolInfo @@ -3980,7 +3556,7 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesABasicJobSchedule TokenCredential 
credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("jobScheduleId", new BatchJobScheduleConfiguration + BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("jobScheduleId", new BatchJobScheduleConfiguration { RecurrenceInterval = XmlConvert.ToTimeSpan("PT5M"), }, new BatchJobSpecification(new BatchPoolInfo @@ -4354,7 +3930,7 @@ public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("jobScheduleId", new BatchJobScheduleConfiguration + BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("jobScheduleId", new BatchJobScheduleConfiguration { DoNotRunUntil = DateTimeOffset.Parse("2014-09-10T02:30:00.000Z"), DoNotRunAfter = DateTimeOffset.Parse("2014-09-10T06:30:00.000Z"), @@ -4367,7 +3943,7 @@ public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ AutoPoolIdPrefix = "mypool", Pool = new BatchPoolSpecification("STANDARD_D2S_V3") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -4395,7 +3971,7 @@ public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ { ResourceFiles = {new ResourceFile { -HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas", +HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas"), FilePath = "myprogram2.exe", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -4419,7 +3995,7 @@ public void 
Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ StoreName = "Root", Visibility = {BatchCertificateVisibility.Task}, }}, - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, @@ -4435,11 +4011,11 @@ public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ { ResourceFiles = {new ResourceFile { -HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas", +HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas"), FilePath = "myprogram.exe", }, new ResourceFile { -HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/test.txt?sas", +HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/test.txt?sas"), FilePath = "test.txt", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -4466,7 +4042,7 @@ public void Example_BatchClient_CreateJobSchedule_CreatesAComplexJobScheduleAdd_ }, }) { - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, }; Response response = client.CreateJobSchedule(jobSchedule); } @@ -4479,7 +4055,7 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchJobScheduleCreateContent jobSchedule = new BatchJobScheduleCreateContent("jobScheduleId", new BatchJobScheduleConfiguration + BatchJobScheduleCreateOptions jobSchedule = new BatchJobScheduleCreateOptions("jobScheduleId", new BatchJobScheduleConfiguration { DoNotRunUntil = DateTimeOffset.Parse("2014-09-10T02:30:00.000Z"), DoNotRunAfter = DateTimeOffset.Parse("2014-09-10T06:30:00.000Z"), @@ -4492,7 +4068,7 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu AutoPoolIdPrefix = "mypool", 
Pool = new BatchPoolSpecification("STANDARD_D2S_V3") { - VirtualMachineConfiguration = new VirtualMachineConfiguration(new ImageReference + VirtualMachineConfiguration = new VirtualMachineConfiguration(new BatchVmImageReference { Publisher = "MicrosoftWindowsServer", Offer = "WindowsServer", @@ -4520,7 +4096,7 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu { ResourceFiles = {new ResourceFile { -HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas", +HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram2.exe?sas"), FilePath = "myprogram2.exe", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -4544,7 +4120,7 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu StoreName = "Root", Visibility = {BatchCertificateVisibility.Task}, }}, - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, TargetNodeCommunicationMode = BatchNodeCommunicationMode.Default, }, }, @@ -4560,11 +4136,11 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu { ResourceFiles = {new ResourceFile { -HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas", +HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/myprogram.exe?sas"), FilePath = "myprogram.exe", }, new ResourceFile { -HttpUrl = "http://mystorage1.blob.core.windows.net/scripts/test.txt?sas", +HttpUri = new Uri("http://mystorage1.blob.core.windows.net/scripts/test.txt?sas"), FilePath = "test.txt", }}, EnvironmentSettings = {new EnvironmentSetting("myvariable") @@ -4591,7 +4167,7 @@ public async Task Example_BatchClient_CreateJobSchedule_CreatesAComplexJobSchedu }, }) { - Metadata = { new MetadataItem("myproperty", "myvalue") }, + Metadata = { new BatchMetadataItem("myproperty", "myvalue") }, }; Response response = await client.CreateJobScheduleAsync(jobSchedule); } @@ -4640,7 
+4216,7 @@ public void Example_BatchClient_CreateTask_CreatesABasicTask_Convenience() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo task1"); + BatchTaskCreateOptions task = new BatchTaskCreateOptions("task1", "cmd /c echo task1"); Response response = client.CreateTask("jobId", task); } @@ -4652,7 +4228,7 @@ public async Task Example_BatchClient_CreateTask_CreatesABasicTask_Convenience_A TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo task1"); + BatchTaskCreateOptions task = new BatchTaskCreateOptions("task1", "cmd /c echo task1"); Response response = await client.CreateTaskAsync("jobId", task); } @@ -4726,7 +4302,7 @@ public void Example_BatchClient_CreateTask_CreatesATaskWithContainerSettings_Con TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") + BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { @@ -4752,7 +4328,7 @@ public async Task Example_BatchClient_CreateTask_CreatesATaskWithContainerSettin TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") + BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { @@ -4854,7 +4430,7 @@ public void Example_BatchClient_CreateTask_CreatesATaskWithContainerSettingsWith TokenCredential credential = new 
DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") + BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { @@ -4884,7 +4460,7 @@ public async Task Example_BatchClient_CreateTask_CreatesATaskWithContainerSettin TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") + BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { @@ -5000,7 +4576,7 @@ public void Example_BatchClient_CreateTask_CreatesATaskWithContainerSettingsWith TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") + BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { @@ -5034,7 +4610,7 @@ public async Task Example_BatchClient_CreateTask_CreatesATaskWithContainerSettin TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "bash -c 'echo hello'") + BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "bash -c 'echo hello'") { ContainerSettings = new BatchTaskContainerSettings("ubuntu") { @@ -5150,13 +4726,13 @@ public void Example_BatchClient_CreateTask_CreatesATaskWithExitConditions_Conven TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - 
BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "cmd /c exit 3") + BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "cmd /c exit 3") { ExitConditions = new ExitConditions { ExitCodeRanges = {new ExitCodeRangeMapping(2, 4, new ExitOptions { -JobAction = BatchJobAction.Terminate, +JobAction = BatchJobActionKind.Terminate, })}, }, UserIdentity = new UserIdentity @@ -5179,13 +4755,13 @@ public async Task Example_BatchClient_CreateTask_CreatesATaskWithExitConditions_ TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("taskId", "cmd /c exit 3") + BatchTaskCreateOptions task = new BatchTaskCreateOptions("taskId", "cmd /c exit 3") { ExitConditions = new ExitConditions { ExitCodeRanges = {new ExitCodeRangeMapping(2, 4, new ExitOptions { -JobAction = BatchJobAction.Terminate, +JobAction = BatchJobActionKind.Terminate, })}, }, UserIdentity = new UserIdentity @@ -5246,7 +4822,7 @@ public void Example_BatchClient_CreateTask_CreatesATaskWithExtraSlotRequirement_ TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo task1") + BatchTaskCreateOptions task = new BatchTaskCreateOptions("task1", "cmd /c echo task1") { RequiredSlots = 2, }; @@ -5261,7 +4837,7 @@ public async Task Example_BatchClient_CreateTask_CreatesATaskWithExtraSlotRequir TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskCreateContent task = new BatchTaskCreateContent("task1", "cmd /c echo task1") + BatchTaskCreateOptions task = new BatchTaskCreateOptions("task1", "cmd /c echo task1") { RequiredSlots = 2, }; @@ -5336,12 +4912,12 @@ public void Example_BatchClient_CreateTaskCollection_CreatesABasicCollectionOfTa TokenCredential 
credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] + BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateOptions[] { -new BatchTaskCreateContent("simple1", "cmd /c dir /s"), -new BatchTaskCreateContent("simple2", "cmd /c dir /s") +new BatchTaskCreateOptions("simple1", "cmd /c dir /s"), +new BatchTaskCreateOptions("simple2", "cmd /c dir /s") }); - Response response = client.CreateTaskCollection("jobId", taskCollection); + Response response = client.CreateTaskCollection("jobId", taskCollection); } [Test] @@ -5352,12 +4928,12 @@ public async Task Example_BatchClient_CreateTaskCollection_CreatesABasicCollecti TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] + BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateOptions[] { -new BatchTaskCreateContent("simple1", "cmd /c dir /s"), -new BatchTaskCreateContent("simple2", "cmd /c dir /s") +new BatchTaskCreateOptions("simple1", "cmd /c dir /s"), +new BatchTaskCreateOptions("simple2", "cmd /c dir /s") }); - Response response = await client.CreateTaskCollectionAsync("jobId", taskCollection); + Response response = await client.CreateTaskCollectionAsync("jobId", taskCollection); } [Test] @@ -5518,9 +5094,9 @@ public void Example_BatchClient_CreateTaskCollection_CreatesAComplexCollectionOf TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] + BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateOptions[] { -new BatchTaskCreateContent("complex1", "cmd /c dir /s") +new BatchTaskCreateOptions("complex1", "cmd /c dir /s") { ResourceFiles = {new 
ResourceFile { @@ -5534,7 +5110,7 @@ public void Example_BatchClient_CreateTaskCollection_CreatesAComplexCollectionOf { Value = "value2", }}, -AffinityInfo = new AffinityInfo("affinityId"), +AffinityInfo = new BatchAffinityInfo("affinityId"), Constraints = new BatchTaskConstraints { MaxWallClockTime = XmlConvert.ToTimeSpan("P1D"), @@ -5547,14 +5123,14 @@ public void Example_BatchClient_CreateTaskCollection_CreatesAComplexCollectionOf NumberOfInstances = 3, CommonResourceFiles = {new ResourceFile { -HttpUrl = "https://common.blob.core.windows.net/", +HttpUri = new Uri("https://common.blob.core.windows.net/"), FilePath = "common.exe", }}, }, }, -new BatchTaskCreateContent("simple3", "cmd /c dir /s") +new BatchTaskCreateOptions("simple3", "cmd /c dir /s") }); - Response response = client.CreateTaskCollection("jobId", taskCollection); + Response response = client.CreateTaskCollection("jobId", taskCollection); } [Test] @@ -5565,9 +5141,9 @@ public async Task Example_BatchClient_CreateTaskCollection_CreatesAComplexCollec TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateContent[] + BatchTaskGroup taskCollection = new BatchTaskGroup(new BatchTaskCreateOptions[] { -new BatchTaskCreateContent("complex1", "cmd /c dir /s") +new BatchTaskCreateOptions("complex1", "cmd /c dir /s") { ResourceFiles = {new ResourceFile { @@ -5581,7 +5157,7 @@ public async Task Example_BatchClient_CreateTaskCollection_CreatesAComplexCollec { Value = "value2", }}, -AffinityInfo = new AffinityInfo("affinityId"), +AffinityInfo = new BatchAffinityInfo("affinityId"), Constraints = new BatchTaskConstraints { MaxWallClockTime = XmlConvert.ToTimeSpan("P1D"), @@ -5594,14 +5170,14 @@ public async Task Example_BatchClient_CreateTaskCollection_CreatesAComplexCollec NumberOfInstances = 3, CommonResourceFiles = {new ResourceFile { -HttpUrl = 
"https://common.blob.core.windows.net/", +HttpUri = new Uri("https://common.blob.core.windows.net/"), FilePath = "common.exe", }}, }, }, -new BatchTaskCreateContent("simple3", "cmd /c dir /s") +new BatchTaskCreateOptions("simple3", "cmd /c dir /s") }); - Response response = await client.CreateTaskCollectionAsync("jobId", taskCollection); + Response response = await client.CreateTaskCollectionAsync("jobId", taskCollection); } [Test] @@ -5940,7 +5516,7 @@ public void Example_BatchClient_CreateNodeUser_NodeCreateUser_Convenience() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchNodeUserCreateContent user = new BatchNodeUserCreateContent("userName") + BatchNodeUserCreateOptions user = new BatchNodeUserCreateOptions("userName") { IsAdmin = false, ExpiryTime = DateTimeOffset.Parse("2017-08-01T00:00:00Z"), @@ -5957,7 +5533,7 @@ public async Task Example_BatchClient_CreateNodeUser_NodeCreateUser_Convenience_ TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchNodeUserCreateContent user = new BatchNodeUserCreateContent("userName") + BatchNodeUserCreateOptions user = new BatchNodeUserCreateOptions("userName") { IsAdmin = false, ExpiryTime = DateTimeOffset.Parse("2017-08-01T00:00:00Z"), @@ -6036,12 +5612,12 @@ public void Example_BatchClient_ReplaceNodeUser_NodeUpdateUser_Convenience() TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent + BatchNodeUserUpdateOptions updateOptions = new BatchNodeUserUpdateOptions { Password = "12345", ExpiryTime = DateTimeOffset.Parse("2016-11-27T00:45:48.7320857Z"), }; - Response response = client.ReplaceNodeUser("poolId", "tvm-1695681911_1-20161121t182739z", "userName", content); + Response response = client.ReplaceNodeUser("poolId", 
"tvm-1695681911_1-20161121t182739z", "userName", updateOptions); } [Test] @@ -6052,12 +5628,12 @@ public async Task Example_BatchClient_ReplaceNodeUser_NodeUpdateUser_Convenience TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - BatchNodeUserUpdateContent content = new BatchNodeUserUpdateContent + BatchNodeUserUpdateOptions updateOptions = new BatchNodeUserUpdateOptions { Password = "12345", ExpiryTime = DateTimeOffset.Parse("2016-11-27T00:45:48.7320857Z"), }; - Response response = await client.ReplaceNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", "userName", content); + Response response = await client.ReplaceNodeUserAsync("poolId", "tvm-1695681911_1-20161121t182739z", "userName", updateOptions); } [Test] @@ -6110,182 +5686,6 @@ public async Task Example_BatchClient_GetNode_NodeGet_Convenience_Async() Response response = await client.GetNodeAsync("poolId", "tvm-1695681911_2-20161122t193202z"); } - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_RebootNode_NodeReboot() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = null; - Response response = client.RebootNode("poolId", "tvm-1695681911_1-20161122t193202z", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_RebootNode_NodeReboot_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = null; - Response response = await client.RebootNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of 
examples")] - public void Example_BatchClient_RebootNode_NodeReboot_Convenience() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.RebootNode("poolId", "tvm-1695681911_1-20161122t193202z"); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_RebootNode_NodeReboot_Convenience_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.RebootNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_StartNode_NodeStart() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.StartNode("poolId", "tvm-1695681911_1-20161122t193202z"); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_StartNode_NodeStart_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.StartNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_ReimageNode_NodeReimage() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = null; - Response response = client.ReimageNode("poolId", "tvm-1695681911_1-20161122t193202z", 
content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_ReimageNode_NodeReimage_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = null; - Response response = await client.ReimageNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_ReimageNode_NodeReimage_Convenience() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.ReimageNode("poolId", "tvm-1695681911_1-20161122t193202z"); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_ReimageNode_NodeReimage_Convenience_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.ReimageNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_DeallocateNode_NodeDeallocate() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = null; - Response response = client.DeallocateNode("poolId", "tvm-1695681911_1-20161122t193202z", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_DeallocateNode_NodeDeallocate_Async() - { - Uri endpoint = new Uri(""); - 
TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - using RequestContent content = null; - Response response = await client.DeallocateNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z", content); - - Console.WriteLine(response.Status); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public void Example_BatchClient_DeallocateNode_NodeDeallocate_Convenience() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = client.DeallocateNode("poolId", "tvm-1695681911_1-20161122t193202z"); - } - - [Test] - [Ignore("Only validating compilation of examples")] - public async Task Example_BatchClient_DeallocateNode_NodeDeallocate_Convenience_Async() - { - Uri endpoint = new Uri(""); - TokenCredential credential = new DefaultAzureCredential(); - BatchClient client = new BatchClient(endpoint, credential); - - Response response = await client.DeallocateNodeAsync("poolId", "tvm-1695681911_1-20161122t193202z"); - } - [Test] [Ignore("Only validating compilation of examples")] public void Example_BatchClient_DisableNodeScheduling_NodeDisableScheduling() @@ -6462,8 +5862,8 @@ public void Example_BatchClient_UploadNodeLogs_UploadBatchServiceLogs_Convenienc TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - UploadBatchServiceLogsContent content = new UploadBatchServiceLogsContent("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig", DateTimeOffset.Parse("2017-11-27T00:00:00Z")); - Response response = client.UploadNodeLogs("poolId", "tvm-1695681911_1-20161121t182739z", content); + UploadBatchServiceLogsOptions uploadOptions = new UploadBatchServiceLogsOptions(new 
Uri("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig"), DateTimeOffset.Parse("2017-11-27T00:00:00Z")); + Response response = client.UploadNodeLogs("poolId", "tvm-1695681911_1-20161121t182739z", uploadOptions); } [Test] @@ -6474,8 +5874,8 @@ public async Task Example_BatchClient_UploadNodeLogs_UploadBatchServiceLogs_Conv TokenCredential credential = new DefaultAzureCredential(); BatchClient client = new BatchClient(endpoint, credential); - UploadBatchServiceLogsContent content = new UploadBatchServiceLogsContent("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig", DateTimeOffset.Parse("2017-11-27T00:00:00Z")); - Response response = await client.UploadNodeLogsAsync("poolId", "tvm-1695681911_1-20161121t182739z", content); + UploadBatchServiceLogsOptions uploadOptions = new UploadBatchServiceLogsOptions(new Uri("https://somestorageacct.blob.core.windows.net/batch-compute-node-logs?se=2017-12-09T18%3A51%3A00Z&sp=w&sv=2016-05-31&sr=c&sig"), DateTimeOffset.Parse("2017-11-27T00:00:00Z")); + Response response = await client.UploadNodeLogsAsync("poolId", "tvm-1695681911_1-20161121t182739z", uploadOptions); } [Test] diff --git a/sdk/batch/Azure.Compute.Batch/tsp-location.yaml b/sdk/batch/Azure.Compute.Batch/tsp-location.yaml index 8e6c9fccab46..dfa636e3fa57 100644 --- a/sdk/batch/Azure.Compute.Batch/tsp-location.yaml +++ b/sdk/batch/Azure.Compute.Batch/tsp-location.yaml @@ -1,3 +1,4 @@ directory: specification/batch/Azure.Batch -commit: e7ed63002df7e9cc1d3e4cd139d76c4d7040acd3 +commit: da5f436da0537251f7336b56f1e2df48c634d147 repo: Azure/azure-rest-api-specs +additionalDirectories: