diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AIOpenAIClientBuilderExtensions.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AIOpenAIClientBuilderExtensions.cs
new file mode 100644
index 000000000000..bc32a7f2b1a4
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AIOpenAIClientBuilderExtensions.cs
@@ -0,0 +1,52 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+//
+
+#nullable disable
+
+using System;
+using Azure;
+using Azure.AI.OpenAI;
+using Azure.Core.Extensions;
+
+namespace Microsoft.Extensions.Azure
+{
+ /// <summary> Extension methods to add <see cref="OpenAIClient"/> to client builder. </summary>
+ public static partial class AIOpenAIClientBuilderExtensions
+ {
+ /// <summary> Registers a <see cref="OpenAIClient"/> instance. </summary>
+ /// <param name="builder"> The builder to register with. </param>
+ /// <param name="endpoint">
+ /// Supported Cognitive Services endpoints (protocol and hostname, for example:
+ /// https://westus.api.cognitive.microsoft.com).
+ /// </param>
+ /// <param name="credential"> A credential used to authenticate to an Azure Service. </param>
+ public static IAzureClientBuilder<OpenAIClient, OpenAIClientOptions> AddOpenAIClient<TBuilder>(this TBuilder builder, Uri endpoint, AzureKeyCredential credential)
+ where TBuilder : IAzureClientFactoryBuilder
+ {
+ return builder.RegisterClientFactory<OpenAIClient, OpenAIClientOptions>((options) => new OpenAIClient(endpoint, credential, options));
+ }
+
+ /// <summary> Registers a <see cref="OpenAIClient"/> instance using a TokenCredential supplied by the builder. </summary>
+ /// <param name="builder"> The builder to register with. </param>
+ /// <param name="endpoint">
+ /// Supported Cognitive Services endpoints (protocol and hostname, for example:
+ /// https://westus.api.cognitive.microsoft.com).
+ /// </param>
+ public static IAzureClientBuilder<OpenAIClient, OpenAIClientOptions> AddOpenAIClient<TBuilder>(this TBuilder builder, Uri endpoint)
+ where TBuilder : IAzureClientFactoryBuilderWithCredential
+ {
+ return builder.RegisterClientFactory<OpenAIClient, OpenAIClientOptions>((options, cred) => new OpenAIClient(endpoint, cred, options));
+ }
+
+ /// <summary> Registers a <see cref="OpenAIClient"/> instance. </summary>
+ /// <param name="builder"> The builder to register with. </param>
+ /// <param name="configuration"> The configuration values. </param>
+ public static IAzureClientBuilder<OpenAIClient, OpenAIClientOptions> AddOpenAIClient<TBuilder, TConfiguration>(this TBuilder builder, TConfiguration configuration)
+ where TBuilder : IAzureClientFactoryBuilderWithConfiguration<TConfiguration>
+ {
+ return builder.RegisterClientFactory<OpenAIClient, OpenAIClientOptions>(configuration);
+ }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AIOpenAIModelFactory.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AIOpenAIModelFactory.cs
new file mode 100644
index 000000000000..fa7196db6f7d
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AIOpenAIModelFactory.cs
@@ -0,0 +1,1620 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+//
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+
+namespace Azure.AI.OpenAI
+{
+ /// Model factory for models.
+ public static partial class AIOpenAIModelFactory
+ {
+ /// <summary> Initializes a new instance of <see cref="AudioTranscriptionOptions"/>. </summary>
+ /// <param name="audioData">
+ /// The audio data to transcribe. This must be the binary content of a file in one of the supported media formats:
+ /// flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
+ /// </param>
+ /// <param name="filename"> The optional filename or descriptive identifier to associate with the audio data. </param>
+ /// <param name="responseFormat"> The requested format of the transcription response data, which will influence the content and detail of the result. </param>
+ /// <param name="language">
+ /// The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code
+ /// such as 'en' or 'fr'.
+ /// Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
+ /// </param>
+ /// <param name="prompt">
+ /// An optional hint to guide the model's style or continue from a prior audio segment. The written language of the
+ /// prompt should match the primary spoken language of the audio data.
+ /// </param>
+ /// <param name="temperature">
+ /// The sampling temperature, between 0 and 1.
+ /// Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+ /// If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ /// </param>
+ /// <param name="timestampGranularities">
+ /// The timestamp granularities to populate for this transcription.
+ /// `response_format` must be set `verbose_json` to use timestamp granularities.
+ /// Either or both of these options are supported: `word`, or `segment`.
+ /// Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
+ /// </param>
+ /// <param name="deploymentName"> The model to use for this transcription request. </param>
+ /// <returns> A new <see cref="AudioTranscriptionOptions"/> instance for mocking. </returns>
+ public static AudioTranscriptionOptions AudioTranscriptionOptions(Stream audioData = null, string filename = null, AudioTranscriptionFormat? responseFormat = null, string language = null, string prompt = null, float? temperature = null, IEnumerable<AudioTimestampGranularity> timestampGranularities = null, string deploymentName = null)
+ {
+ // Substitute an empty collection so the model constructor always receives a non-null list.
+ timestampGranularities ??= new List<AudioTimestampGranularity>();
+
+ return new AudioTranscriptionOptions(
+ audioData,
+ filename,
+ responseFormat,
+ language,
+ prompt,
+ temperature,
+ timestampGranularities?.ToList(),
+ deploymentName,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AudioTranscriptionSegment"/>. </summary>
+ /// <param name="id"> The 0-based index of this segment within a transcription. </param>
+ /// <param name="start"> The time at which this segment started relative to the beginning of the transcribed audio. </param>
+ /// <param name="end"> The time at which this segment ended relative to the beginning of the transcribed audio. </param>
+ /// <param name="text"> The transcribed text that was part of this audio segment. </param>
+ /// <param name="temperature"> The temperature score associated with this audio segment. </param>
+ /// <param name="averageLogProbability"> The average log probability associated with this audio segment. </param>
+ /// <param name="compressionRatio"> The compression ratio of this audio segment. </param>
+ /// <param name="noSpeechProbability"> The probability of no speech detection within this audio segment. </param>
+ /// <param name="tokens"> The token IDs matching the transcribed text in this audio segment. </param>
+ /// <param name="seek">
+ /// The seek position associated with the processing of this audio segment.
+ /// Seek positions are expressed as hundredths of seconds.
+ /// The model may process several segments from a single seek position, so while the seek position will never represent
+ /// a later time than the segment's start, the segment's start may represent a significantly later time than the
+ /// segment's associated seek position.
+ /// </param>
+ /// <returns> A new <see cref="AudioTranscriptionSegment"/> instance for mocking. </returns>
+ public static AudioTranscriptionSegment AudioTranscriptionSegment(int id = default, TimeSpan start = default, TimeSpan end = default, string text = null, float temperature = default, float averageLogProbability = default, float compressionRatio = default, float noSpeechProbability = default, IEnumerable<int> tokens = null, int seek = default)
+ {
+ // Token IDs are integers; default to an empty list when the caller passes null.
+ tokens ??= new List<int>();
+
+ return new AudioTranscriptionSegment(
+ id,
+ start,
+ end,
+ text,
+ temperature,
+ averageLogProbability,
+ compressionRatio,
+ noSpeechProbability,
+ tokens?.ToList(),
+ seek,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AudioTranscriptionWord"/>. </summary>
+ /// <param name="word"> The textual content of the word. </param>
+ /// <param name="start"> The start time of the word relative to the beginning of the audio, expressed in seconds. </param>
+ /// <param name="end"> The end time of the word relative to the beginning of the audio, expressed in seconds. </param>
+ /// <returns> A new <see cref="AudioTranscriptionWord"/> instance for mocking. </returns>
+ public static AudioTranscriptionWord AudioTranscriptionWord(string word = null, TimeSpan start = default, TimeSpan end = default)
+ {
+ return new AudioTranscriptionWord(word, start, end, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AudioTranslationOptions"/>. </summary>
+ /// <param name="audioData">
+ /// The audio data to translate. This must be the binary content of a file in one of the supported media formats:
+ /// flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
+ /// </param>
+ /// <param name="filename"> The optional filename or descriptive identifier to associate with the audio data. </param>
+ /// <param name="responseFormat"> The requested format of the translation response data, which will influence the content and detail of the result. </param>
+ /// <param name="prompt">
+ /// An optional hint to guide the model's style or continue from a prior audio segment. The written language of the
+ /// prompt should match the primary spoken language of the audio data.
+ /// </param>
+ /// <param name="temperature">
+ /// The sampling temperature, between 0 and 1.
+ /// Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+ /// If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ /// </param>
+ /// <param name="deploymentName"> The model to use for this translation request. </param>
+ /// <returns> A new <see cref="AudioTranslationOptions"/> instance for mocking. </returns>
+ public static AudioTranslationOptions AudioTranslationOptions(Stream audioData = null, string filename = null, AudioTranslationFormat? responseFormat = null, string prompt = null, float? temperature = null, string deploymentName = null)
+ {
+ return new AudioTranslationOptions(
+ audioData,
+ filename,
+ responseFormat,
+ prompt,
+ temperature,
+ deploymentName,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AudioTranslationSegment"/>. </summary>
+ /// <param name="id"> The 0-based index of this segment within a translation. </param>
+ /// <param name="start"> The time at which this segment started relative to the beginning of the translated audio. </param>
+ /// <param name="end"> The time at which this segment ended relative to the beginning of the translated audio. </param>
+ /// <param name="text"> The translated text that was part of this audio segment. </param>
+ /// <param name="temperature"> The temperature score associated with this audio segment. </param>
+ /// <param name="averageLogProbability"> The average log probability associated with this audio segment. </param>
+ /// <param name="compressionRatio"> The compression ratio of this audio segment. </param>
+ /// <param name="noSpeechProbability"> The probability of no speech detection within this audio segment. </param>
+ /// <param name="tokens"> The token IDs matching the translated text in this audio segment. </param>
+ /// <param name="seek">
+ /// The seek position associated with the processing of this audio segment.
+ /// Seek positions are expressed as hundredths of seconds.
+ /// The model may process several segments from a single seek position, so while the seek position will never represent
+ /// a later time than the segment's start, the segment's start may represent a significantly later time than the
+ /// segment's associated seek position.
+ /// </param>
+ /// <returns> A new <see cref="AudioTranslationSegment"/> instance for mocking. </returns>
+ public static AudioTranslationSegment AudioTranslationSegment(int id = default, TimeSpan start = default, TimeSpan end = default, string text = null, float temperature = default, float averageLogProbability = default, float compressionRatio = default, float noSpeechProbability = default, IEnumerable<int> tokens = null, int seek = default)
+ {
+ // Token IDs are integers; default to an empty list when the caller passes null.
+ tokens ??= new List<int>();
+
+ return new AudioTranslationSegment(
+ id,
+ start,
+ end,
+ text,
+ temperature,
+ averageLogProbability,
+ compressionRatio,
+ noSpeechProbability,
+ tokens?.ToList(),
+ seek,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="Completions"/>. </summary>
+ /// <param name="id"> A unique identifier associated with this completions response. </param>
+ /// <param name="created">
+ /// The first timestamp associated with generation activity for this completions response,
+ /// represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970.
+ /// </param>
+ /// <param name="promptFilterResults">
+ /// Content filtering results for zero or more prompts in the request. In a streaming request,
+ /// results for different prompts may arrive at different times or in different orders.
+ /// </param>
+ /// <param name="choices">
+ /// The collection of completions choices associated with this completions response.
+ /// Generally, `n` choices are generated per provided prompt with a default value of 1.
+ /// Token limits and other settings may limit the number of choices generated.
+ /// </param>
+ /// <param name="usage"> Usage information for tokens processed and generated as part of this completions operation. </param>
+ /// <param name="systemFingerprint">
+ /// This fingerprint represents the backend configuration that the model runs with.
+ ///
+ /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
+ /// </param>
+ /// <returns> A new <see cref="Completions"/> instance for mocking. </returns>
+ public static Completions Completions(string id = null, DateTimeOffset created = default, IEnumerable<ContentFilterResultsForPrompt> promptFilterResults = null, IEnumerable<Choice> choices = null, CompletionsUsage usage = null, string systemFingerprint = null)
+ {
+ // Substitute empty collections so the model constructor always receives non-null lists.
+ promptFilterResults ??= new List<ContentFilterResultsForPrompt>();
+ choices ??= new List<Choice>();
+
+ return new Completions(
+ id,
+ created,
+ promptFilterResults?.ToList(),
+ choices?.ToList(),
+ usage,
+ systemFingerprint,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ContentFilterResultsForPrompt"/>. </summary>
+ /// <param name="promptIndex"> The index of this prompt in the set of prompt results. </param>
+ /// <param name="contentFilterResults"> Content filtering results for this prompt. </param>
+ /// <returns> A new <see cref="ContentFilterResultsForPrompt"/> instance for mocking. </returns>
+ public static ContentFilterResultsForPrompt ContentFilterResultsForPrompt(int promptIndex = default, ContentFilterResultDetailsForPrompt contentFilterResults = null)
+ {
+ return new ContentFilterResultsForPrompt(promptIndex, contentFilterResults, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ContentFilterResultDetailsForPrompt"/>. </summary>
+ /// <param name="sexual">
+ /// Describes language related to anatomical organs and genitals, romantic relationships,
+ /// acts portrayed in erotic or affectionate terms, physical sexual acts, including
+ /// those portrayed as an assault or a forced sexual violent act against one’s will,
+ /// prostitution, pornography, and abuse.
+ /// </param>
+ /// <param name="violence">
+ /// Describes language related to physical actions intended to hurt, injure, damage, or
+ /// kill someone or something; describes weapons, etc.
+ /// </param>
+ /// <param name="hate">
+ /// Describes language attacks or uses that include pejorative or discriminatory language
+ /// with reference to a person or identity group on the basis of certain differentiating
+ /// attributes of these groups including but not limited to race, ethnicity, nationality,
+ /// gender identity and expression, sexual orientation, religion, immigration status, ability
+ /// status, personal appearance, and body size.
+ /// </param>
+ /// <param name="selfHarm">
+ /// Describes language related to physical actions intended to purposely hurt, injure,
+ /// or damage one’s body, or kill oneself.
+ /// </param>
+ /// <param name="profanity"> Describes whether profanity was detected. </param>
+ /// <param name="customBlocklists"> Describes detection results against configured custom blocklists. </param>
+ /// <param name="error">
+ /// Describes an error returned if the content filtering system is
+ /// down or otherwise unable to complete the operation in time.
+ /// </param>
+ /// <param name="jailbreak"> Whether a jailbreak attempt was detected in the prompt. </param>
+ /// <param name="indirectAttack"> Whether an indirect attack was detected in the prompt. </param>
+ /// <returns> A new <see cref="ContentFilterResultDetailsForPrompt"/> instance for mocking. </returns>
+ public static ContentFilterResultDetailsForPrompt ContentFilterResultDetailsForPrompt(ContentFilterResult sexual = null, ContentFilterResult violence = null, ContentFilterResult hate = null, ContentFilterResult selfHarm = null, ContentFilterDetectionResult profanity = null, ContentFilterDetailedResults customBlocklists = null, ResponseError error = null, ContentFilterDetectionResult jailbreak = null, ContentFilterDetectionResult indirectAttack = null)
+ {
+ return new ContentFilterResultDetailsForPrompt(
+ sexual,
+ violence,
+ hate,
+ selfHarm,
+ profanity,
+ customBlocklists,
+ error,
+ jailbreak,
+ indirectAttack,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ContentFilterResult"/>. </summary>
+ /// <param name="filtered"> A value indicating whether or not the content has been filtered. </param>
+ /// <param name="severity"> Ratings for the intensity and risk level of filtered content. </param>
+ /// <returns> A new <see cref="ContentFilterResult"/> instance for mocking. </returns>
+ public static ContentFilterResult ContentFilterResult(bool filtered = default, ContentFilterSeverity severity = default)
+ {
+ return new ContentFilterResult(filtered, severity, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ContentFilterDetectionResult"/>. </summary>
+ /// <param name="filtered"> A value indicating whether or not the content has been filtered. </param>
+ /// <param name="detected"> A value indicating whether detection occurred, irrespective of severity or whether the content was filtered. </param>
+ /// <returns> A new <see cref="ContentFilterDetectionResult"/> instance for mocking. </returns>
+ public static ContentFilterDetectionResult ContentFilterDetectionResult(bool filtered = default, bool detected = default)
+ {
+ return new ContentFilterDetectionResult(filtered, detected, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ContentFilterDetailedResults"/>. </summary>
+ /// <param name="filtered"> A value indicating whether or not the content has been filtered. </param>
+ /// <param name="details"> The collection of detailed blocklist result information. </param>
+ /// <returns> A new <see cref="ContentFilterDetailedResults"/> instance for mocking. </returns>
+ public static ContentFilterDetailedResults ContentFilterDetailedResults(bool filtered = default, IEnumerable<ContentFilterBlocklistIdResult> details = null)
+ {
+ // Substitute an empty collection so the model constructor always receives a non-null list.
+ details ??= new List<ContentFilterBlocklistIdResult>();
+
+ return new ContentFilterDetailedResults(filtered, details?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ContentFilterBlocklistIdResult"/>. </summary>
+ /// <param name="filtered"> A value indicating whether or not the content has been filtered. </param>
+ /// <param name="id"> The ID of the custom blocklist evaluated. </param>
+ /// <returns> A new <see cref="ContentFilterBlocklistIdResult"/> instance for mocking. </returns>
+ public static ContentFilterBlocklistIdResult ContentFilterBlocklistIdResult(bool filtered = default, string id = null)
+ {
+ return new ContentFilterBlocklistIdResult(filtered, id, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="Choice"/>. </summary>
+ /// <param name="text"> The generated text for a given completions prompt. </param>
+ /// <param name="index"> The ordered index associated with this completions choice. </param>
+ /// <param name="contentFilterResults">
+ /// Information about the content filtering category (hate, sexual, violence, self_harm), if it
+ /// has been detected, as well as the severity level (very_low, low, medium, high-scale that
+ /// determines the intensity and risk level of harmful content) and if it has been filtered or not.
+ /// </param>
+ /// <param name="logProbabilityModel"> The log probabilities model for tokens associated with this completions choice. </param>
+ /// <param name="finishReason"> Reason for finishing. </param>
+ /// <returns> A new <see cref="Choice"/> instance for mocking. </returns>
+ public static Choice Choice(string text = null, int index = default, ContentFilterResultsForChoice contentFilterResults = null, CompletionsLogProbabilityModel logProbabilityModel = null, CompletionsFinishReason? finishReason = null)
+ {
+ return new Choice(
+ text,
+ index,
+ contentFilterResults,
+ logProbabilityModel,
+ finishReason,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ContentFilterResultsForChoice"/>. </summary>
+ /// <param name="sexual">
+ /// Describes language related to anatomical organs and genitals, romantic relationships,
+ /// acts portrayed in erotic or affectionate terms, physical sexual acts, including
+ /// those portrayed as an assault or a forced sexual violent act against one’s will,
+ /// prostitution, pornography, and abuse.
+ /// </param>
+ /// <param name="violence">
+ /// Describes language related to physical actions intended to hurt, injure, damage, or
+ /// kill someone or something; describes weapons, etc.
+ /// </param>
+ /// <param name="hate">
+ /// Describes language attacks or uses that include pejorative or discriminatory language
+ /// with reference to a person or identity group on the basis of certain differentiating
+ /// attributes of these groups including but not limited to race, ethnicity, nationality,
+ /// gender identity and expression, sexual orientation, religion, immigration status, ability
+ /// status, personal appearance, and body size.
+ /// </param>
+ /// <param name="selfHarm">
+ /// Describes language related to physical actions intended to purposely hurt, injure,
+ /// or damage one’s body, or kill oneself.
+ /// </param>
+ /// <param name="profanity"> Describes whether profanity was detected. </param>
+ /// <param name="customBlocklists"> Describes detection results against configured custom blocklists. </param>
+ /// <param name="error">
+ /// Describes an error returned if the content filtering system is
+ /// down or otherwise unable to complete the operation in time.
+ /// </param>
+ /// <param name="protectedMaterialText"> Information about detection of protected text material. </param>
+ /// <param name="protectedMaterialCode"> Information about detection of protected code material. </param>
+ /// <param name="ungroundedMaterial"> Information about detection of ungrounded material. </param>
+ /// <returns> A new <see cref="ContentFilterResultsForChoice"/> instance for mocking. </returns>
+ public static ContentFilterResultsForChoice ContentFilterResultsForChoice(ContentFilterResult sexual = null, ContentFilterResult violence = null, ContentFilterResult hate = null, ContentFilterResult selfHarm = null, ContentFilterDetectionResult profanity = null, ContentFilterDetailedResults customBlocklists = null, ResponseError error = null, ContentFilterDetectionResult protectedMaterialText = null, ContentFilterCitedDetectionResult protectedMaterialCode = null, ContentFilterCompletionTextSpanResult ungroundedMaterial = null)
+ {
+ return new ContentFilterResultsForChoice(
+ sexual,
+ violence,
+ hate,
+ selfHarm,
+ profanity,
+ customBlocklists,
+ error,
+ protectedMaterialText,
+ protectedMaterialCode,
+ ungroundedMaterial,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ContentFilterCitedDetectionResult"/>. </summary>
+ /// <param name="filtered"> A value indicating whether or not the content has been filtered. </param>
+ /// <param name="detected"> A value indicating whether detection occurred, irrespective of severity or whether the content was filtered. </param>
+ /// <param name="url"> The internet location associated with the detection. </param>
+ /// <param name="license"> The license description associated with the detection. </param>
+ /// <returns> A new <see cref="ContentFilterCitedDetectionResult"/> instance for mocking. </returns>
+ public static ContentFilterCitedDetectionResult ContentFilterCitedDetectionResult(bool filtered = default, bool detected = default, Uri url = null, string license = null)
+ {
+ return new ContentFilterCitedDetectionResult(filtered, detected, url, license, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ContentFilterCompletionTextSpanResult"/>. </summary>
+ /// <param name="filtered"> A value indicating whether or not the content has been filtered. </param>
+ /// <param name="detected"> A value indicating whether detection occurred, irrespective of severity or whether the content was filtered. </param>
+ /// <param name="details"> The collection of completion text spans. </param>
+ /// <returns> A new <see cref="ContentFilterCompletionTextSpanResult"/> instance for mocking. </returns>
+ public static ContentFilterCompletionTextSpanResult ContentFilterCompletionTextSpanResult(bool filtered = default, bool detected = default, IEnumerable<ContentFilterCompletionTextSpan> details = null)
+ {
+ // Substitute an empty collection so the model constructor always receives a non-null list.
+ details ??= new List<ContentFilterCompletionTextSpan>();
+
+ return new ContentFilterCompletionTextSpanResult(filtered, detected, details?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ContentFilterCompletionTextSpan"/>. </summary>
+ /// <param name="completionStartOffset"> Offset of the UTF32 code point which begins the span. </param>
+ /// <param name="completionEndOffset">
+ /// Offset of the first UTF32 code point which is excluded from the span.
+ /// This field is always equal to completion_start_offset for empty spans.
+ /// This field is always larger than completion_start_offset for non-empty spans.
+ /// </param>
+ /// <returns> A new <see cref="ContentFilterCompletionTextSpan"/> instance for mocking. </returns>
+ public static ContentFilterCompletionTextSpan ContentFilterCompletionTextSpan(int completionStartOffset = default, int completionEndOffset = default)
+ {
+ return new ContentFilterCompletionTextSpan(completionStartOffset, completionEndOffset, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="CompletionsLogProbabilityModel"/>. </summary>
+ /// <param name="tokens"> The textual forms of tokens evaluated in this probability model. </param>
+ /// <param name="tokenLogProbabilities"> A collection of log probability values for the tokens in this completions data. </param>
+ /// <param name="topLogProbabilities"> A mapping of tokens to maximum log probability values in this completions data. </param>
+ /// <param name="textOffsets"> The text offsets associated with tokens in this completions data. </param>
+ /// <returns> A new <see cref="CompletionsLogProbabilityModel"/> instance for mocking. </returns>
+ public static CompletionsLogProbabilityModel CompletionsLogProbabilityModel(IEnumerable<string> tokens = null, IEnumerable<float?> tokenLogProbabilities = null, IEnumerable<IDictionary<string, float?>> topLogProbabilities = null, IEnumerable<int> textOffsets = null)
+ {
+ // Substitute empty collections so the model constructor always receives non-null lists.
+ tokens ??= new List<string>();
+ tokenLogProbabilities ??= new List<float?>();
+ topLogProbabilities ??= new List<IDictionary<string, float?>>();
+ textOffsets ??= new List<int>();
+
+ return new CompletionsLogProbabilityModel(tokens?.ToList(), tokenLogProbabilities?.ToList(), topLogProbabilities?.ToList(), textOffsets?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="CompletionsUsage"/>. </summary>
+ /// <param name="completionTokens"> The number of tokens generated across all completions emissions. </param>
+ /// <param name="promptTokens"> The number of tokens in the provided prompts for the completions request. </param>
+ /// <param name="totalTokens"> The total number of tokens processed for the completions request and response. </param>
+ /// <param name="promptTokensDetails"> Details of the prompt tokens. </param>
+ /// <param name="completionTokensDetails"> Breakdown of tokens used in a completion. </param>
+ /// <returns> A new <see cref="CompletionsUsage"/> instance for mocking. </returns>
+ public static CompletionsUsage CompletionsUsage(int completionTokens = default, int promptTokens = default, int totalTokens = default, CompletionsUsagePromptTokensDetails promptTokensDetails = null, CompletionsUsageCompletionTokensDetails completionTokensDetails = null)
+ {
+ return new CompletionsUsage(
+ completionTokens,
+ promptTokens,
+ totalTokens,
+ promptTokensDetails,
+ completionTokensDetails,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="CompletionsUsagePromptTokensDetails"/>. </summary>
+ /// <param name="audioTokens"> Audio input tokens present in the prompt. </param>
+ /// <param name="cachedTokens"> Cached tokens present in the prompt. </param>
+ /// <returns> A new <see cref="CompletionsUsagePromptTokensDetails"/> instance for mocking. </returns>
+ public static CompletionsUsagePromptTokensDetails CompletionsUsagePromptTokensDetails(int? audioTokens = null, int? cachedTokens = null)
+ {
+ return new CompletionsUsagePromptTokensDetails(audioTokens, cachedTokens, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="CompletionsUsageCompletionTokensDetails"/>. </summary>
+ /// <param name="acceptedPredictionTokens">
+ /// When using Predicted Outputs, the number of tokens in the
+ /// prediction that appeared in the completion.
+ /// </param>
+ /// <param name="audioTokens"> Audio input tokens generated by the model. </param>
+ /// <param name="reasoningTokens"> Tokens generated by the model for reasoning. </param>
+ /// <param name="rejectedPredictionTokens">
+ /// When using Predicted Outputs, the number of tokens in the
+ /// prediction that did not appear in the completion. However, like
+ /// reasoning tokens, these tokens are still counted in the total
+ /// completion tokens for purposes of billing, output, and context
+ /// window limits.
+ /// </param>
+ /// <returns> A new <see cref="CompletionsUsageCompletionTokensDetails"/> instance for mocking. </returns>
+ public static CompletionsUsageCompletionTokensDetails CompletionsUsageCompletionTokensDetails(int? acceptedPredictionTokens = null, int? audioTokens = null, int? reasoningTokens = null, int? rejectedPredictionTokens = null)
+ {
+ return new CompletionsUsageCompletionTokensDetails(acceptedPredictionTokens, audioTokens, reasoningTokens, rejectedPredictionTokens, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ChatRequestSystemMessage"/>. </summary>
+ /// <param name="content"> The contents of the system message. </param>
+ /// <param name="name"> An optional name for the participant. </param>
+ /// <returns> A new <see cref="ChatRequestSystemMessage"/> instance for mocking. </returns>
+ public static ChatRequestSystemMessage ChatRequestSystemMessage(BinaryData content = null, string name = null)
+ {
+ return new ChatRequestSystemMessage(ChatRole.System, serializedAdditionalRawData: null, content, name);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ChatMessageTextContentItem"/>. </summary>
+ /// <param name="text"> The content of the message. </param>
+ /// <returns> A new <see cref="ChatMessageTextContentItem"/> instance for mocking. </returns>
+ public static ChatMessageTextContentItem ChatMessageTextContentItem(string text = null)
+ {
+ return new ChatMessageTextContentItem("text", serializedAdditionalRawData: null, text);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ChatMessageRefusalContentItem"/>. </summary>
+ /// <param name="refusal"> The refusal message. </param>
+ /// <returns> A new <see cref="ChatMessageRefusalContentItem"/> instance for mocking. </returns>
+ public static ChatMessageRefusalContentItem ChatMessageRefusalContentItem(string refusal = null)
+ {
+ return new ChatMessageRefusalContentItem("refusal", serializedAdditionalRawData: null, refusal);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ChatMessageImageContentItem"/>. </summary>
+ /// <param name="imageUrl"> An internet location, which must be accessible to the model, from which the image may be retrieved. </param>
+ /// <returns> A new <see cref="ChatMessageImageContentItem"/> instance for mocking. </returns>
+ public static ChatMessageImageContentItem ChatMessageImageContentItem(ChatMessageImageUrl imageUrl = null)
+ {
+ return new ChatMessageImageContentItem("image_url", serializedAdditionalRawData: null, imageUrl);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ChatMessageImageUrl"/>. </summary>
+ /// <param name="url"> The URL of the image. </param>
+ /// <param name="detail">
+ /// The evaluation quality setting to use, which controls relative prioritization of speed, token consumption, and
+ /// accuracy.
+ /// </param>
+ /// <returns> A new <see cref="ChatMessageImageUrl"/> instance for mocking. </returns>
+ public static ChatMessageImageUrl ChatMessageImageUrl(Uri url = null, ChatMessageImageDetailLevel? detail = null)
+ {
+ return new ChatMessageImageUrl(url, detail, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ChatMessageAudioContentItem"/>. </summary>
+ /// <param name="inputAudio"> The audio data. </param>
+ /// <returns> A new <see cref="ChatMessageAudioContentItem"/> instance for mocking. </returns>
+ public static ChatMessageAudioContentItem ChatMessageAudioContentItem(InputAudioContent inputAudio = null)
+ {
+ return new ChatMessageAudioContentItem("input_audio", serializedAdditionalRawData: null, inputAudio);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ChatRequestDeveloperMessage"/>. </summary>
+ /// <param name="content"> An array of content parts with a defined type. For developer messages, only type `text` is supported. </param>
+ /// <param name="name"> An optional name for the participant. Provides the model information to differentiate between participants of the same role. </param>
+ /// <returns> A new <see cref="ChatRequestDeveloperMessage"/> instance for mocking. </returns>
+ public static ChatRequestDeveloperMessage ChatRequestDeveloperMessage(BinaryData content = null, string name = null)
+ {
+ return new ChatRequestDeveloperMessage(ChatRole.Developer, serializedAdditionalRawData: null, content, name);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ChatRequestUserMessage"/>. </summary>
+ /// <param name="content"> The contents of the user message, with available input types varying by selected model. </param>
+ /// <param name="name"> An optional name for the participant. </param>
+ /// <returns> A new <see cref="ChatRequestUserMessage"/> instance for mocking. </returns>
+ public static ChatRequestUserMessage ChatRequestUserMessage(BinaryData content = null, string name = null)
+ {
+ return new ChatRequestUserMessage(ChatRole.User, serializedAdditionalRawData: null, content, name);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ChatRequestAssistantMessage"/>. </summary>
+ /// <param name="content"> The content of the message. </param>
+ /// <param name="name"> An optional name for the participant. </param>
+ /// <param name="toolCalls">
+ /// The tool calls that must be resolved and have their outputs appended to subsequent input messages for the chat
+ /// completions request to resolve as configured.
+ /// Please note <see cref="ChatCompletionsToolCall"/> is the base class. According to the scenario, a derived class of the base class might need to be assigned here, or this property needs to be casted to one of the possible derived classes.
+ /// </param>
+ /// <param name="functionCall">
+ /// The function call that must be resolved and have its output appended to subsequent input messages for the chat
+ /// completions request to resolve as configured.
+ /// </param>
+ /// <param name="refusal"> The refusal message by the assistant. </param>
+ /// <returns> A new <see cref="ChatRequestAssistantMessage"/> instance for mocking. </returns>
+ public static ChatRequestAssistantMessage ChatRequestAssistantMessage(BinaryData content = null, string name = null, IEnumerable<ChatCompletionsToolCall> toolCalls = null, FunctionCall functionCall = null, string refusal = null)
+ {
+ // Substitute an empty collection so the model constructor always receives a non-null list.
+ toolCalls ??= new List<ChatCompletionsToolCall>();
+
+ return new ChatRequestAssistantMessage(
+ ChatRole.Assistant,
+ serializedAdditionalRawData: null,
+ content,
+ name,
+ toolCalls?.ToList(),
+ functionCall,
+ refusal);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ChatRequestToolMessage"/>. </summary>
+ /// <param name="content"> The content of the message. </param>
+ /// <param name="toolCallId"> The ID of the tool call resolved by the provided content. </param>
+ /// <returns> A new <see cref="ChatRequestToolMessage"/> instance for mocking. </returns>
+ public static ChatRequestToolMessage ChatRequestToolMessage(BinaryData content = null, string toolCallId = null)
+ {
+ return new ChatRequestToolMessage(ChatRole.Tool, serializedAdditionalRawData: null, content, toolCallId);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="ChatRequestFunctionMessage"/>. </summary>
+ /// <param name="name"> The name of the function that was called to produce output. </param>
+ /// <param name="content"> The output of the function as requested by the function call. </param>
+ /// <returns> A new <see cref="ChatRequestFunctionMessage"/> instance for mocking. </returns>
+ public static ChatRequestFunctionMessage ChatRequestFunctionMessage(string name = null, string content = null)
+ {
+ return new ChatRequestFunctionMessage(ChatRole.Function, serializedAdditionalRawData: null, name, content);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="FunctionDefinition"/>. </summary>
+ /// <param name="name"> The name of the function to be called. </param>
+ /// <param name="description">
+ /// A description of what the function does. The model will use this description when selecting the function and
+ /// interpreting its parameters.
+ /// </param>
+ /// <param name="parameters"> The parameters the function accepts, described as a JSON Schema object. </param>
+ /// <returns> A new <see cref="FunctionDefinition"/> instance for mocking. </returns>
+ public static FunctionDefinition FunctionDefinition(string name = null, string description = null, BinaryData parameters = null)
+ {
+ return new FunctionDefinition(name, description, parameters, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AzureSearchChatExtensionConfiguration"/>. </summary>
+ /// <param name="parameters"> The parameters to use when configuring Azure Search. </param>
+ /// <returns> A new <see cref="AzureSearchChatExtensionConfiguration"/> instance for mocking. </returns>
+ public static AzureSearchChatExtensionConfiguration AzureSearchChatExtensionConfiguration(AzureSearchChatExtensionParameters parameters = null)
+ {
+ return new AzureSearchChatExtensionConfiguration(AzureChatExtensionType.AzureSearch, serializedAdditionalRawData: null, parameters);
+ }
+
/// <summary> Initializes a new instance of <see cref="AzureSearchChatExtensionParameters"/>. </summary>
/// <param name="documentCount"> The configured top number of documents to feature for the configured query. </param>
/// <param name="shouldRestrictResultScope"> Whether queries should be restricted to use of indexed data. </param>
/// <param name="strictness"> The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. </param>
/// <param name="maxSearchQueries">
/// The max number of rewritten queries should be send to search provider for one user message. If not specified,
/// the system will decide the number of queries to send.
/// </param>
/// <param name="allowPartialResult">
/// If specified as true, the system will allow partial search results to be used and the request fails if all the queries fail.
/// If not specified, or specified as false, the request will fail if any search query fails.
/// </param>
/// <param name="includeContexts"> The included properties of the output context. If not specified, the default value is `citations` and `intent`. </param>
/// <param name="authentication">
/// The authentication method to use when accessing the defined data source.
/// If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) authentication.
/// </param>
/// <param name="searchEndpoint"> The absolute endpoint path for the Azure Cognitive Search resource to use. </param>
/// <param name="indexName"> The name of the index to use as available in the referenced Azure Cognitive Search resource. </param>
/// <param name="fieldMappingOptions"> Customized field mapping behavior to use when interacting with the search index. </param>
/// <param name="queryType"> The query type to use with Azure Cognitive Search. </param>
/// <param name="semanticConfiguration"> The additional semantic configuration for the query. </param>
/// <param name="filter"> Search filter. </param>
/// <param name="embeddingDependency"> The embedding dependency for vector search. </param>
/// <returns> A new <see cref="AzureSearchChatExtensionParameters"/> instance for mocking. </returns>
public static AzureSearchChatExtensionParameters AzureSearchChatExtensionParameters(int? documentCount = null, bool? shouldRestrictResultScope = null, int? strictness = null, int? maxSearchQueries = null, bool? allowPartialResult = null, IEnumerable includeContexts = null, OnYourDataAuthenticationOptions authentication = null, Uri searchEndpoint = null, string indexName = null, AzureSearchIndexFieldMappingOptions fieldMappingOptions = null, AzureSearchQueryType? queryType = null, string semanticConfiguration = null, string filter = null, OnYourDataVectorizationSource embeddingDependency = null)
{
    // Default to an empty collection so the model never holds a null list.
    includeContexts ??= new List();

    return new AzureSearchChatExtensionParameters(
        documentCount,
        shouldRestrictResultScope,
        strictness,
        maxSearchQueries,
        allowPartialResult,
        // Guaranteed non-null by the ??= above; the redundant null-conditional was removed.
        includeContexts.ToList(),
        authentication,
        searchEndpoint,
        indexName,
        fieldMappingOptions,
        queryType,
        semanticConfiguration,
        filter,
        embeddingDependency,
        serializedAdditionalRawData: null);
}
+
/// <summary> Initializes a new instance of <see cref="OnYourDataApiKeyAuthenticationOptions"/>. </summary>
/// <param name="key"> The API key to use for authentication. </param>
/// <returns> A new <see cref="OnYourDataApiKeyAuthenticationOptions"/> instance for mocking. </returns>
public static OnYourDataApiKeyAuthenticationOptions OnYourDataApiKeyAuthenticationOptions(string key = null)
    => new OnYourDataApiKeyAuthenticationOptions(OnYourDataAuthenticationType.ApiKey, serializedAdditionalRawData: null, key);
+
/// <summary> Initializes a new instance of <see cref="OnYourDataConnectionStringAuthenticationOptions"/>. </summary>
/// <param name="connectionString"> The connection string to use for authentication. </param>
/// <returns> A new <see cref="OnYourDataConnectionStringAuthenticationOptions"/> instance for mocking. </returns>
public static OnYourDataConnectionStringAuthenticationOptions OnYourDataConnectionStringAuthenticationOptions(string connectionString = null)
    => new OnYourDataConnectionStringAuthenticationOptions(OnYourDataAuthenticationType.ConnectionString, serializedAdditionalRawData: null, connectionString);
+
/// <summary> Initializes a new instance of <see cref="OnYourDataKeyAndKeyIdAuthenticationOptions"/>. </summary>
/// <param name="key"> The key to use for authentication. </param>
/// <param name="keyId"> The key ID to use for authentication. </param>
/// <returns> A new <see cref="OnYourDataKeyAndKeyIdAuthenticationOptions"/> instance for mocking. </returns>
public static OnYourDataKeyAndKeyIdAuthenticationOptions OnYourDataKeyAndKeyIdAuthenticationOptions(string key = null, string keyId = null)
    => new OnYourDataKeyAndKeyIdAuthenticationOptions(OnYourDataAuthenticationType.KeyAndKeyId, serializedAdditionalRawData: null, key, keyId);
+
/// <summary> Initializes a new instance of <see cref="OnYourDataEncodedApiKeyAuthenticationOptions"/>. </summary>
/// <param name="encodedApiKey"> The encoded API key to use for authentication. </param>
/// <returns> A new <see cref="OnYourDataEncodedApiKeyAuthenticationOptions"/> instance for mocking. </returns>
public static OnYourDataEncodedApiKeyAuthenticationOptions OnYourDataEncodedApiKeyAuthenticationOptions(string encodedApiKey = null)
    => new OnYourDataEncodedApiKeyAuthenticationOptions(OnYourDataAuthenticationType.EncodedApiKey, serializedAdditionalRawData: null, encodedApiKey);
+
/// <summary> Initializes a new instance of <see cref="OnYourDataUsernameAndPasswordAuthenticationOptions"/>. </summary>
/// <param name="username"> The username. </param>
/// <param name="password"> The password. </param>
/// <returns> A new <see cref="OnYourDataUsernameAndPasswordAuthenticationOptions"/> instance for mocking. </returns>
public static OnYourDataUsernameAndPasswordAuthenticationOptions OnYourDataUsernameAndPasswordAuthenticationOptions(string username = null, string password = null)
    => new OnYourDataUsernameAndPasswordAuthenticationOptions(OnYourDataAuthenticationType.UsernameAndPassword, serializedAdditionalRawData: null, username, password);
+
/// <summary> Initializes a new instance of <see cref="OnYourDataAccessTokenAuthenticationOptions"/>. </summary>
/// <param name="accessToken"> The access token to use for authentication. </param>
/// <returns> A new <see cref="OnYourDataAccessTokenAuthenticationOptions"/> instance for mocking. </returns>
public static OnYourDataAccessTokenAuthenticationOptions OnYourDataAccessTokenAuthenticationOptions(string accessToken = null)
    => new OnYourDataAccessTokenAuthenticationOptions(OnYourDataAuthenticationType.AccessToken, serializedAdditionalRawData: null, accessToken);
+
/// <summary> Initializes a new instance of <see cref="OnYourDataUserAssignedManagedIdentityAuthenticationOptions"/>. </summary>
/// <param name="managedIdentityResourceId"> The resource ID of the user-assigned managed identity to use for authentication. </param>
/// <returns> A new <see cref="OnYourDataUserAssignedManagedIdentityAuthenticationOptions"/> instance for mocking. </returns>
public static OnYourDataUserAssignedManagedIdentityAuthenticationOptions OnYourDataUserAssignedManagedIdentityAuthenticationOptions(string managedIdentityResourceId = null)
    => new OnYourDataUserAssignedManagedIdentityAuthenticationOptions(OnYourDataAuthenticationType.UserAssignedManagedIdentity, serializedAdditionalRawData: null, managedIdentityResourceId);
+
/// <summary> Initializes a new instance of <see cref="OnYourDataEndpointVectorizationSource"/>. </summary>
/// <param name="endpoint"> Specifies the resource endpoint URL from which embeddings should be retrieved. It should be in the format of https://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/embeddings. The api-version query parameter is not allowed. </param>
/// <param name="authentication"> Specifies the authentication options to use when retrieving embeddings from the specified endpoint. </param>
/// <returns> A new <see cref="OnYourDataEndpointVectorizationSource"/> instance for mocking. </returns>
public static OnYourDataEndpointVectorizationSource OnYourDataEndpointVectorizationSource(Uri endpoint = null, OnYourDataVectorSearchAuthenticationOptions authentication = null)
    => new OnYourDataEndpointVectorizationSource(OnYourDataVectorizationSourceType.Endpoint, serializedAdditionalRawData: null, endpoint, authentication);
+
/// <summary> Initializes a new instance of <see cref="OnYourDataVectorSearchApiKeyAuthenticationOptions"/>. </summary>
/// <param name="key"> The API key to use for authentication. </param>
/// <returns> A new <see cref="OnYourDataVectorSearchApiKeyAuthenticationOptions"/> instance for mocking. </returns>
public static OnYourDataVectorSearchApiKeyAuthenticationOptions OnYourDataVectorSearchApiKeyAuthenticationOptions(string key = null)
    => new OnYourDataVectorSearchApiKeyAuthenticationOptions(OnYourDataVectorSearchAuthenticationType.ApiKey, serializedAdditionalRawData: null, key);
+
/// <summary> Initializes a new instance of <see cref="OnYourDataVectorSearchAccessTokenAuthenticationOptions"/>. </summary>
/// <param name="accessToken"> The access token to use for authentication. </param>
/// <returns> A new <see cref="OnYourDataVectorSearchAccessTokenAuthenticationOptions"/> instance for mocking. </returns>
public static OnYourDataVectorSearchAccessTokenAuthenticationOptions OnYourDataVectorSearchAccessTokenAuthenticationOptions(string accessToken = null)
    => new OnYourDataVectorSearchAccessTokenAuthenticationOptions(OnYourDataVectorSearchAuthenticationType.AccessToken, serializedAdditionalRawData: null, accessToken);
+
/// <summary> Initializes a new instance of <see cref="OnYourDataDeploymentNameVectorizationSource"/>. </summary>
/// <param name="deploymentName"> The embedding model deployment name within the same Azure OpenAI resource. This enables you to use vector search without Azure OpenAI api-key and without Azure OpenAI public network access. </param>
/// <param name="dimensions"> The number of dimensions the embeddings should have. Only supported in `text-embedding-3` and later models. </param>
/// <returns> A new <see cref="OnYourDataDeploymentNameVectorizationSource"/> instance for mocking. </returns>
public static OnYourDataDeploymentNameVectorizationSource OnYourDataDeploymentNameVectorizationSource(string deploymentName = null, int? dimensions = null)
    => new OnYourDataDeploymentNameVectorizationSource(OnYourDataVectorizationSourceType.DeploymentName, serializedAdditionalRawData: null, deploymentName, dimensions);
+
/// <summary> Initializes a new instance of <see cref="OnYourDataModelIdVectorizationSource"/>. </summary>
/// <param name="modelId"> The embedding model ID build inside the search service. Currently only supported by Elasticsearch®. </param>
/// <returns> A new <see cref="OnYourDataModelIdVectorizationSource"/> instance for mocking. </returns>
public static OnYourDataModelIdVectorizationSource OnYourDataModelIdVectorizationSource(string modelId = null)
    => new OnYourDataModelIdVectorizationSource(OnYourDataVectorizationSourceType.ModelId, serializedAdditionalRawData: null, modelId);
+
/// <summary> Initializes a new instance of <see cref="AzureCosmosDBChatExtensionConfiguration"/>. </summary>
/// <param name="parameters"> The parameters to use when configuring Azure OpenAI CosmosDB chat extensions. </param>
/// <returns> A new <see cref="AzureCosmosDBChatExtensionConfiguration"/> instance for mocking. </returns>
public static AzureCosmosDBChatExtensionConfiguration AzureCosmosDBChatExtensionConfiguration(AzureCosmosDBChatExtensionParameters parameters = null)
    => new AzureCosmosDBChatExtensionConfiguration(AzureChatExtensionType.AzureCosmosDB, serializedAdditionalRawData: null, parameters);
+
/// <summary> Initializes a new instance of <see cref="AzureCosmosDBChatExtensionParameters"/>. </summary>
/// <param name="documentCount"> The configured top number of documents to feature for the configured query. </param>
/// <param name="shouldRestrictResultScope"> Whether queries should be restricted to use of indexed data. </param>
/// <param name="strictness"> The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. </param>
/// <param name="maxSearchQueries">
/// The max number of rewritten queries should be send to search provider for one user message. If not specified,
/// the system will decide the number of queries to send.
/// </param>
/// <param name="allowPartialResult">
/// If specified as true, the system will allow partial search results to be used and the request fails if all the queries fail.
/// If not specified, or specified as false, the request will fail if any search query fails.
/// </param>
/// <param name="includeContexts"> The included properties of the output context. If not specified, the default value is `citations` and `intent`. </param>
/// <param name="authentication">
/// The authentication method to use when accessing the defined data source.
/// If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) authentication.
/// </param>
/// <param name="databaseName"> The MongoDB vCore database name to use with Azure Cosmos DB. </param>
/// <param name="containerName"> The name of the Azure Cosmos DB resource container. </param>
/// <param name="indexName"> The MongoDB vCore index name to use with Azure Cosmos DB. </param>
/// <param name="fieldMappingOptions"> Customized field mapping behavior to use when interacting with the search index. </param>
/// <param name="embeddingDependency"> The embedding dependency for vector search. </param>
/// <returns> A new <see cref="AzureCosmosDBChatExtensionParameters"/> instance for mocking. </returns>
public static AzureCosmosDBChatExtensionParameters AzureCosmosDBChatExtensionParameters(int? documentCount = null, bool? shouldRestrictResultScope = null, int? strictness = null, int? maxSearchQueries = null, bool? allowPartialResult = null, IEnumerable includeContexts = null, OnYourDataAuthenticationOptions authentication = null, string databaseName = null, string containerName = null, string indexName = null, AzureCosmosDBFieldMappingOptions fieldMappingOptions = null, OnYourDataVectorizationSource embeddingDependency = null)
{
    // Default to an empty collection so the model never holds a null list.
    includeContexts ??= new List();

    return new AzureCosmosDBChatExtensionParameters(
        documentCount,
        shouldRestrictResultScope,
        strictness,
        maxSearchQueries,
        allowPartialResult,
        // Guaranteed non-null by the ??= above; the redundant null-conditional was removed.
        includeContexts.ToList(),
        authentication,
        databaseName,
        containerName,
        indexName,
        fieldMappingOptions,
        embeddingDependency,
        serializedAdditionalRawData: null);
}
+
/// <summary> Initializes a new instance of <see cref="ElasticsearchChatExtensionConfiguration"/>. </summary>
/// <param name="parameters"> The parameters to use when configuring Elasticsearch®. </param>
/// <returns> A new <see cref="ElasticsearchChatExtensionConfiguration"/> instance for mocking. </returns>
public static ElasticsearchChatExtensionConfiguration ElasticsearchChatExtensionConfiguration(ElasticsearchChatExtensionParameters parameters = null)
    => new ElasticsearchChatExtensionConfiguration(AzureChatExtensionType.Elasticsearch, serializedAdditionalRawData: null, parameters);
+
/// <summary> Initializes a new instance of <see cref="ElasticsearchChatExtensionParameters"/>. </summary>
/// <param name="documentCount"> The configured top number of documents to feature for the configured query. </param>
/// <param name="shouldRestrictResultScope"> Whether queries should be restricted to use of indexed data. </param>
/// <param name="strictness"> The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. </param>
/// <param name="maxSearchQueries">
/// The max number of rewritten queries should be send to search provider for one user message. If not specified,
/// the system will decide the number of queries to send.
/// </param>
/// <param name="allowPartialResult">
/// If specified as true, the system will allow partial search results to be used and the request fails if all the queries fail.
/// If not specified, or specified as false, the request will fail if any search query fails.
/// </param>
/// <param name="includeContexts"> The included properties of the output context. If not specified, the default value is `citations` and `intent`. </param>
/// <param name="authentication">
/// The authentication method to use when accessing the defined data source.
/// If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) authentication.
/// </param>
/// <param name="endpoint"> The endpoint of Elasticsearch®. </param>
/// <param name="indexName"> The index name of Elasticsearch®. </param>
/// <param name="fieldMappingOptions"> The index field mapping options of Elasticsearch®. </param>
/// <param name="queryType"> The query type of Elasticsearch®. </param>
/// <param name="embeddingDependency"> The embedding dependency for vector search. </param>
/// <returns> A new <see cref="ElasticsearchChatExtensionParameters"/> instance for mocking. </returns>
public static ElasticsearchChatExtensionParameters ElasticsearchChatExtensionParameters(int? documentCount = null, bool? shouldRestrictResultScope = null, int? strictness = null, int? maxSearchQueries = null, bool? allowPartialResult = null, IEnumerable includeContexts = null, OnYourDataAuthenticationOptions authentication = null, Uri endpoint = null, string indexName = null, ElasticsearchIndexFieldMappingOptions fieldMappingOptions = null, ElasticsearchQueryType? queryType = null, OnYourDataVectorizationSource embeddingDependency = null)
{
    // Default to an empty collection so the model never holds a null list.
    includeContexts ??= new List();

    return new ElasticsearchChatExtensionParameters(
        documentCount,
        shouldRestrictResultScope,
        strictness,
        maxSearchQueries,
        allowPartialResult,
        // Guaranteed non-null by the ??= above; the redundant null-conditional was removed.
        includeContexts.ToList(),
        authentication,
        endpoint,
        indexName,
        fieldMappingOptions,
        queryType,
        embeddingDependency,
        serializedAdditionalRawData: null);
}
+
/// <summary> Initializes a new instance of <see cref="MongoDBChatExtensionConfiguration"/>. </summary>
/// <param name="parameters"> The parameters for the MongoDB chat extension. </param>
/// <returns> A new <see cref="MongoDBChatExtensionConfiguration"/> instance for mocking. </returns>
public static MongoDBChatExtensionConfiguration MongoDBChatExtensionConfiguration(MongoDBChatExtensionParameters parameters = null)
    => new MongoDBChatExtensionConfiguration(AzureChatExtensionType.MongoDB, serializedAdditionalRawData: null, parameters);
+
/// <summary> Initializes a new instance of <see cref="MongoDBChatExtensionParameters"/>. </summary>
/// <param name="documentCount"> The configured top number of documents to feature for the configured query. </param>
/// <param name="shouldRestrictResultScope"> Whether queries should be restricted to use of indexed data. </param>
/// <param name="strictness"> The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. </param>
/// <param name="maxSearchQueries">
/// The max number of rewritten queries should be send to search provider for one user message. If not specified,
/// the system will decide the number of queries to send.
/// </param>
/// <param name="allowPartialResult">
/// If specified as true, the system will allow partial search results to be used and the request fails if all the queries fail.
/// If not specified, or specified as false, the request will fail if any search query fails.
/// </param>
/// <param name="includeContexts"> The included properties of the output context. If not specified, the default value is `citations` and `intent`. </param>
/// <param name="authentication">
/// The authentication method to use when accessing the defined data source.
/// If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) authentication.
/// </param>
/// <param name="endpoint"> The endpoint name for MongoDB. </param>
/// <param name="collectionName"> The collection name for MongoDB. </param>
/// <param name="databaseName"> The database name for MongoDB. </param>
/// <param name="appName"> The app name for MongoDB. </param>
/// <param name="indexName"> The name of the MongoDB index. </param>
/// <param name="fieldsMapping">
/// Field mappings to apply to data used by the MongoDB data source.
/// Note that content and vector field mappings are required for MongoDB.
/// </param>
/// <param name="embeddingDependency"> The vectorization source to use with the MongoDB chat extension. </param>
/// <returns> A new <see cref="MongoDBChatExtensionParameters"/> instance for mocking. </returns>
public static MongoDBChatExtensionParameters MongoDBChatExtensionParameters(int? documentCount = null, bool? shouldRestrictResultScope = null, int? strictness = null, int? maxSearchQueries = null, bool? allowPartialResult = null, IEnumerable includeContexts = null, OnYourDataUsernameAndPasswordAuthenticationOptions authentication = null, string endpoint = null, string collectionName = null, string databaseName = null, string appName = null, string indexName = null, MongoDBChatExtensionParametersFieldsMapping fieldsMapping = null, BinaryData embeddingDependency = null)
{
    // Default to an empty collection so the model never holds a null list.
    includeContexts ??= new List();

    return new MongoDBChatExtensionParameters(
        documentCount,
        shouldRestrictResultScope,
        strictness,
        maxSearchQueries,
        allowPartialResult,
        // Guaranteed non-null by the ??= above; the redundant null-conditional was removed.
        includeContexts.ToList(),
        authentication,
        endpoint,
        collectionName,
        databaseName,
        appName,
        indexName,
        fieldsMapping,
        embeddingDependency,
        serializedAdditionalRawData: null);
}
+
/// <summary> Initializes a new instance of <see cref="PineconeChatExtensionConfiguration"/>. </summary>
/// <param name="parameters"> The parameters to use when configuring Azure OpenAI chat extensions. </param>
/// <returns> A new <see cref="PineconeChatExtensionConfiguration"/> instance for mocking. </returns>
public static PineconeChatExtensionConfiguration PineconeChatExtensionConfiguration(PineconeChatExtensionParameters parameters = null)
    => new PineconeChatExtensionConfiguration(AzureChatExtensionType.Pinecone, serializedAdditionalRawData: null, parameters);
+
/// <summary> Initializes a new instance of <see cref="PineconeChatExtensionParameters"/>. </summary>
/// <param name="documentCount"> The configured top number of documents to feature for the configured query. </param>
/// <param name="shouldRestrictResultScope"> Whether queries should be restricted to use of indexed data. </param>
/// <param name="strictness"> The configured strictness of the search relevance filtering. The higher of strictness, the higher of the precision but lower recall of the answer. </param>
/// <param name="maxSearchQueries">
/// The max number of rewritten queries should be send to search provider for one user message. If not specified,
/// the system will decide the number of queries to send.
/// </param>
/// <param name="allowPartialResult">
/// If specified as true, the system will allow partial search results to be used and the request fails if all the queries fail.
/// If not specified, or specified as false, the request will fail if any search query fails.
/// </param>
/// <param name="includeContexts"> The included properties of the output context. If not specified, the default value is `citations` and `intent`. </param>
/// <param name="authentication">
/// The authentication method to use when accessing the defined data source.
/// If not otherwise provided, On Your Data will attempt to use System Managed Identity (default credential) authentication.
/// </param>
/// <param name="environmentName"> The environment name of Pinecone. </param>
/// <param name="indexName"> The name of the Pinecone database index. </param>
/// <param name="fieldMappingOptions"> Customized field mapping behavior to use when interacting with the search index. </param>
/// <param name="embeddingDependency"> The embedding dependency for vector search. </param>
/// <returns> A new <see cref="PineconeChatExtensionParameters"/> instance for mocking. </returns>
public static PineconeChatExtensionParameters PineconeChatExtensionParameters(int? documentCount = null, bool? shouldRestrictResultScope = null, int? strictness = null, int? maxSearchQueries = null, bool? allowPartialResult = null, IEnumerable includeContexts = null, OnYourDataAuthenticationOptions authentication = null, string environmentName = null, string indexName = null, PineconeFieldMappingOptions fieldMappingOptions = null, OnYourDataVectorizationSource embeddingDependency = null)
{
    // Default to an empty collection so the model never holds a null list.
    includeContexts ??= new List();

    return new PineconeChatExtensionParameters(
        documentCount,
        shouldRestrictResultScope,
        strictness,
        maxSearchQueries,
        allowPartialResult,
        // Guaranteed non-null by the ??= above; the redundant null-conditional was removed.
        includeContexts.ToList(),
        authentication,
        environmentName,
        indexName,
        fieldMappingOptions,
        embeddingDependency,
        serializedAdditionalRawData: null);
}
+
/// <summary> Initializes a new instance of <see cref="ChatCompletionsJsonSchemaResponseFormat"/>. </summary>
/// <param name="jsonSchema"> The JSON schema definition for the response format. </param>
/// <returns> A new <see cref="ChatCompletionsJsonSchemaResponseFormat"/> instance for mocking. </returns>
public static ChatCompletionsJsonSchemaResponseFormat ChatCompletionsJsonSchemaResponseFormat(ChatCompletionsJsonSchemaResponseFormatJsonSchema jsonSchema = null)
    // The "json_schema" discriminator string is fixed for this response-format kind.
    => new ChatCompletionsJsonSchemaResponseFormat("json_schema", serializedAdditionalRawData: null, jsonSchema);
+
/// <summary> Initializes a new instance of <see cref="ChatCompletionsJsonSchemaResponseFormatJsonSchema"/>. </summary>
/// <param name="description"> A description of what the response format is for, used by the model to determine how to respond in the format. </param>
/// <param name="name"> The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. </param>
/// <param name="schema"> The schema for the response format, described as a JSON Schema object. </param>
/// <param name="strict"> Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. </param>
/// <returns> A new <see cref="ChatCompletionsJsonSchemaResponseFormatJsonSchema"/> instance for mocking. </returns>
public static ChatCompletionsJsonSchemaResponseFormatJsonSchema ChatCompletionsJsonSchemaResponseFormatJsonSchema(string description = null, string name = null, BinaryData schema = null, bool? strict = null)
    => new ChatCompletionsJsonSchemaResponseFormatJsonSchema(description, name, schema, strict, serializedAdditionalRawData: null);
+
/// <summary> Initializes a new instance of <see cref="ChatCompletionsFunctionToolDefinition"/>. </summary>
/// <param name="function"> The function definition details for the function tool. </param>
/// <returns> A new <see cref="ChatCompletionsFunctionToolDefinition"/> instance for mocking. </returns>
public static ChatCompletionsFunctionToolDefinition ChatCompletionsFunctionToolDefinition(ChatCompletionsFunctionToolDefinitionFunction function = null)
    // The "function" discriminator string is fixed for this tool-definition kind.
    => new ChatCompletionsFunctionToolDefinition("function", serializedAdditionalRawData: null, function);
+
/// <summary> Initializes a new instance of <see cref="ChatCompletionsFunctionToolDefinitionFunction"/>. </summary>
/// <param name="description"> A description of what the function does, used by the model to choose when and how to call the function. </param>
/// <param name="name"> The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. </param>
/// <param name="parameters"> The parameters the function accepts, described as a JSON Schema object. </param>
/// <param name="strict"> Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. </param>
/// <returns> A new <see cref="ChatCompletionsFunctionToolDefinitionFunction"/> instance for mocking. </returns>
public static ChatCompletionsFunctionToolDefinitionFunction ChatCompletionsFunctionToolDefinitionFunction(string description = null, string name = null, BinaryData parameters = null, bool? strict = null)
    => new ChatCompletionsFunctionToolDefinitionFunction(description, name, parameters, strict, serializedAdditionalRawData: null);
+
/// <summary> Initializes a new instance of <see cref="ChatCompletionsNamedFunctionToolSelection"/>. </summary>
/// <param name="function"> The function that should be called. </param>
/// <returns> A new <see cref="ChatCompletionsNamedFunctionToolSelection"/> instance for mocking. </returns>
public static ChatCompletionsNamedFunctionToolSelection ChatCompletionsNamedFunctionToolSelection(ChatCompletionsFunctionToolSelection function = null)
    // The "function" discriminator string is fixed for this tool-selection kind.
    => new ChatCompletionsNamedFunctionToolSelection("function", serializedAdditionalRawData: null, function);
+
/// <summary> Initializes a new instance of <see cref="PredictionContent"/>. </summary>
/// <param name="type">
/// The type of the predicted content you want to provide. This type is
/// currently always `content`.
/// </param>
/// <param name="content">
/// The content that should be matched when generating a model response.
/// If generated tokens would match this content, the entire model response
/// can be returned much more quickly.
/// </param>
/// <returns> A new <see cref="PredictionContent"/> instance for mocking. </returns>
public static PredictionContent PredictionContent(PredictionContentType type = default, BinaryData content = null)
    => new PredictionContent(type, content, serializedAdditionalRawData: null);
+
/// <summary> Initializes a new instance of <see cref="ChatCompletions"/>. </summary>
/// <param name="id"> A unique identifier associated with this chat completions response. </param>
/// <param name="created">
/// The first timestamp associated with generation activity for this completions response,
/// represented as seconds since the beginning of the Unix epoch of 00:00 on 1 Jan 1970.
/// </param>
/// <param name="choices">
/// The collection of completions choices associated with this completions response.
/// Generally, `n` choices are generated per provided prompt with a default value of 1.
/// Token limits and other settings may limit the number of choices generated.
/// </param>
/// <param name="model"> The model name used for this completions request. </param>
/// <param name="promptFilterResults">
/// Content filtering results for zero or more prompts in the request. In a streaming request,
/// results for different prompts may arrive at different times or in different orders.
/// </param>
/// <param name="systemFingerprint">
/// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that
/// might impact determinism.
/// </param>
/// <param name="usage"> Usage information for tokens processed and generated as part of this completions operation. </param>
/// <returns> A new <see cref="ChatCompletions"/> instance for mocking. </returns>
public static ChatCompletions ChatCompletions(string id = null, DateTimeOffset created = default, IEnumerable choices = null, string model = null, IEnumerable promptFilterResults = null, string systemFingerprint = null, CompletionsUsage usage = null)
{
    // Default both collections to empty so the model never holds null lists.
    choices ??= new List();
    promptFilterResults ??= new List();

    return new ChatCompletions(
        id,
        created,
        // Both guaranteed non-null by the ??= assignments above; the redundant
        // null-conditional operators were removed.
        choices.ToList(),
        model,
        promptFilterResults.ToList(),
        systemFingerprint,
        usage,
        serializedAdditionalRawData: null);
}
+
/// <summary> Initializes a new instance of <see cref="ChatChoice"/>. </summary>
/// <param name="message"> The chat message for a given chat completions prompt. </param>
/// <param name="logProbabilityInfo"> The log probability information for this choice, as enabled via the 'logprobs' request option. </param>
/// <param name="index"> The ordered index associated with this chat completions choice. </param>
/// <param name="finishReason"> The reason that this chat completions choice completed its generated. </param>
/// <param name="internalStreamingDeltaMessage"> The delta message content for a streaming response. </param>
/// <param name="contentFilterResults">
/// Information about the content filtering category (hate, sexual, violence, self_harm), if it
/// has been detected, as well as the severity level (very_low, low, medium, high-scale that
/// determines the intensity and risk level of harmful content) and if it has been filtered or not.
/// </param>
/// <param name="enhancements">
/// Represents the output results of Azure OpenAI enhancements to chat completions, as configured via the matching input
/// provided in the request. This supplementary information is only available when using Azure OpenAI and only when the
/// request is configured to use enhancements.
/// </param>
/// <returns> A new <see cref="ChatChoice"/> instance for mocking. </returns>
public static ChatChoice ChatChoice(ChatResponseMessage message = null, ChatChoiceLogProbabilityInfo logProbabilityInfo = null, int index = default, CompletionsFinishReason? finishReason = null, ChatResponseMessage internalStreamingDeltaMessage = null, ContentFilterResultsForChoice contentFilterResults = null, AzureChatEnhancements enhancements = null)
    // Pure pass-through to the internal constructor; no raw data is attached for mocks.
    => new ChatChoice(message, logProbabilityInfo, index, finishReason, internalStreamingDeltaMessage, contentFilterResults, enhancements, serializedAdditionalRawData: null);
+
/// <summary> Initializes a new instance of <see cref="ChatResponseMessage"/>. </summary>
/// <param name="role"> The chat role associated with the message. </param>
/// <param name="refusal"> The refusal message generated by the model. </param>
/// <param name="content"> The content of the message. </param>
/// <param name="toolCalls">
/// The tool calls that must be resolved and have their outputs appended to subsequent input messages for the chat
/// completions request to resolve as configured.
/// </param>
/// <param name="functionCall">
/// The function call that must be resolved and have its output appended to subsequent input messages for the chat
/// completions request to resolve as configured.
/// </param>
/// <param name="audio">
/// If the audio output modality is requested, this object contains data
/// about the audio response from the model.
/// </param>
/// <param name="azureExtensionsContext">
/// If Azure OpenAI chat extensions are configured, this array represents the incremental steps performed by those
/// extensions while processing the chat completions request.
/// </param>
/// <returns> A new <see cref="ChatResponseMessage"/> instance for mocking. </returns>
public static ChatResponseMessage ChatResponseMessage(ChatRole role = default, string refusal = null, string content = null, IEnumerable toolCalls = null, FunctionCall functionCall = null, AudioResponseData audio = null, AzureChatExtensionsMessageContext azureExtensionsContext = null)
{
    // Default to an empty collection so the model never holds a null list.
    toolCalls ??= new List();

    return new ChatResponseMessage(
        role,
        refusal,
        content,
        // Guaranteed non-null by the ??= above; the redundant null-conditional was removed.
        toolCalls.ToList(),
        functionCall,
        audio,
        azureExtensionsContext,
        serializedAdditionalRawData: null);
}
+
/// <summary> Initializes a new instance of <see cref="AudioResponseData"/>. </summary>
/// <param name="id"> Unique identifier for this audio response. </param>
/// <param name="expiresAt">
/// The Unix timestamp (in seconds) for when this audio response
/// will no longer be accessible on the server for use in multi-turn
/// conversations.
/// </param>
/// <param name="data">
/// Base64 encoded audio bytes generated by the model, in the format
/// specified in the request.
/// </param>
/// <param name="transcript"> Transcript of the audio generated by the model. </param>
/// <returns> A new <see cref="AudioResponseData"/> instance for mocking. </returns>
public static AudioResponseData AudioResponseData(string id = null, DateTimeOffset expiresAt = default, string data = null, string transcript = null)
    => new AudioResponseData(id, expiresAt, data, transcript, serializedAdditionalRawData: null);
+
/// <summary> Initializes a new instance of <see cref="AzureChatExtensionsMessageContext"/>. </summary>
/// <param name="citations">
/// The contextual information associated with the Azure chat extensions used for a chat completions request.
/// These messages describe the data source retrievals, plugin invocations, and other intermediate steps taken in the
/// course of generating a chat completions response that was augmented by capabilities from Azure OpenAI chat
/// extensions.
/// </param>
/// <param name="intent"> The detected intent from the chat history, used to pass to the next turn to carry over the context. </param>
/// <param name="allRetrievedDocuments"> All the retrieved documents. </param>
/// <returns> A new <see cref="AzureChatExtensionsMessageContext"/> instance for mocking. </returns>
public static AzureChatExtensionsMessageContext AzureChatExtensionsMessageContext(IEnumerable citations = null, string intent = null, IEnumerable allRetrievedDocuments = null)
{
    // Default both collections to empty so the model never holds null lists.
    citations ??= new List();
    allRetrievedDocuments ??= new List();

    // Both guaranteed non-null by the ??= assignments above; the redundant
    // null-conditional operators were removed.
    return new AzureChatExtensionsMessageContext(citations.ToList(), intent, allRetrievedDocuments.ToList(), serializedAdditionalRawData: null);
}
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.AzureChatExtensionDataSourceResponseCitation"/>. </summary>
+ /// <param name="content"> The content of the citation. </param>
+ /// <param name="title"> The title of the citation. </param>
+ /// <param name="url"> The URL of the citation. </param>
+ /// <param name="filepath"> The file path of the citation. </param>
+ /// <param name="chunkId"> The chunk ID of the citation. </param>
+ /// <param name="rerankScore"> The rerank score of the retrieved document. </param>
+ /// <returns> A new <see cref="AI.OpenAI.AzureChatExtensionDataSourceResponseCitation"/> instance for mocking. </returns>
+ public static AzureChatExtensionDataSourceResponseCitation AzureChatExtensionDataSourceResponseCitation(string content = null, string title = null, string url = null, string filepath = null, string chunkId = null, double? rerankScore = null)
+ {
+ return new AzureChatExtensionDataSourceResponseCitation(
+ content,
+ title,
+ url,
+ filepath,
+ chunkId,
+ rerankScore,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.AzureChatExtensionRetrievedDocument"/>. </summary>
+ /// <param name="content"> The content of the citation. </param>
+ /// <param name="title"> The title of the citation. </param>
+ /// <param name="url"> The URL of the citation. </param>
+ /// <param name="filepath"> The file path of the citation. </param>
+ /// <param name="chunkId"> The chunk ID of the citation. </param>
+ /// <param name="rerankScore"> The rerank score of the retrieved document. </param>
+ /// <param name="searchQueries"> The search queries used to retrieve the document. </param>
+ /// <param name="dataSourceIndex"> The index of the data source. </param>
+ /// <param name="originalSearchScore"> The original search score of the retrieved document. </param>
+ /// <param name="filterReason">
+ /// Represents the rationale for filtering the document. If the document does not undergo filtering,
+ /// this field will remain unset.
+ /// </param>
+ /// <returns> A new <see cref="AI.OpenAI.AzureChatExtensionRetrievedDocument"/> instance for mocking. </returns>
+ public static AzureChatExtensionRetrievedDocument AzureChatExtensionRetrievedDocument(string content = null, string title = null, string url = null, string filepath = null, string chunkId = null, double? rerankScore = null, IEnumerable<string> searchQueries = null, int dataSourceIndex = default, double? originalSearchScore = null, AzureChatExtensionRetrieveDocumentFilterReason? filterReason = null)
+ {
+ searchQueries ??= new List<string>();
+
+ return new AzureChatExtensionRetrievedDocument(
+ content,
+ title,
+ url,
+ filepath,
+ chunkId,
+ rerankScore,
+ searchQueries?.ToList(),
+ dataSourceIndex,
+ originalSearchScore,
+ filterReason,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.ChatChoiceLogProbabilityInfo"/>. </summary>
+ /// <param name="tokenLogProbabilityResults"> The list of log probability information entries for the choice's message content tokens, as requested via the 'logprobs' option. </param>
+ /// <param name="refusal"> The list of log probability information entries for the choice's message refusal message tokens, as requested via the 'logprobs' option. </param>
+ /// <returns> A new <see cref="AI.OpenAI.ChatChoiceLogProbabilityInfo"/> instance for mocking. </returns>
+ public static ChatChoiceLogProbabilityInfo ChatChoiceLogProbabilityInfo(IEnumerable<ChatTokenLogProbabilityResult> tokenLogProbabilityResults = null, IEnumerable<ChatTokenLogProbabilityResult> refusal = null)
+ {
+ tokenLogProbabilityResults ??= new List<ChatTokenLogProbabilityResult>();
+ refusal ??= new List<ChatTokenLogProbabilityResult>();
+
+ return new ChatChoiceLogProbabilityInfo(tokenLogProbabilityResults?.ToList(), refusal?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.ChatTokenLogProbabilityResult"/>. </summary>
+ /// <param name="token"> The message content token. </param>
+ /// <param name="logProbability"> The log probability of the message content token. </param>
+ /// <param name="utf8ByteValues"> A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token. </param>
+ /// <param name="topLogProbabilityEntries"> The list of most likely tokens and their log probability information, as requested via 'top_logprobs'. </param>
+ /// <returns> A new <see cref="AI.OpenAI.ChatTokenLogProbabilityResult"/> instance for mocking. </returns>
+ public static ChatTokenLogProbabilityResult ChatTokenLogProbabilityResult(string token = null, float logProbability = default, IEnumerable<int> utf8ByteValues = null, IEnumerable<ChatTokenLogProbabilityInfo> topLogProbabilityEntries = null)
+ {
+ utf8ByteValues ??= new List<int>();
+ topLogProbabilityEntries ??= new List<ChatTokenLogProbabilityInfo>();
+
+ return new ChatTokenLogProbabilityResult(token, logProbability, utf8ByteValues?.ToList(), topLogProbabilityEntries?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.ChatTokenLogProbabilityInfo"/>. </summary>
+ /// <param name="token"> The message content token. </param>
+ /// <param name="logProbability"> The log probability of the message content token. </param>
+ /// <param name="utf8ByteValues"> A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token. </param>
+ /// <returns> A new <see cref="AI.OpenAI.ChatTokenLogProbabilityInfo"/> instance for mocking. </returns>
+ public static ChatTokenLogProbabilityInfo ChatTokenLogProbabilityInfo(string token = null, float logProbability = default, IEnumerable<int> utf8ByteValues = null)
+ {
+ utf8ByteValues ??= new List<int>();
+
+ return new ChatTokenLogProbabilityInfo(token, logProbability, utf8ByteValues?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.AzureChatEnhancements"/>. </summary>
+ /// <param name="grounding"> The grounding enhancement that returns the bounding box of the objects detected in the image. </param>
+ /// <returns> A new <see cref="AI.OpenAI.AzureChatEnhancements"/> instance for mocking. </returns>
+ public static AzureChatEnhancements AzureChatEnhancements(AzureGroundingEnhancement grounding = null)
+ {
+ return new AzureChatEnhancements(grounding, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.AzureGroundingEnhancement"/>. </summary>
+ /// <param name="lines"> The lines of text detected by the grounding enhancement. </param>
+ /// <returns> A new <see cref="AI.OpenAI.AzureGroundingEnhancement"/> instance for mocking. </returns>
+ public static AzureGroundingEnhancement AzureGroundingEnhancement(IEnumerable<AzureGroundingEnhancementLine> lines = null)
+ {
+ lines ??= new List<AzureGroundingEnhancementLine>();
+
+ return new AzureGroundingEnhancement(lines?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.AzureGroundingEnhancementLine"/>. </summary>
+ /// <param name="text"> The text within the line. </param>
+ /// <param name="spans"> An array of spans that represent detected objects and its bounding box information. </param>
+ /// <returns> A new <see cref="AI.OpenAI.AzureGroundingEnhancementLine"/> instance for mocking. </returns>
+ public static AzureGroundingEnhancementLine AzureGroundingEnhancementLine(string text = null, IEnumerable<AzureGroundingEnhancementLineSpan> spans = null)
+ {
+ spans ??= new List<AzureGroundingEnhancementLineSpan>();
+
+ return new AzureGroundingEnhancementLine(text, spans?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.AzureGroundingEnhancementLineSpan"/>. </summary>
+ /// <param name="text"> The text content of the span that represents the detected object. </param>
+ /// <param name="offset">
+ /// The character offset within the text where the span begins. This offset is defined as the position of the first
+ /// character of the span, counting from the start of the text as Unicode codepoints.
+ /// </param>
+ /// <param name="length"> The length of the span in characters, measured in Unicode codepoints. </param>
+ /// <param name="polygon"> An array of objects representing points in the polygon that encloses the detected object. </param>
+ /// <returns> A new <see cref="AI.OpenAI.AzureGroundingEnhancementLineSpan"/> instance for mocking. </returns>
+ public static AzureGroundingEnhancementLineSpan AzureGroundingEnhancementLineSpan(string text = null, int offset = default, int length = default, IEnumerable<AzureGroundingEnhancementCoordinatePoint> polygon = null)
+ {
+ polygon ??= new List<AzureGroundingEnhancementCoordinatePoint>();
+
+ return new AzureGroundingEnhancementLineSpan(text, offset, length, polygon?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.AzureGroundingEnhancementCoordinatePoint"/>. </summary>
+ /// <param name="x"> The x-coordinate (horizontal axis) of the point. </param>
+ /// <param name="y"> The y-coordinate (vertical axis) of the point. </param>
+ /// <returns> A new <see cref="AI.OpenAI.AzureGroundingEnhancementCoordinatePoint"/> instance for mocking. </returns>
+ public static AzureGroundingEnhancementCoordinatePoint AzureGroundingEnhancementCoordinatePoint(float x = default, float y = default)
+ {
+ return new AzureGroundingEnhancementCoordinatePoint(x, y, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.ImageGenerations"/>. </summary>
+ /// <param name="created">
+ /// A timestamp representing when this operation was started.
+ /// Expressed in seconds since the Unix epoch of 1970-01-01T00:00:00+0000.
+ /// </param>
+ /// <param name="data"> The images generated by the operation. </param>
+ /// <returns> A new <see cref="AI.OpenAI.ImageGenerations"/> instance for mocking. </returns>
+ public static ImageGenerations ImageGenerations(DateTimeOffset created = default, IEnumerable<ImageGenerationData> data = null)
+ {
+ data ??= new List<ImageGenerationData>();
+
+ return new ImageGenerations(created, data?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.ImageGenerationData"/>. </summary>
+ /// <param name="url"> The URL that provides temporary access to download the generated image. </param>
+ /// <param name="base64Data"> The complete data for an image, represented as a base64-encoded string. </param>
+ /// <param name="contentFilterResults"> Information about the content filtering results. </param>
+ /// <param name="revisedPrompt">
+ /// The final prompt used by the model to generate the image.
+ /// Only provided with dall-3-models and only when revisions were made to the prompt.
+ /// </param>
+ /// <param name="promptFilterResults">
+ /// Information about the content filtering category (hate, sexual, violence, self_harm), if
+ /// it has been detected, as well as the severity level (very_low, low, medium, high-scale
+ /// that determines the intensity and risk level of harmful content) and if it has been
+ /// filtered or not. Information about jailbreak content and profanity, if it has been detected,
+ /// and if it has been filtered or not. And information about customer block list, if it has
+ /// been filtered and its id.
+ /// </param>
+ /// <returns> A new <see cref="AI.OpenAI.ImageGenerationData"/> instance for mocking. </returns>
+ public static ImageGenerationData ImageGenerationData(Uri url = null, string base64Data = null, ImageGenerationContentFilterResults contentFilterResults = null, string revisedPrompt = null, ImageGenerationPromptFilterResults promptFilterResults = null)
+ {
+ return new ImageGenerationData(
+ url,
+ base64Data,
+ contentFilterResults,
+ revisedPrompt,
+ promptFilterResults,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.ImageGenerationContentFilterResults"/>. </summary>
+ /// <param name="sexual">
+ /// Describes language related to anatomical organs and genitals, romantic relationships,
+ /// acts portrayed in erotic or affectionate terms, physical sexual acts, including
+ /// those portrayed as an assault or a forced sexual violent act against one’s will,
+ /// prostitution, pornography, and abuse.
+ /// </param>
+ /// <param name="violence">
+ /// Describes language related to physical actions intended to hurt, injure, damage, or
+ /// kill someone or something; describes weapons, etc.
+ /// </param>
+ /// <param name="hate">
+ /// Describes language attacks or uses that include pejorative or discriminatory language
+ /// with reference to a person or identity group on the basis of certain differentiating
+ /// attributes of these groups including but not limited to race, ethnicity, nationality,
+ /// gender identity and expression, sexual orientation, religion, immigration status, ability
+ /// status, personal appearance, and body size.
+ /// </param>
+ /// <param name="selfHarm">
+ /// Describes language related to physical actions intended to purposely hurt, injure,
+ /// or damage one’s body, or kill oneself.
+ /// </param>
+ /// <returns> A new <see cref="AI.OpenAI.ImageGenerationContentFilterResults"/> instance for mocking. </returns>
+ public static ImageGenerationContentFilterResults ImageGenerationContentFilterResults(ContentFilterResult sexual = null, ContentFilterResult violence = null, ContentFilterResult hate = null, ContentFilterResult selfHarm = null)
+ {
+ return new ImageGenerationContentFilterResults(sexual, violence, hate, selfHarm, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.ImageGenerationPromptFilterResults"/>. </summary>
+ /// <param name="sexual">
+ /// Describes language related to anatomical organs and genitals, romantic relationships,
+ /// acts portrayed in erotic or affectionate terms, physical sexual acts, including
+ /// those portrayed as an assault or a forced sexual violent act against one’s will,
+ /// prostitution, pornography, and abuse.
+ /// </param>
+ /// <param name="violence">
+ /// Describes language related to physical actions intended to hurt, injure, damage, or
+ /// kill someone or something; describes weapons, etc.
+ /// </param>
+ /// <param name="hate">
+ /// Describes language attacks or uses that include pejorative or discriminatory language
+ /// with reference to a person or identity group on the basis of certain differentiating
+ /// attributes of these groups including but not limited to race, ethnicity, nationality,
+ /// gender identity and expression, sexual orientation, religion, immigration status, ability
+ /// status, personal appearance, and body size.
+ /// </param>
+ /// <param name="selfHarm">
+ /// Describes language related to physical actions intended to purposely hurt, injure,
+ /// or damage one’s body, or kill oneself.
+ /// </param>
+ /// <param name="profanity"> Describes whether profanity was detected. </param>
+ /// <param name="jailbreak"> Whether a jailbreak attempt was detected in the prompt. </param>
+ /// <param name="customBlocklists"> Information about customer block lists and if something was detected the associated list ID. </param>
+ /// <returns> A new <see cref="AI.OpenAI.ImageGenerationPromptFilterResults"/> instance for mocking. </returns>
+ public static ImageGenerationPromptFilterResults ImageGenerationPromptFilterResults(ContentFilterResult sexual = null, ContentFilterResult violence = null, ContentFilterResult hate = null, ContentFilterResult selfHarm = null, ContentFilterDetectionResult profanity = null, ContentFilterDetectionResult jailbreak = null, ContentFilterDetailedResults customBlocklists = null)
+ {
+ return new ImageGenerationPromptFilterResults(
+ sexual,
+ violence,
+ hate,
+ selfHarm,
+ profanity,
+ jailbreak,
+ customBlocklists,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.SpeechGenerationOptions"/>. </summary>
+ /// <param name="input"> The text to generate audio for. The maximum length is 4096 characters. </param>
+ /// <param name="voice"> The voice to use for text-to-speech. </param>
+ /// <param name="responseFormat"> The audio output format for the spoken text. By default, the MP3 format will be used. </param>
+ /// <param name="speed"> The speed of speech for generated audio. Values are valid in the range from 0.25 to 4.0, with 1.0 the default and higher values corresponding to faster speech. </param>
+ /// <param name="deploymentName"> The model to use for this text-to-speech request. </param>
+ /// <returns> A new <see cref="AI.OpenAI.SpeechGenerationOptions"/> instance for mocking. </returns>
+ public static SpeechGenerationOptions SpeechGenerationOptions(string input = null, SpeechVoice voice = default, SpeechGenerationResponseFormat? responseFormat = null, float? speed = null, string deploymentName = null)
+ {
+ return new SpeechGenerationOptions(
+ input,
+ voice,
+ responseFormat,
+ speed,
+ deploymentName,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.FileListResponse"/>. </summary>
+ /// <param name="object"> The object type, which is always 'list'. </param>
+ /// <param name="data"> The files returned for the request. </param>
+ /// <returns> A new <see cref="AI.OpenAI.FileListResponse"/> instance for mocking. </returns>
+ public static FileListResponse FileListResponse(FileListResponseObject @object = default, IEnumerable<OpenAIFile> data = null)
+ {
+ data ??= new List<OpenAIFile>();
+
+ return new FileListResponse(@object, data?.ToList(), serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.OpenAIFile"/>. </summary>
+ /// <param name="object"> The object type, which is always 'file'. </param>
+ /// <param name="id"> The identifier, which can be referenced in API endpoints. </param>
+ /// <param name="bytes"> The size of the file, in bytes. </param>
+ /// <param name="filename"> The name of the file. </param>
+ /// <param name="createdAt"> The Unix timestamp, in seconds, representing when this object was created. </param>
+ /// <param name="purpose"> The intended purpose of a file. </param>
+ /// <param name="status"> The state of the file. This field is available in Azure OpenAI only. </param>
+ /// <param name="statusDetails"> The error message with details in case processing of this file failed. This field is available in Azure OpenAI only. </param>
+ /// <returns> A new <see cref="AI.OpenAI.OpenAIFile"/> instance for mocking. </returns>
+ public static OpenAIFile OpenAIFile(OpenAIFileObject @object = default, string id = null, int bytes = default, string filename = null, DateTimeOffset createdAt = default, FilePurpose purpose = default, FileState? status = null, string statusDetails = null)
+ {
+ return new OpenAIFile(
+ @object,
+ id,
+ bytes,
+ filename,
+ createdAt,
+ purpose,
+ status,
+ statusDetails,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.UploadFileRequest"/>. </summary>
+ /// <param name="data"> The file data (not filename) to upload. </param>
+ /// <param name="purpose"> The intended purpose of the file. </param>
+ /// <param name="filename"> A filename to associate with the uploaded data. </param>
+ /// <returns> A new <see cref="AI.OpenAI.UploadFileRequest"/> instance for mocking. </returns>
+ public static UploadFileRequest UploadFileRequest(Stream data = null, FilePurpose purpose = default, string filename = null)
+ {
+ return new UploadFileRequest(data, purpose, filename, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.FileDeletionStatus"/>. </summary>
+ /// <param name="id"> The ID of the resource specified for deletion. </param>
+ /// <param name="deleted"> A value indicating whether deletion was successful. </param>
+ /// <param name="object"> The object type, which is always 'file'. </param>
+ /// <returns> A new <see cref="AI.OpenAI.FileDeletionStatus"/> instance for mocking. </returns>
+ public static FileDeletionStatus FileDeletionStatus(string id = null, bool deleted = default, FileDeletionStatusObject @object = default)
+ {
+ return new FileDeletionStatus(id, deleted, @object, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.OpenAIPageableListOfBatch"/>. </summary>
+ /// <param name="object"> The object type, which is always list. </param>
+ /// <param name="data"> The requested list of items. </param>
+ /// <param name="firstId"> The first ID represented in this list. </param>
+ /// <param name="lastId"> The last ID represented in this list. </param>
+ /// <param name="hasMore"> A value indicating whether there are additional values available not captured in this list. </param>
+ /// <returns> A new <see cref="AI.OpenAI.OpenAIPageableListOfBatch"/> instance for mocking. </returns>
+ public static OpenAIPageableListOfBatch OpenAIPageableListOfBatch(OpenAIPageableListOfBatchObject @object = default, IEnumerable<Batch> data = null, string firstId = null, string lastId = null, bool? hasMore = null)
+ {
+ data ??= new List<Batch>();
+
+ return new OpenAIPageableListOfBatch(
+ @object,
+ data?.ToList(),
+ firstId,
+ lastId,
+ hasMore,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.BatchCreateRequest"/>. </summary>
+ /// <param name="endpoint"> The API endpoint used by the batch. </param>
+ /// <param name="inputFileId"> The ID of the input file for the batch. </param>
+ /// <param name="completionWindow"> The time frame within which the batch should be processed. </param>
+ /// <param name="metadata"> A set of key-value pairs that can be attached to the batch. This can be useful for storing additional information about the batch in a structured format. </param>
+ /// <returns> A new <see cref="AI.OpenAI.BatchCreateRequest"/> instance for mocking. </returns>
+ public static BatchCreateRequest BatchCreateRequest(string endpoint = null, string inputFileId = null, string completionWindow = null, IDictionary<string, string> metadata = null)
+ {
+ metadata ??= new Dictionary<string, string>();
+
+ return new BatchCreateRequest(endpoint, inputFileId, completionWindow, metadata, serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.Upload"/>. </summary>
+ /// <param name="id"> The Upload unique identifier, which can be referenced in API endpoints. </param>
+ /// <param name="createdAt"> The Unix timestamp (in seconds) for when the Upload was created. </param>
+ /// <param name="filename"> The name of the file to be uploaded. </param>
+ /// <param name="bytes"> The intended number of bytes to be uploaded. </param>
+ /// <param name="purpose"> The intended purpose of the file. </param>
+ /// <param name="status"> The status of the Upload. </param>
+ /// <param name="expiresAt"> The Unix timestamp (in seconds) for when the Upload was created. </param>
+ /// <param name="object"> The object type, which is always "upload". </param>
+ /// <param name="file"> The ready File object after the Upload is completed. </param>
+ /// <returns> A new <see cref="AI.OpenAI.Upload"/> instance for mocking. </returns>
+ public static Upload Upload(string id = null, DateTimeOffset createdAt = default, string filename = null, long bytes = default, UploadPurpose purpose = default, UploadStatus status = default, DateTimeOffset expiresAt = default, UploadObject? @object = null, OpenAIFile file = null)
+ {
+ return new Upload(
+ id,
+ createdAt,
+ filename,
+ bytes,
+ purpose,
+ status,
+ expiresAt,
+ @object,
+ file,
+ serializedAdditionalRawData: null);
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AI.OpenAI.UploadPart"/>. </summary>
+ /// <param name="id"> The upload Part unique identifier, which can be referenced in API endpoints. </param>
+ /// <param name="createdAt"> The Unix timestamp (in seconds) for when the Part was created. </param>
+ /// <param name="uploadId"> The ID of the Upload object that this Part was added to. </param>
+ /// <param name="object"> The object type, which is always `upload.part`. </param>
+ /// <param name="azureBlockId"> Azure only field. </param>
+ /// <returns> A new <see cref="AI.OpenAI.UploadPart"/> instance for mocking. </returns>
+ public static UploadPart UploadPart(string id = null, DateTimeOffset createdAt = default, string uploadId = null, UploadPartObject @object = default, string azureBlockId = null)
+ {
+ return new UploadPart(
+ id,
+ createdAt,
+ uploadId,
+ @object,
+ azureBlockId,
+ serializedAdditionalRawData: null);
+ }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AddUploadPartRequest.Serialization.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AddUploadPartRequest.Serialization.cs
new file mode 100644
index 000000000000..22e6ff2fa9da
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AddUploadPartRequest.Serialization.cs
@@ -0,0 +1,174 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.IO;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.AI.OpenAI
+{
+ public partial class AddUploadPartRequest : IUtf8JsonSerializable, IJsonModel<AddUploadPartRequest>
+ {
+ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<AddUploadPartRequest>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+ void IJsonModel<AddUploadPartRequest>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ writer.WriteStartObject();
+ JsonModelWriteCore(writer, options);
+ writer.WriteEndObject();
+ }
+
+ /// <param name="writer"> The JSON writer. </param>
+ /// <param name="options"> The client options for reading and writing models. </param>
+ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AddUploadPartRequest>)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AddUploadPartRequest)} does not support writing '{format}' format.");
+ }
+
+ writer.WritePropertyName("data"u8);
+#if NET6_0_OR_GREATER
+ writer.WriteRawValue(global::System.BinaryData.FromStream(Data));
+#else
+ using (JsonDocument document = JsonDocument.Parse(BinaryData.FromStream(Data), ModelSerializationExtensions.JsonDocumentOptions))
+ {
+ JsonSerializer.Serialize(writer, document.RootElement);
+ }
+#endif
+ if (options.Format != "W" && _serializedAdditionalRawData != null)
+ {
+ foreach (var item in _serializedAdditionalRawData)
+ {
+ writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+ writer.WriteRawValue(item.Value);
+#else
+ using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions))
+ {
+ JsonSerializer.Serialize(writer, document.RootElement);
+ }
+#endif
+ }
+ }
+ }
+
+ AddUploadPartRequest IJsonModel<AddUploadPartRequest>.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AddUploadPartRequest>)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AddUploadPartRequest)} does not support reading '{format}' format.");
+ }
+
+ using JsonDocument document = JsonDocument.ParseValue(ref reader);
+ return DeserializeAddUploadPartRequest(document.RootElement, options);
+ }
+
+ internal static AddUploadPartRequest DeserializeAddUploadPartRequest(JsonElement element, ModelReaderWriterOptions options = null)
+ {
+ options ??= ModelSerializationExtensions.WireOptions;
+
+ if (element.ValueKind == JsonValueKind.Null)
+ {
+ return null;
+ }
+ Stream data = default;
+ IDictionary<string, BinaryData> serializedAdditionalRawData = default;
+ Dictionary<string, BinaryData> rawDataDictionary = new Dictionary<string, BinaryData>();
+ foreach (var property in element.EnumerateObject())
+ {
+ if (property.NameEquals("data"u8))
+ {
+ data = BinaryData.FromString(property.Value.GetRawText()).ToStream();
+ continue;
+ }
+ if (options.Format != "W")
+ {
+ rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
+ }
+ }
+ serializedAdditionalRawData = rawDataDictionary;
+ return new AddUploadPartRequest(data, serializedAdditionalRawData);
+ }
+
+ private BinaryData SerializeMultipart(ModelReaderWriterOptions options)
+ {
+ using MultipartFormDataRequestContent content = ToMultipartRequestContent();
+ using MemoryStream stream = new MemoryStream();
+ content.WriteTo(stream);
+ if (stream.Position > int.MaxValue)
+ {
+ return BinaryData.FromStream(stream);
+ }
+ else
+ {
+ return new BinaryData(stream.GetBuffer().AsMemory(0, (int)stream.Position));
+ }
+ }
+
+ internal virtual MultipartFormDataRequestContent ToMultipartRequestContent()
+ {
+ MultipartFormDataRequestContent content = new MultipartFormDataRequestContent();
+ content.Add(Data, "data", "data", "application/octet-stream");
+ return content;
+ }
+
+ BinaryData IPersistableModel<AddUploadPartRequest>.Write(ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AddUploadPartRequest>)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ return ModelReaderWriter.Write(this, options);
+ case "MFD":
+ return SerializeMultipart(options);
+ default:
+ throw new FormatException($"The model {nameof(AddUploadPartRequest)} does not support writing '{options.Format}' format.");
+ }
+ }
+
+ AddUploadPartRequest IPersistableModel<AddUploadPartRequest>.Create(BinaryData data, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AddUploadPartRequest>)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ {
+ using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAddUploadPartRequest(document.RootElement, options);
+ }
+ default:
+ throw new FormatException($"The model {nameof(AddUploadPartRequest)} does not support reading '{options.Format}' format.");
+ }
+ }
+
+ string IPersistableModel<AddUploadPartRequest>.GetFormatFromOptions(ModelReaderWriterOptions options) => "MFD";
+
+ /// <summary> Deserializes the model from a raw response. </summary>
+ /// <param name="response"> The response to deserialize the model from. </param>
+ internal static AddUploadPartRequest FromResponse(Response response)
+ {
+ using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAddUploadPartRequest(document.RootElement);
+ }
+
+ /// <summary> Convert into a <see cref="RequestContent"/>. </summary>
+ internal virtual RequestContent ToRequestContent()
+ {
+ var content = new Utf8JsonRequestContent();
+ content.JsonWriter.WriteObjectValue<AddUploadPartRequest>(this, ModelSerializationExtensions.WireOptions);
+ return content;
+ }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AddUploadPartRequest.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AddUploadPartRequest.cs
new file mode 100644
index 000000000000..9f8e672d50eb
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AddUploadPartRequest.cs
@@ -0,0 +1,76 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+using System.IO;
+
+namespace Azure.AI.OpenAI
+{
+ /// <summary> The multipart/form-data request body of a data part addition request for an upload. </summary>
+ public partial class AddUploadPartRequest
+ {
+ /// <summary>
+ /// Keeps track of any properties unknown to the library.
+ /// <para>
+ /// To assign an object to the value of this property use <see cref="BinaryData.FromObjectAsJson{T}(T, System.Text.Json.JsonSerializerOptions?)"/>.
+ /// </para>
+ /// <para>
+ /// To assign an already formatted json string to this property use <see cref="BinaryData.FromString(string)"/>.
+ /// </para>
+ /// <para>
+ /// Examples:
+ /// <list type="bullet">
+ /// <item>
+ /// <term>BinaryData.FromObjectAsJson("foo")</term>
+ /// <description>Creates a payload of "foo".</description>
+ /// </item>
+ /// <item>
+ /// <term>BinaryData.FromString("\"foo\"")</term>
+ /// <description>Creates a payload of "foo".</description>
+ /// </item>
+ /// <item>
+ /// <term>BinaryData.FromObjectAsJson(new { key = "value" })</term>
+ /// <description>Creates a payload of { "key": "value" }.</description>
+ /// </item>
+ /// <item>
+ /// <term>BinaryData.FromString("{\"key\": \"value\"}")</term>
+ /// <description>Creates a payload of { "key": "value" }.</description>
+ /// </item>
+ /// </list>
+ /// </para>
+ /// </summary>
+ private IDictionary<string, BinaryData> _serializedAdditionalRawData;
+
+ /// <summary> Initializes a new instance of <see cref="AddUploadPartRequest"/>. </summary>
+ /// <param name="data"> The chunk of bytes for this Part. </param>
+ /// <exception cref="ArgumentNullException"> <paramref name="data"/> is null. </exception>
+ public AddUploadPartRequest(Stream data)
+ {
+ Argument.AssertNotNull(data, nameof(data));
+
+ Data = data;
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AddUploadPartRequest"/>. </summary>
+ /// <param name="data"> The chunk of bytes for this Part. </param>
+ /// <param name="serializedAdditionalRawData"> Keeps track of any properties unknown to the library. </param>
+ internal AddUploadPartRequest(Stream data, IDictionary<string, BinaryData> serializedAdditionalRawData)
+ {
+ Data = data;
+ _serializedAdditionalRawData = serializedAdditionalRawData;
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AddUploadPartRequest"/> for deserialization. </summary>
+ internal AddUploadPartRequest()
+ {
+ }
+
+ /// <summary> The chunk of bytes for this Part. </summary>
+ public Stream Data { get; }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioOutputParameters.Serialization.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioOutputParameters.Serialization.cs
new file mode 100644
index 000000000000..a07525f7b53c
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioOutputParameters.Serialization.cs
@@ -0,0 +1,150 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.AI.OpenAI
+{
+ public partial class AudioOutputParameters : IUtf8JsonSerializable, IJsonModel<AudioOutputParameters>
+ {
+ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<AudioOutputParameters>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+ void IJsonModel<AudioOutputParameters>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ writer.WriteStartObject();
+ JsonModelWriteCore(writer, options);
+ writer.WriteEndObject();
+ }
+
+ /// <param name="writer"> The JSON writer. </param>
+ /// <param name="options"> The client options for reading and writing models. </param>
+ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AudioOutputParameters>)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioOutputParameters)} does not support writing '{format}' format.");
+ }
+
+ writer.WritePropertyName("voice"u8);
+ writer.WriteStringValue(Voice.ToString());
+ writer.WritePropertyName("format"u8);
+ writer.WriteStringValue(Format.ToString());
+ if (options.Format != "W" && _serializedAdditionalRawData != null)
+ {
+ foreach (var item in _serializedAdditionalRawData)
+ {
+ writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+ writer.WriteRawValue(item.Value);
+#else
+ using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions))
+ {
+ JsonSerializer.Serialize(writer, document.RootElement);
+ }
+#endif
+ }
+ }
+ }
+
+ AudioOutputParameters IJsonModel<AudioOutputParameters>.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AudioOutputParameters>)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioOutputParameters)} does not support reading '{format}' format.");
+ }
+
+ using JsonDocument document = JsonDocument.ParseValue(ref reader);
+ return DeserializeAudioOutputParameters(document.RootElement, options);
+ }
+
+ internal static AudioOutputParameters DeserializeAudioOutputParameters(JsonElement element, ModelReaderWriterOptions options = null)
+ {
+ options ??= ModelSerializationExtensions.WireOptions;
+
+ if (element.ValueKind == JsonValueKind.Null)
+ {
+ return null;
+ }
+ SpeechVoice voice = default;
+ OutputAudioFormat format = default;
+ IDictionary<string, BinaryData> serializedAdditionalRawData = default;
+ Dictionary<string, BinaryData> rawDataDictionary = new Dictionary<string, BinaryData>();
+ foreach (var property in element.EnumerateObject())
+ {
+ if (property.NameEquals("voice"u8))
+ {
+ voice = new SpeechVoice(property.Value.GetString());
+ continue;
+ }
+ if (property.NameEquals("format"u8))
+ {
+ format = new OutputAudioFormat(property.Value.GetString());
+ continue;
+ }
+ if (options.Format != "W")
+ {
+ rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
+ }
+ }
+ serializedAdditionalRawData = rawDataDictionary;
+ return new AudioOutputParameters(voice, format, serializedAdditionalRawData);
+ }
+
+ BinaryData IPersistableModel<AudioOutputParameters>.Write(ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AudioOutputParameters>)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ return ModelReaderWriter.Write(this, options);
+ default:
+ throw new FormatException($"The model {nameof(AudioOutputParameters)} does not support writing '{options.Format}' format.");
+ }
+ }
+
+ AudioOutputParameters IPersistableModel<AudioOutputParameters>.Create(BinaryData data, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AudioOutputParameters>)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ {
+ using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioOutputParameters(document.RootElement, options);
+ }
+ default:
+ throw new FormatException($"The model {nameof(AudioOutputParameters)} does not support reading '{options.Format}' format.");
+ }
+ }
+
+ string IPersistableModel<AudioOutputParameters>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+ /// <summary> Deserializes the model from a raw response. </summary>
+ /// <param name="response"> The response to deserialize the model from. </param>
+ internal static AudioOutputParameters FromResponse(Response response)
+ {
+ using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioOutputParameters(document.RootElement);
+ }
+
+ /// <summary> Convert into a <see cref="RequestContent"/>. </summary>
+ internal virtual RequestContent ToRequestContent()
+ {
+ var content = new Utf8JsonRequestContent();
+ content.JsonWriter.WriteObjectValue<AudioOutputParameters>(this, ModelSerializationExtensions.WireOptions);
+ return content;
+ }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioOutputParameters.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioOutputParameters.cs
new file mode 100644
index 000000000000..b710d4d1f946
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioOutputParameters.cs
@@ -0,0 +1,78 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+//
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.AI.OpenAI
+{
+ /// Describes the parameters for audio output.
+ public partial class AudioOutputParameters
+ {
+ // NOTE(review): generic type arguments (e.g. IDictionary<string, BinaryData>) appear stripped in this
+ // diff view — confirm against the generated source.
+ ///
+ /// Keeps track of any properties unknown to the library.
+ ///
+ /// To assign an object to the value of this property use .
+ ///
+ ///
+ /// To assign an already formatted json string to this property use .
+ ///
+ ///
+ /// Examples:
+ ///
+ /// -
+ /// BinaryData.FromObjectAsJson("foo")
+ /// Creates a payload of "foo".
+ ///
+ /// -
+ /// BinaryData.FromString("\"foo\"")
+ /// Creates a payload of "foo".
+ ///
+ /// -
+ /// BinaryData.FromObjectAsJson(new { key = "value" })
+ /// Creates a payload of { "key": "value" }.
+ ///
+ /// -
+ /// BinaryData.FromString("{\"key\": \"value\"}")
+ /// Creates a payload of { "key": "value" }.
+ ///
+ ///
+ ///
+ ///
+ private IDictionary _serializedAdditionalRawData;
+
+ /// Initializes a new instance of .
+ /// Specifies the voice type.
+ /// Specifies the output audio format.
+ public AudioOutputParameters(SpeechVoice voice, OutputAudioFormat format)
+ {
+ Voice = voice;
+ Format = format;
+ }
+
+ /// Initializes a new instance of .
+ /// Specifies the voice type.
+ /// Specifies the output audio format.
+ /// Keeps track of any properties unknown to the library.
+ // Invoked by DeserializeAudioOutputParameters in the companion .Serialization.cs file.
+ internal AudioOutputParameters(SpeechVoice voice, OutputAudioFormat format, IDictionary serializedAdditionalRawData)
+ {
+ Voice = voice;
+ Format = format;
+ _serializedAdditionalRawData = serializedAdditionalRawData;
+ }
+
+ /// Initializes a new instance of for deserialization.
+ internal AudioOutputParameters()
+ {
+ }
+
+ /// Specifies the voice type.
+ public SpeechVoice Voice { get; }
+ /// Specifies the output audio format.
+ public OutputAudioFormat Format { get; }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioResponseData.Serialization.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioResponseData.Serialization.cs
new file mode 100644
index 000000000000..83e67b496d45
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioResponseData.Serialization.cs
@@ -0,0 +1,166 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+//
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.AI.OpenAI
+{
+ // Serialization support for AudioResponseData: JSON ("J") is the only supported persistable format.
+ // NOTE(review): generic type arguments (e.g. IJsonModel<T>, IPersistableModel<T>) appear stripped in
+ // this diff view — confirm against the generated source.
+ public partial class AudioResponseData : IUtf8JsonSerializable, IJsonModel
+ {
+ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+ /// Writes this model as a JSON object using the supplied writer and options.
+ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ writer.WriteStartObject();
+ JsonModelWriteCore(writer, options);
+ writer.WriteEndObject();
+ }
+
+ /// Writes the model's properties (without the enclosing object braces).
+ /// The JSON writer.
+ /// The client options for reading and writing models.
+ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ // "W" (wire) defers to the model's preferred format, which GetFormatFromOptions reports as "J".
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioResponseData)} does not support writing '{format}' format.");
+ }
+
+ writer.WritePropertyName("id"u8);
+ writer.WriteStringValue(Id);
+ writer.WritePropertyName("expires_at"u8);
+ // "U" emits the timestamp as Unix seconds; the deserializer reads it back via FromUnixTimeSeconds.
+ writer.WriteNumberValue(ExpiresAt, "U");
+ writer.WritePropertyName("data"u8);
+ writer.WriteStringValue(Data);
+ writer.WritePropertyName("transcript"u8);
+ writer.WriteStringValue(Transcript);
+ // Round-trip any properties unknown to the library, except in wire ("W") format.
+ if (options.Format != "W" && _serializedAdditionalRawData != null)
+ {
+ foreach (var item in _serializedAdditionalRawData)
+ {
+ writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+ writer.WriteRawValue(item.Value);
+#else
+ using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions))
+ {
+ JsonSerializer.Serialize(writer, document.RootElement);
+ }
+#endif
+ }
+ }
+ }
+
+ /// Reads an AudioResponseData from the reader; only the "J" (JSON) format is supported.
+ AudioResponseData IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioResponseData)} does not support reading '{format}' format.");
+ }
+
+ using JsonDocument document = JsonDocument.ParseValue(ref reader);
+ return DeserializeAudioResponseData(document.RootElement, options);
+ }
+
+ /// Deserializes an AudioResponseData from a parsed JSON element; returns null for a JSON null element.
+ internal static AudioResponseData DeserializeAudioResponseData(JsonElement element, ModelReaderWriterOptions options = null)
+ {
+ options ??= ModelSerializationExtensions.WireOptions;
+
+ if (element.ValueKind == JsonValueKind.Null)
+ {
+ return null;
+ }
+ string id = default;
+ DateTimeOffset expiresAt = default;
+ string data = default;
+ string transcript = default;
+ IDictionary serializedAdditionalRawData = default;
+ Dictionary rawDataDictionary = new Dictionary();
+ foreach (var property in element.EnumerateObject())
+ {
+ if (property.NameEquals("id"u8))
+ {
+ id = property.Value.GetString();
+ continue;
+ }
+ if (property.NameEquals("expires_at"u8))
+ {
+ expiresAt = DateTimeOffset.FromUnixTimeSeconds(property.Value.GetInt64());
+ continue;
+ }
+ if (property.NameEquals("data"u8))
+ {
+ data = property.Value.GetString();
+ continue;
+ }
+ if (property.NameEquals("transcript"u8))
+ {
+ transcript = property.Value.GetString();
+ continue;
+ }
+ // Preserve unrecognized properties so they survive a round trip (skipped for wire format).
+ if (options.Format != "W")
+ {
+ rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
+ }
+ }
+ serializedAdditionalRawData = rawDataDictionary;
+ return new AudioResponseData(id, expiresAt, data, transcript, serializedAdditionalRawData);
+ }
+
+ /// Serializes this model to binary data; only the "J" (JSON) format is supported.
+ BinaryData IPersistableModel.Write(ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ return ModelReaderWriter.Write(this, options);
+ default:
+ throw new FormatException($"The model {nameof(AudioResponseData)} does not support writing '{options.Format}' format.");
+ }
+ }
+
+ /// Deserializes an AudioResponseData from binary data; only the "J" (JSON) format is supported.
+ AudioResponseData IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ {
+ using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioResponseData(document.RootElement, options);
+ }
+ default:
+ throw new FormatException($"The model {nameof(AudioResponseData)} does not support reading '{options.Format}' format.");
+ }
+ }
+
+ /// Reports the model's native serialization format: "J" (JSON).
+ string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+ /// Deserializes the model from a raw response.
+ /// The response to deserialize the model from.
+ internal static AudioResponseData FromResponse(Response response)
+ {
+ using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioResponseData(document.RootElement);
+ }
+
+ /// Converts this model into request content containing its wire-format (JSON) representation.
+ internal virtual RequestContent ToRequestContent()
+ {
+ var content = new Utf8JsonRequestContent();
+ content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions);
+ return content;
+ }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioResponseData.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioResponseData.cs
new file mode 100644
index 000000000000..11b58d1feb11
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioResponseData.cs
@@ -0,0 +1,116 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+//
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.AI.OpenAI
+{
+ /// Object containing audio response data and its metadata.
+ public partial class AudioResponseData
+ {
+ // NOTE(review): generic type arguments (e.g. IDictionary<string, BinaryData>) appear stripped in this
+ // diff view — confirm against the generated source.
+ ///
+ /// Keeps track of any properties unknown to the library.
+ ///
+ /// To assign an object to the value of this property use .
+ ///
+ ///
+ /// To assign an already formatted json string to this property use .
+ ///
+ ///
+ /// Examples:
+ ///
+ /// -
+ /// BinaryData.FromObjectAsJson("foo")
+ /// Creates a payload of "foo".
+ ///
+ /// -
+ /// BinaryData.FromString("\"foo\"")
+ /// Creates a payload of "foo".
+ ///
+ /// -
+ /// BinaryData.FromObjectAsJson(new { key = "value" })
+ /// Creates a payload of { "key": "value" }.
+ ///
+ /// -
+ /// BinaryData.FromString("{\"key\": \"value\"}")
+ /// Creates a payload of { "key": "value" }.
+ ///
+ ///
+ ///
+ ///
+ private IDictionary _serializedAdditionalRawData;
+
+ /// Initializes a new instance of .
+ /// Unique identifier for this audio response.
+ ///
+ /// The Unix timestamp (in seconds) for when this audio response
+ /// will no longer be accessible on the server for use in multi-turn
+ /// conversations.
+ ///
+ ///
+ /// Base64 encoded audio bytes generated by the model, in the format
+ /// specified in the request.
+ ///
+ /// Transcript of the audio generated by the model.
+ /// , or is null.
+ internal AudioResponseData(string id, DateTimeOffset expiresAt, string data, string transcript)
+ {
+ Argument.AssertNotNull(id, nameof(id));
+ Argument.AssertNotNull(data, nameof(data));
+ Argument.AssertNotNull(transcript, nameof(transcript));
+
+ Id = id;
+ ExpiresAt = expiresAt;
+ Data = data;
+ Transcript = transcript;
+ }
+
+ /// Initializes a new instance of .
+ /// Unique identifier for this audio response.
+ ///
+ /// The Unix timestamp (in seconds) for when this audio response
+ /// will no longer be accessible on the server for use in multi-turn
+ /// conversations.
+ ///
+ ///
+ /// Base64 encoded audio bytes generated by the model, in the format
+ /// specified in the request.
+ ///
+ /// Transcript of the audio generated by the model.
+ /// Keeps track of any properties unknown to the library.
+ // Invoked by DeserializeAudioResponseData in the companion .Serialization.cs file.
+ internal AudioResponseData(string id, DateTimeOffset expiresAt, string data, string transcript, IDictionary serializedAdditionalRawData)
+ {
+ Id = id;
+ ExpiresAt = expiresAt;
+ Data = data;
+ Transcript = transcript;
+ _serializedAdditionalRawData = serializedAdditionalRawData;
+ }
+
+ /// Initializes a new instance of for deserialization.
+ internal AudioResponseData()
+ {
+ }
+
+ /// Unique identifier for this audio response.
+ public string Id { get; }
+ ///
+ /// The Unix timestamp (in seconds) for when this audio response
+ /// will no longer be accessible on the server for use in multi-turn
+ /// conversations.
+ ///
+ public DateTimeOffset ExpiresAt { get; }
+ ///
+ /// Base64 encoded audio bytes generated by the model, in the format
+ /// specified in the request.
+ ///
+ public string Data { get; }
+ /// Transcript of the audio generated by the model.
+ public string Transcript { get; }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTaskLabel.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTaskLabel.cs
new file mode 100644
index 000000000000..e4669cce267d
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTaskLabel.cs
@@ -0,0 +1,51 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+//
+
+#nullable disable
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.AI.OpenAI
+{
+ /// Defines the possible descriptors for available audio operation responses.
+ // Extensible string enum: wraps the raw service value so unknown labels still round-trip.
+ // Equality and hashing are case-insensitive (invariant culture), per the members below.
+ internal readonly partial struct AudioTaskLabel : IEquatable
+ {
+ private readonly string _value;
+
+ /// Initializes a new instance of .
+ /// is null.
+ public AudioTaskLabel(string value)
+ {
+ _value = value ?? throw new ArgumentNullException(nameof(value));
+ }
+
+ private const string TranscribeValue = "transcribe";
+ private const string TranslateValue = "translate";
+
+ /// Accompanying response data resulted from an audio transcription task.
+ public static AudioTaskLabel Transcribe { get; } = new AudioTaskLabel(TranscribeValue);
+ /// Accompanying response data resulted from an audio translation task.
+ public static AudioTaskLabel Translate { get; } = new AudioTaskLabel(TranslateValue);
+ /// Determines if two values are the same.
+ public static bool operator ==(AudioTaskLabel left, AudioTaskLabel right) => left.Equals(right);
+ /// Determines if two values are not the same.
+ public static bool operator !=(AudioTaskLabel left, AudioTaskLabel right) => !left.Equals(right);
+ /// Converts a to a .
+ public static implicit operator AudioTaskLabel(string value) => new AudioTaskLabel(value);
+
+ ///
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override bool Equals(object obj) => obj is AudioTaskLabel other && Equals(other);
+ ///
+ public bool Equals(AudioTaskLabel other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+ ///
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0;
+ ///
+ public override string ToString() => _value;
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscription.Serialization.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscription.Serialization.cs
new file mode 100644
index 000000000000..a1e94d5e5640
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscription.Serialization.cs
@@ -0,0 +1,240 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+//
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Globalization;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.AI.OpenAI
+{
+ // Serialization support for AudioTranscription: JSON ("J") is the only supported persistable format.
+ // NOTE(review): generic type arguments (e.g. IJsonModel<T>, List<T>) appear stripped in this diff view —
+ // confirm against the generated source.
+ public partial class AudioTranscription : IUtf8JsonSerializable, IJsonModel
+ {
+ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+ /// Writes this model as a JSON object using the supplied writer and options.
+ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ writer.WriteStartObject();
+ JsonModelWriteCore(writer, options);
+ writer.WriteEndObject();
+ }
+
+ /// Writes the model's properties (without the enclosing object braces).
+ /// The JSON writer.
+ /// The client options for reading and writing models.
+ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ // "W" (wire) defers to the model's preferred format, which GetFormatFromOptions reports as "J".
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioTranscription)} does not support writing '{format}' format.");
+ }
+
+ writer.WritePropertyName("text"u8);
+ writer.WriteStringValue(Text);
+ if (Optional.IsDefined(InternalAudioTaskLabel))
+ {
+ writer.WritePropertyName("task"u8);
+ writer.WriteStringValue(InternalAudioTaskLabel.Value.ToString());
+ }
+ if (Optional.IsDefined(Language))
+ {
+ writer.WritePropertyName("language"u8);
+ writer.WriteStringValue(Language);
+ }
+ if (Optional.IsDefined(Duration))
+ {
+ writer.WritePropertyName("duration"u8);
+ // Emits the duration as fractional seconds ("s\.FFF" => seconds '.' milliseconds).
+ // FIX: parse with InvariantCulture — the escaped format always produces '.', but
+ // Convert.ToDouble(string) would otherwise use the current culture and misparse it
+ // in comma-decimal locales (CA1305).
+ writer.WriteNumberValue(Convert.ToDouble(Duration.Value.ToString("s\\.FFF"), CultureInfo.InvariantCulture));
+ }
+ if (Optional.IsCollectionDefined(Segments))
+ {
+ writer.WritePropertyName("segments"u8);
+ writer.WriteStartArray();
+ foreach (var item in Segments)
+ {
+ writer.WriteObjectValue(item, options);
+ }
+ writer.WriteEndArray();
+ }
+ if (Optional.IsCollectionDefined(Words))
+ {
+ writer.WritePropertyName("words"u8);
+ writer.WriteStartArray();
+ foreach (var item in Words)
+ {
+ writer.WriteObjectValue(item, options);
+ }
+ writer.WriteEndArray();
+ }
+ // Round-trip any properties unknown to the library, except in wire ("W") format.
+ if (options.Format != "W" && _serializedAdditionalRawData != null)
+ {
+ foreach (var item in _serializedAdditionalRawData)
+ {
+ writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+ writer.WriteRawValue(item.Value);
+#else
+ using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions))
+ {
+ JsonSerializer.Serialize(writer, document.RootElement);
+ }
+#endif
+ }
+ }
+ }
+
+ /// Reads an AudioTranscription from the reader; only the "J" (JSON) format is supported.
+ AudioTranscription IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioTranscription)} does not support reading '{format}' format.");
+ }
+
+ using JsonDocument document = JsonDocument.ParseValue(ref reader);
+ return DeserializeAudioTranscription(document.RootElement, options);
+ }
+
+ /// Deserializes an AudioTranscription from a parsed JSON element; returns null for a JSON null element.
+ internal static AudioTranscription DeserializeAudioTranscription(JsonElement element, ModelReaderWriterOptions options = null)
+ {
+ options ??= ModelSerializationExtensions.WireOptions;
+
+ if (element.ValueKind == JsonValueKind.Null)
+ {
+ return null;
+ }
+ string text = default;
+ AudioTaskLabel? task = default;
+ string language = default;
+ TimeSpan? duration = default;
+ IReadOnlyList segments = default;
+ IReadOnlyList words = default;
+ IDictionary serializedAdditionalRawData = default;
+ Dictionary rawDataDictionary = new Dictionary();
+ foreach (var property in element.EnumerateObject())
+ {
+ if (property.NameEquals("text"u8))
+ {
+ text = property.Value.GetString();
+ continue;
+ }
+ if (property.NameEquals("task"u8))
+ {
+ if (property.Value.ValueKind == JsonValueKind.Null)
+ {
+ continue;
+ }
+ task = new AudioTaskLabel(property.Value.GetString());
+ continue;
+ }
+ if (property.NameEquals("language"u8))
+ {
+ language = property.Value.GetString();
+ continue;
+ }
+ if (property.NameEquals("duration"u8))
+ {
+ if (property.Value.ValueKind == JsonValueKind.Null)
+ {
+ continue;
+ }
+ // Symmetric with the writer: the wire value is fractional seconds.
+ duration = TimeSpan.FromSeconds(property.Value.GetDouble());
+ continue;
+ }
+ if (property.NameEquals("segments"u8))
+ {
+ if (property.Value.ValueKind == JsonValueKind.Null)
+ {
+ continue;
+ }
+ List array = new List();
+ foreach (var item in property.Value.EnumerateArray())
+ {
+ array.Add(AudioTranscriptionSegment.DeserializeAudioTranscriptionSegment(item, options));
+ }
+ segments = array;
+ continue;
+ }
+ if (property.NameEquals("words"u8))
+ {
+ if (property.Value.ValueKind == JsonValueKind.Null)
+ {
+ continue;
+ }
+ List array = new List();
+ foreach (var item in property.Value.EnumerateArray())
+ {
+ array.Add(AudioTranscriptionWord.DeserializeAudioTranscriptionWord(item, options));
+ }
+ words = array;
+ continue;
+ }
+ // Preserve unrecognized properties so they survive a round trip (skipped for wire format).
+ if (options.Format != "W")
+ {
+ rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
+ }
+ }
+ serializedAdditionalRawData = rawDataDictionary;
+ return new AudioTranscription(
+ text,
+ task,
+ language,
+ duration,
+ segments ?? new ChangeTrackingList(),
+ words ?? new ChangeTrackingList(),
+ serializedAdditionalRawData);
+ }
+
+ /// Serializes this model to binary data; only the "J" (JSON) format is supported.
+ BinaryData IPersistableModel.Write(ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ return ModelReaderWriter.Write(this, options);
+ default:
+ throw new FormatException($"The model {nameof(AudioTranscription)} does not support writing '{options.Format}' format.");
+ }
+ }
+
+ /// Deserializes an AudioTranscription from binary data; only the "J" (JSON) format is supported.
+ AudioTranscription IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ {
+ using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioTranscription(document.RootElement, options);
+ }
+ default:
+ throw new FormatException($"The model {nameof(AudioTranscription)} does not support reading '{options.Format}' format.");
+ }
+ }
+
+ /// Reports the model's native serialization format: "J" (JSON).
+ string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+ /// Deserializes the model from a raw response.
+ /// The response to deserialize the model from.
+ internal static AudioTranscription FromResponse(Response response)
+ {
+ using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioTranscription(document.RootElement);
+ }
+
+ /// Converts this model into request content containing its wire-format (JSON) representation.
+ internal virtual RequestContent ToRequestContent()
+ {
+ var content = new Utf8JsonRequestContent();
+ content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions);
+ return content;
+ }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscription.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscription.cs
new file mode 100644
index 000000000000..af332f94ac50
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscription.cs
@@ -0,0 +1,103 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+//
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.AI.OpenAI
+{
+ /// Result information for an operation that transcribed spoken audio into written text.
+ public partial class AudioTranscription
+ {
+ // NOTE(review): generic type arguments (e.g. IReadOnlyList<T>, IDictionary<string, BinaryData>) appear
+ // stripped in this diff view — confirm against the generated source.
+ ///
+ /// Keeps track of any properties unknown to the library.
+ ///
+ /// To assign an object to the value of this property use .
+ ///
+ ///
+ /// To assign an already formatted json string to this property use .
+ ///
+ ///
+ /// Examples:
+ ///
+ /// -
+ /// BinaryData.FromObjectAsJson("foo")
+ /// Creates a payload of "foo".
+ ///
+ /// -
+ /// BinaryData.FromString("\"foo\"")
+ /// Creates a payload of "foo".
+ ///
+ /// -
+ /// BinaryData.FromObjectAsJson(new { key = "value" })
+ /// Creates a payload of { "key": "value" }.
+ ///
+ /// -
+ /// BinaryData.FromString("{\"key\": \"value\"}")
+ /// Creates a payload of { "key": "value" }.
+ ///
+ ///
+ ///
+ ///
+ private IDictionary _serializedAdditionalRawData;
+
+ /// Initializes a new instance of .
+ /// The transcribed text for the provided audio data.
+ /// is null.
+ internal AudioTranscription(string text)
+ {
+ Argument.AssertNotNull(text, nameof(text));
+
+ Text = text;
+ Segments = new ChangeTrackingList();
+ Words = new ChangeTrackingList();
+ }
+
+ /// Initializes a new instance of .
+ /// The transcribed text for the provided audio data.
+ /// The label that describes which operation type generated the accompanying response data.
+ ///
+ /// The spoken language that was detected in the transcribed audio data.
+ /// This is expressed as a two-letter ISO-639-1 language code like 'en' or 'fr'.
+ ///
+ /// The total duration of the audio processed to produce accompanying transcription information.
+ /// A collection of information about the timing, probabilities, and other detail of each processed audio segment.
+ /// A collection of information about the timing of each processed word.
+ /// Keeps track of any properties unknown to the library.
+ // Invoked by DeserializeAudioTranscription in the companion .Serialization.cs file.
+ internal AudioTranscription(string text, AudioTaskLabel? internalAudioTaskLabel, string language, TimeSpan? duration, IReadOnlyList segments, IReadOnlyList words, IDictionary serializedAdditionalRawData)
+ {
+ Text = text;
+ InternalAudioTaskLabel = internalAudioTaskLabel;
+ Language = language;
+ Duration = duration;
+ Segments = segments;
+ Words = words;
+ _serializedAdditionalRawData = serializedAdditionalRawData;
+ }
+
+ /// Initializes a new instance of for deserialization.
+ internal AudioTranscription()
+ {
+ }
+
+ /// The transcribed text for the provided audio data.
+ public string Text { get; }
+ /// The label that describes which operation type generated the accompanying response data.
+ public AudioTaskLabel? InternalAudioTaskLabel { get; }
+ ///
+ /// The spoken language that was detected in the transcribed audio data.
+ /// This is expressed as a two-letter ISO-639-1 language code like 'en' or 'fr'.
+ ///
+ public string Language { get; }
+ /// The total duration of the audio processed to produce accompanying transcription information.
+ public TimeSpan? Duration { get; }
+ /// A collection of information about the timing, probabilities, and other detail of each processed audio segment.
+ public IReadOnlyList Segments { get; }
+ /// A collection of information about the timing of each processed word.
+ public IReadOnlyList Words { get; }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionFormat.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionFormat.cs
new file mode 100644
index 000000000000..fc8228609c41
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionFormat.cs
@@ -0,0 +1,63 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+//
+
+#nullable disable
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.AI.OpenAI
+{
+ /// Defines available options for the underlying response format of output transcription information.
+ // Extensible string enum: wraps the raw service value so unknown formats still round-trip.
+ // Equality and hashing are case-insensitive (invariant culture), per the members below.
+ public readonly partial struct AudioTranscriptionFormat : IEquatable
+ {
+ private readonly string _value;
+
+ /// Initializes a new instance of .
+ /// is null.
+ public AudioTranscriptionFormat(string value)
+ {
+ _value = value ?? throw new ArgumentNullException(nameof(value));
+ }
+
+ private const string SimpleValue = "json";
+ private const string VerboseValue = "verbose_json";
+ private const string InternalPlainTextValue = "text";
+ private const string SrtValue = "srt";
+ private const string VttValue = "vtt";
+
+ /// Use a response body that is a JSON object containing a single 'text' field for the transcription.
+ public static AudioTranscriptionFormat Simple { get; } = new AudioTranscriptionFormat(SimpleValue);
+ ///
+ /// Use a response body that is a JSON object containing transcription text along with timing, segments, and other
+ /// metadata.
+ ///
+ public static AudioTranscriptionFormat Verbose { get; } = new AudioTranscriptionFormat(VerboseValue);
+ /// Use a response body that is plain text containing the raw, unannotated transcription.
+ public static AudioTranscriptionFormat InternalPlainText { get; } = new AudioTranscriptionFormat(InternalPlainTextValue);
+ /// Use a response body that is plain text in SubRip (SRT) format that also includes timing information.
+ public static AudioTranscriptionFormat Srt { get; } = new AudioTranscriptionFormat(SrtValue);
+ /// Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information.
+ public static AudioTranscriptionFormat Vtt { get; } = new AudioTranscriptionFormat(VttValue);
+ /// Determines if two values are the same.
+ public static bool operator ==(AudioTranscriptionFormat left, AudioTranscriptionFormat right) => left.Equals(right);
+ /// Determines if two values are not the same.
+ public static bool operator !=(AudioTranscriptionFormat left, AudioTranscriptionFormat right) => !left.Equals(right);
+ /// Converts a to a .
+ public static implicit operator AudioTranscriptionFormat(string value) => new AudioTranscriptionFormat(value);
+
+ ///
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override bool Equals(object obj) => obj is AudioTranscriptionFormat other && Equals(other);
+ ///
+ public bool Equals(AudioTranscriptionFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+ ///
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0;
+ ///
+ public override string ToString() => _value;
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.Serialization.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.Serialization.cs
new file mode 100644
index 000000000000..bb0d57649ff7
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.Serialization.cs
@@ -0,0 +1,313 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.IO;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.AI.OpenAI
+{
+ public partial class AudioTranscriptionOptions : IUtf8JsonSerializable, IJsonModel<AudioTranscriptionOptions>
+ {
+ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<AudioTranscriptionOptions>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+ void IJsonModel<AudioTranscriptionOptions>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ writer.WriteStartObject();
+ JsonModelWriteCore(writer, options);
+ writer.WriteEndObject();
+ }
+
+ /// <param name="writer"> The JSON writer. </param>
+ /// <param name="options"> The client options for reading and writing models. </param>
+ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AudioTranscriptionOptions>)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioTranscriptionOptions)} does not support writing '{format}' format.");
+ }
+
+ writer.WritePropertyName("file"u8);
+#if NET6_0_OR_GREATER
+ writer.WriteRawValue(global::System.BinaryData.FromStream(AudioData));
+#else
+ using (JsonDocument document = JsonDocument.Parse(BinaryData.FromStream(AudioData), ModelSerializationExtensions.JsonDocumentOptions))
+ {
+ JsonSerializer.Serialize(writer, document.RootElement);
+ }
+#endif
+ if (Optional.IsDefined(Filename))
+ {
+ writer.WritePropertyName("filename"u8);
+ writer.WriteStringValue(Filename);
+ }
+ if (Optional.IsDefined(ResponseFormat))
+ {
+ writer.WritePropertyName("response_format"u8);
+ writer.WriteStringValue(ResponseFormat.Value.ToString());
+ }
+ if (Optional.IsDefined(Language))
+ {
+ writer.WritePropertyName("language"u8);
+ writer.WriteStringValue(Language);
+ }
+ if (Optional.IsDefined(Prompt))
+ {
+ writer.WritePropertyName("prompt"u8);
+ writer.WriteStringValue(Prompt);
+ }
+ if (Optional.IsDefined(Temperature))
+ {
+ writer.WritePropertyName("temperature"u8);
+ writer.WriteNumberValue(Temperature.Value);
+ }
+ if (Optional.IsCollectionDefined(TimestampGranularities))
+ {
+ writer.WritePropertyName("timestamp_granularities"u8);
+ writer.WriteStartArray();
+ foreach (var item in TimestampGranularities)
+ {
+ writer.WriteStringValue(item.ToString());
+ }
+ writer.WriteEndArray();
+ }
+ if (Optional.IsDefined(DeploymentName))
+ {
+ writer.WritePropertyName("model"u8);
+ writer.WriteStringValue(DeploymentName);
+ }
+ if (options.Format != "W" && _serializedAdditionalRawData != null) // round-trip wire properties unknown to this model version
+ {
+ foreach (var item in _serializedAdditionalRawData)
+ {
+ writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+ writer.WriteRawValue(item.Value);
+#else
+ using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions))
+ {
+ JsonSerializer.Serialize(writer, document.RootElement);
+ }
+#endif
+ }
+ }
+ }
+
+ AudioTranscriptionOptions IJsonModel<AudioTranscriptionOptions>.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AudioTranscriptionOptions>)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioTranscriptionOptions)} does not support reading '{format}' format.");
+ }
+
+ using JsonDocument document = JsonDocument.ParseValue(ref reader);
+ return DeserializeAudioTranscriptionOptions(document.RootElement, options);
+ }
+
+ internal static AudioTranscriptionOptions DeserializeAudioTranscriptionOptions(JsonElement element, ModelReaderWriterOptions options = null)
+ {
+ options ??= ModelSerializationExtensions.WireOptions;
+
+ if (element.ValueKind == JsonValueKind.Null)
+ {
+ return null;
+ }
+ Stream file = default;
+ string filename = default;
+ AudioTranscriptionFormat? responseFormat = default;
+ string language = default;
+ string prompt = default;
+ float? temperature = default;
+ IList<AudioTranscriptionTimestampGranularity> timestampGranularities = default;
+ string model = default;
+ IDictionary<string, BinaryData> serializedAdditionalRawData = default;
+ Dictionary<string, BinaryData> rawDataDictionary = new Dictionary<string, BinaryData>();
+ foreach (var property in element.EnumerateObject())
+ {
+ if (property.NameEquals("file"u8))
+ {
+ file = BinaryData.FromString(property.Value.GetRawText()).ToStream();
+ continue;
+ }
+ if (property.NameEquals("filename"u8))
+ {
+ filename = property.Value.GetString();
+ continue;
+ }
+ if (property.NameEquals("response_format"u8))
+ {
+ if (property.Value.ValueKind == JsonValueKind.Null)
+ {
+ continue;
+ }
+ responseFormat = new AudioTranscriptionFormat(property.Value.GetString());
+ continue;
+ }
+ if (property.NameEquals("language"u8))
+ {
+ language = property.Value.GetString();
+ continue;
+ }
+ if (property.NameEquals("prompt"u8))
+ {
+ prompt = property.Value.GetString();
+ continue;
+ }
+ if (property.NameEquals("temperature"u8))
+ {
+ if (property.Value.ValueKind == JsonValueKind.Null)
+ {
+ continue;
+ }
+ temperature = property.Value.GetSingle();
+ continue;
+ }
+ if (property.NameEquals("timestamp_granularities"u8))
+ {
+ if (property.Value.ValueKind == JsonValueKind.Null)
+ {
+ continue;
+ }
+ List<AudioTranscriptionTimestampGranularity> array = new List<AudioTranscriptionTimestampGranularity>();
+ foreach (var item in property.Value.EnumerateArray())
+ {
+ array.Add(new AudioTranscriptionTimestampGranularity(item.GetString()));
+ }
+ timestampGranularities = array;
+ continue;
+ }
+ if (property.NameEquals("model"u8))
+ {
+ model = property.Value.GetString();
+ continue;
+ }
+ if (options.Format != "W")
+ {
+ rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
+ }
+ }
+ serializedAdditionalRawData = rawDataDictionary;
+ return new AudioTranscriptionOptions(
+ file,
+ filename,
+ responseFormat,
+ language,
+ prompt,
+ temperature,
+ timestampGranularities ?? new ChangeTrackingList<AudioTranscriptionTimestampGranularity>(),
+ model,
+ serializedAdditionalRawData);
+ }
+
+ private BinaryData SerializeMultipart(ModelReaderWriterOptions options)
+ {
+ using MultipartFormDataRequestContent content = ToMultipartRequestContent();
+ using MemoryStream stream = new MemoryStream();
+ content.WriteTo(stream);
+ if (stream.Position > int.MaxValue) // GetBuffer() slice below is limited to int-addressable lengths; copy for larger payloads
+ {
+ return BinaryData.FromStream(stream);
+ }
+ else
+ {
+ return new BinaryData(stream.GetBuffer().AsMemory(0, (int)stream.Position));
+ }
+ }
+
+ internal virtual MultipartFormDataRequestContent ToMultipartRequestContent()
+ {
+ MultipartFormDataRequestContent content = new MultipartFormDataRequestContent();
+ content.Add(AudioData, "file", "file", "application/octet-stream");
+ if (Optional.IsDefined(Filename))
+ {
+ content.Add(Filename, "filename");
+ }
+ if (Optional.IsDefined(ResponseFormat))
+ {
+ content.Add(ResponseFormat.Value.ToString(), "response_format");
+ }
+ if (Optional.IsDefined(Language))
+ {
+ content.Add(Language, "language");
+ }
+ if (Optional.IsDefined(Prompt))
+ {
+ content.Add(Prompt, "prompt");
+ }
+ if (Optional.IsDefined(Temperature))
+ {
+ content.Add(Temperature.Value, "temperature");
+ }
+ if (Optional.IsCollectionDefined(TimestampGranularities))
+ {
+ foreach (AudioTranscriptionTimestampGranularity item in TimestampGranularities)
+ {
+ content.Add(item.ToString(), "timestamp_granularities");
+ }
+ }
+ if (Optional.IsDefined(DeploymentName))
+ {
+ content.Add(DeploymentName, "model");
+ }
+ return content;
+ }
+
+ BinaryData IPersistableModel<AudioTranscriptionOptions>.Write(ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AudioTranscriptionOptions>)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ return ModelReaderWriter.Write(this, options);
+ case "MFD":
+ return SerializeMultipart(options);
+ default:
+ throw new FormatException($"The model {nameof(AudioTranscriptionOptions)} does not support writing '{options.Format}' format.");
+ }
+ }
+
+ AudioTranscriptionOptions IPersistableModel<AudioTranscriptionOptions>.Create(BinaryData data, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AudioTranscriptionOptions>)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ {
+ using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioTranscriptionOptions(document.RootElement, options);
+ }
+ default:
+ throw new FormatException($"The model {nameof(AudioTranscriptionOptions)} does not support reading '{options.Format}' format.");
+ }
+ }
+
+ string IPersistableModel<AudioTranscriptionOptions>.GetFormatFromOptions(ModelReaderWriterOptions options) => "MFD"; // multipart/form-data is the wire format for this request model
+
+ /// <summary> Deserializes the model from a raw response. </summary>
+ /// <param name="response"> The response to deserialize the model from. </param>
+ internal static AudioTranscriptionOptions FromResponse(Response response)
+ {
+ using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioTranscriptionOptions(document.RootElement);
+ }
+
+ /// <summary> Convert into a <see cref="RequestContent"/>. </summary>
+ internal virtual RequestContent ToRequestContent()
+ {
+ var content = new Utf8JsonRequestContent();
+ content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions);
+ return content;
+ }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.cs
new file mode 100644
index 000000000000..e4c0a5294401
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.cs
@@ -0,0 +1,146 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+using System.IO;
+
+namespace Azure.AI.OpenAI
+{
+ /// <summary> The configuration information for an audio transcription request. </summary>
+ public partial class AudioTranscriptionOptions
+ {
+ /// <summary>
+ /// Keeps track of any properties unknown to the library.
+ /// <para>
+ /// To assign an object to the value of this property use <see cref="BinaryData.FromObjectAsJson{T}(T, System.Text.Json.JsonSerializerOptions?)"/>.
+ /// </para>
+ /// <para>
+ /// To assign an already formatted json string to this property use <see cref="BinaryData.FromString(string)"/>.
+ /// </para>
+ /// <para>
+ /// Examples:
+ /// <list type="bullet">
+ /// <item>
+ /// <term>BinaryData.FromObjectAsJson("foo")</term>
+ /// <description>Creates a payload of "foo".</description>
+ /// </item>
+ /// <item>
+ /// <term>BinaryData.FromString("\"foo\"")</term>
+ /// <description>Creates a payload of "foo".</description>
+ /// </item>
+ /// <item>
+ /// <term>BinaryData.FromObjectAsJson(new { key = "value" })</term>
+ /// <description>Creates a payload of { "key": "value" }.</description>
+ /// </item>
+ /// <item>
+ /// <term>BinaryData.FromString("{\"key\": \"value\"}")</term>
+ /// <description>Creates a payload of { "key": "value" }.</description>
+ /// </item>
+ /// </list>
+ /// </para>
+ /// </summary>
+ private IDictionary<string, BinaryData> _serializedAdditionalRawData;
+
+ /// <summary> Initializes a new instance of <see cref="AudioTranscriptionOptions"/>. </summary>
+ /// <param name="audioData">
+ /// The audio data to transcribe. This must be the binary content of a file in one of the supported media formats:
+ /// flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
+ /// </param>
+ /// <exception cref="ArgumentNullException"> <paramref name="audioData"/> is null. </exception>
+ public AudioTranscriptionOptions(Stream audioData)
+ {
+ Argument.AssertNotNull(audioData, nameof(audioData));
+
+ AudioData = audioData;
+ TimestampGranularities = new ChangeTrackingList<AudioTranscriptionTimestampGranularity>();
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AudioTranscriptionOptions"/>. </summary>
+ /// <param name="audioData">
+ /// The audio data to transcribe. This must be the binary content of a file in one of the supported media formats:
+ /// flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
+ /// </param>
+ /// <param name="filename"> The optional filename or descriptive identifier to associate with the audio data. </param>
+ /// <param name="responseFormat"> The requested format of the transcription response data, which will influence the content and detail of the result. </param>
+ /// <param name="language">
+ /// The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code
+ /// such as 'en' or 'fr'.
+ /// Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
+ /// </param>
+ /// <param name="prompt">
+ /// An optional hint to guide the model's style or continue from a prior audio segment. The written language of the
+ /// prompt should match the primary spoken language of the audio data.
+ /// </param>
+ /// <param name="temperature">
+ /// The sampling temperature, between 0 and 1.
+ /// Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+ /// If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ /// </param>
+ /// <param name="timestampGranularities">
+ /// The timestamp granularities to populate for this transcription.
+ /// `response_format` must be set `verbose_json` to use timestamp granularities.
+ /// Either or both of these options are supported: `word`, or `segment`.
+ /// Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
+ /// </param>
+ /// <param name="deploymentName"> The model to use for this transcription request. </param>
+ /// <param name="serializedAdditionalRawData"> Keeps track of any properties unknown to the library. </param>
+ internal AudioTranscriptionOptions(Stream audioData, string filename, AudioTranscriptionFormat? responseFormat, string language, string prompt, float? temperature, IList<AudioTranscriptionTimestampGranularity> timestampGranularities, string deploymentName, IDictionary<string, BinaryData> serializedAdditionalRawData)
+ {
+ AudioData = audioData;
+ Filename = filename;
+ ResponseFormat = responseFormat;
+ Language = language;
+ Prompt = prompt;
+ Temperature = temperature;
+ TimestampGranularities = timestampGranularities;
+ DeploymentName = deploymentName;
+ _serializedAdditionalRawData = serializedAdditionalRawData;
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AudioTranscriptionOptions"/> for deserialization. </summary>
+ internal AudioTranscriptionOptions()
+ {
+ }
+
+ /// <summary>
+ /// The audio data to transcribe. This must be the binary content of a file in one of the supported media formats:
+ /// flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
+ /// </summary>
+ public Stream AudioData { get; }
+ /// <summary> The optional filename or descriptive identifier to associate with the audio data. </summary>
+ public string Filename { get; set; }
+ /// <summary> The requested format of the transcription response data, which will influence the content and detail of the result. </summary>
+ public AudioTranscriptionFormat? ResponseFormat { get; set; }
+ /// <summary>
+ /// The primary spoken language of the audio data to be transcribed, supplied as a two-letter ISO-639-1 language code
+ /// such as 'en' or 'fr'.
+ /// Providing this known input language is optional but may improve the accuracy and/or latency of transcription.
+ /// </summary>
+ public string Language { get; set; }
+ /// <summary>
+ /// An optional hint to guide the model's style or continue from a prior audio segment. The written language of the
+ /// prompt should match the primary spoken language of the audio data.
+ /// </summary>
+ public string Prompt { get; set; }
+ /// <summary>
+ /// The sampling temperature, between 0 and 1.
+ /// Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+ /// If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
+ /// </summary>
+ public float? Temperature { get; set; }
+ /// <summary>
+ /// The timestamp granularities to populate for this transcription.
+ /// `response_format` must be set `verbose_json` to use timestamp granularities.
+ /// Either or both of these options are supported: `word`, or `segment`.
+ /// Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
+ /// </summary>
+ public IList<AudioTranscriptionTimestampGranularity> TimestampGranularities { get; }
+ /// <summary> The model to use for this transcription request. </summary>
+ public string DeploymentName { get; set; }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionSegment.Serialization.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionSegment.Serialization.cs
new file mode 100644
index 000000000000..cdef69b02e69
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionSegment.Serialization.cs
@@ -0,0 +1,235 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.AI.OpenAI
+{
+ public partial class AudioTranscriptionSegment : IUtf8JsonSerializable, IJsonModel<AudioTranscriptionSegment>
+ {
+ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel<AudioTranscriptionSegment>)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+ void IJsonModel<AudioTranscriptionSegment>.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ writer.WriteStartObject();
+ JsonModelWriteCore(writer, options);
+ writer.WriteEndObject();
+ }
+
+ /// <param name="writer"> The JSON writer. </param>
+ /// <param name="options"> The client options for reading and writing models. </param>
+ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AudioTranscriptionSegment>)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioTranscriptionSegment)} does not support writing '{format}' format.");
+ }
+
+ writer.WritePropertyName("id"u8);
+ writer.WriteNumberValue(Id);
+ writer.WritePropertyName("start"u8);
+ writer.WriteNumberValue(Convert.ToDouble(Start.ToString("s\\.FFF"))); // NOTE(review): "s" emits only the seconds component (0-59) and Convert.ToDouble is culture-sensitive — confirm spans ≥ 1 minute round-trip correctly
+ writer.WritePropertyName("end"u8);
+ writer.WriteNumberValue(Convert.ToDouble(End.ToString("s\\.FFF")));
+ writer.WritePropertyName("text"u8);
+ writer.WriteStringValue(Text);
+ writer.WritePropertyName("temperature"u8);
+ writer.WriteNumberValue(Temperature);
+ writer.WritePropertyName("avg_logprob"u8);
+ writer.WriteNumberValue(AverageLogProbability);
+ writer.WritePropertyName("compression_ratio"u8);
+ writer.WriteNumberValue(CompressionRatio);
+ writer.WritePropertyName("no_speech_prob"u8);
+ writer.WriteNumberValue(NoSpeechProbability);
+ writer.WritePropertyName("tokens"u8);
+ writer.WriteStartArray();
+ foreach (var item in Tokens)
+ {
+ writer.WriteNumberValue(item);
+ }
+ writer.WriteEndArray();
+ writer.WritePropertyName("seek"u8);
+ writer.WriteNumberValue(Seek);
+ if (options.Format != "W" && _serializedAdditionalRawData != null) // round-trip wire properties unknown to this model version
+ {
+ foreach (var item in _serializedAdditionalRawData)
+ {
+ writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+ writer.WriteRawValue(item.Value);
+#else
+ using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions))
+ {
+ JsonSerializer.Serialize(writer, document.RootElement);
+ }
+#endif
+ }
+ }
+ }
+
+ AudioTranscriptionSegment IJsonModel<AudioTranscriptionSegment>.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AudioTranscriptionSegment>)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioTranscriptionSegment)} does not support reading '{format}' format.");
+ }
+
+ using JsonDocument document = JsonDocument.ParseValue(ref reader);
+ return DeserializeAudioTranscriptionSegment(document.RootElement, options);
+ }
+
+ internal static AudioTranscriptionSegment DeserializeAudioTranscriptionSegment(JsonElement element, ModelReaderWriterOptions options = null)
+ {
+ options ??= ModelSerializationExtensions.WireOptions;
+
+ if (element.ValueKind == JsonValueKind.Null)
+ {
+ return null;
+ }
+ int id = default;
+ TimeSpan start = default;
+ TimeSpan end = default;
+ string text = default;
+ float temperature = default;
+ float avgLogprob = default;
+ float compressionRatio = default;
+ float noSpeechProb = default;
+ IReadOnlyList<int> tokens = default;
+ int seek = default;
+ IDictionary<string, BinaryData> serializedAdditionalRawData = default;
+ Dictionary<string, BinaryData> rawDataDictionary = new Dictionary<string, BinaryData>();
+ foreach (var property in element.EnumerateObject())
+ {
+ if (property.NameEquals("id"u8))
+ {
+ id = property.Value.GetInt32();
+ continue;
+ }
+ if (property.NameEquals("start"u8))
+ {
+ start = TimeSpan.FromSeconds(property.Value.GetDouble());
+ continue;
+ }
+ if (property.NameEquals("end"u8))
+ {
+ end = TimeSpan.FromSeconds(property.Value.GetDouble());
+ continue;
+ }
+ if (property.NameEquals("text"u8))
+ {
+ text = property.Value.GetString();
+ continue;
+ }
+ if (property.NameEquals("temperature"u8))
+ {
+ temperature = property.Value.GetSingle();
+ continue;
+ }
+ if (property.NameEquals("avg_logprob"u8))
+ {
+ avgLogprob = property.Value.GetSingle();
+ continue;
+ }
+ if (property.NameEquals("compression_ratio"u8))
+ {
+ compressionRatio = property.Value.GetSingle();
+ continue;
+ }
+ if (property.NameEquals("no_speech_prob"u8))
+ {
+ noSpeechProb = property.Value.GetSingle();
+ continue;
+ }
+ if (property.NameEquals("tokens"u8))
+ {
+ List<int> array = new List<int>();
+ foreach (var item in property.Value.EnumerateArray())
+ {
+ array.Add(item.GetInt32());
+ }
+ tokens = array;
+ continue;
+ }
+ if (property.NameEquals("seek"u8))
+ {
+ seek = property.Value.GetInt32();
+ continue;
+ }
+ if (options.Format != "W")
+ {
+ rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
+ }
+ }
+ serializedAdditionalRawData = rawDataDictionary;
+ return new AudioTranscriptionSegment(
+ id,
+ start,
+ end,
+ text,
+ temperature,
+ avgLogprob,
+ compressionRatio,
+ noSpeechProb,
+ tokens,
+ seek,
+ serializedAdditionalRawData);
+ }
+
+ BinaryData IPersistableModel<AudioTranscriptionSegment>.Write(ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AudioTranscriptionSegment>)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ return ModelReaderWriter.Write(this, options);
+ default:
+ throw new FormatException($"The model {nameof(AudioTranscriptionSegment)} does not support writing '{options.Format}' format.");
+ }
+ }
+
+ AudioTranscriptionSegment IPersistableModel<AudioTranscriptionSegment>.Create(BinaryData data, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel<AudioTranscriptionSegment>)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ {
+ using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioTranscriptionSegment(document.RootElement, options);
+ }
+ default:
+ throw new FormatException($"The model {nameof(AudioTranscriptionSegment)} does not support reading '{options.Format}' format.");
+ }
+ }
+
+ string IPersistableModel<AudioTranscriptionSegment>.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+ /// <summary> Deserializes the model from a raw response. </summary>
+ /// <param name="response"> The response to deserialize the model from. </param>
+ internal static AudioTranscriptionSegment FromResponse(Response response)
+ {
+ using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioTranscriptionSegment(document.RootElement);
+ }
+
+ /// <summary> Convert into a <see cref="RequestContent"/>. </summary>
+ internal virtual RequestContent ToRequestContent()
+ {
+ var content = new Utf8JsonRequestContent();
+ content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions);
+ return content;
+ }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionSegment.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionSegment.cs
new file mode 100644
index 000000000000..e1dd9c8fad59
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionSegment.cs
@@ -0,0 +1,153 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+
+namespace Azure.AI.OpenAI
+{
+ /// <summary>
+ /// Extended information about a single segment of transcribed audio data.
+ /// Segments generally represent roughly 5-10 seconds of speech. Segment boundaries typically occur between words but not
+ /// necessarily sentences.
+ /// </summary>
+ public partial class AudioTranscriptionSegment
+ {
+ /// <summary>
+ /// Keeps track of any properties unknown to the library.
+ /// <para>
+ /// To assign an object to the value of this property use <see cref="BinaryData.FromObjectAsJson{T}(T, System.Text.Json.JsonSerializerOptions?)"/>.
+ /// </para>
+ /// <para>
+ /// To assign an already formatted json string to this property use <see cref="BinaryData.FromString(string)"/>.
+ /// </para>
+ /// <para>
+ /// Examples:
+ /// <list type="bullet">
+ /// <item>
+ /// <term>BinaryData.FromObjectAsJson("foo")</term>
+ /// <description>Creates a payload of "foo".</description>
+ /// </item>
+ /// <item>
+ /// <term>BinaryData.FromString("\"foo\"")</term>
+ /// <description>Creates a payload of "foo".</description>
+ /// </item>
+ /// <item>
+ /// <term>BinaryData.FromObjectAsJson(new { key = "value" })</term>
+ /// <description>Creates a payload of { "key": "value" }.</description>
+ /// </item>
+ /// <item>
+ /// <term>BinaryData.FromString("{\"key\": \"value\"}")</term>
+ /// <description>Creates a payload of { "key": "value" }.</description>
+ /// </item>
+ /// </list>
+ /// </para>
+ /// </summary>
+ private IDictionary<string, BinaryData> _serializedAdditionalRawData;
+
+ /// <summary> Initializes a new instance of <see cref="AudioTranscriptionSegment"/>. </summary>
+ /// <param name="id"> The 0-based index of this segment within a transcription. </param>
+ /// <param name="start"> The time at which this segment started relative to the beginning of the transcribed audio. </param>
+ /// <param name="end"> The time at which this segment ended relative to the beginning of the transcribed audio. </param>
+ /// <param name="text"> The transcribed text that was part of this audio segment. </param>
+ /// <param name="temperature"> The temperature score associated with this audio segment. </param>
+ /// <param name="averageLogProbability"> The average log probability associated with this audio segment. </param>
+ /// <param name="compressionRatio"> The compression ratio of this audio segment. </param>
+ /// <param name="noSpeechProbability"> The probability of no speech detection within this audio segment. </param>
+ /// <param name="tokens"> The token IDs matching the transcribed text in this audio segment. </param>
+ /// <param name="seek">
+ /// The seek position associated with the processing of this audio segment.
+ /// Seek positions are expressed as hundredths of seconds.
+ /// The model may process several segments from a single seek position, so while the seek position will never represent
+ /// a later time than the segment's start, the segment's start may represent a significantly later time than the
+ /// segment's associated seek position.
+ /// </param>
+ /// <exception cref="ArgumentNullException"> <paramref name="text"/> or <paramref name="tokens"/> is null. </exception>
+ internal AudioTranscriptionSegment(int id, TimeSpan start, TimeSpan end, string text, float temperature, float averageLogProbability, float compressionRatio, float noSpeechProbability, IEnumerable<int> tokens, int seek)
+ {
+ Argument.AssertNotNull(text, nameof(text));
+ Argument.AssertNotNull(tokens, nameof(tokens));
+
+ Id = id;
+ Start = start;
+ End = end;
+ Text = text;
+ Temperature = temperature;
+ AverageLogProbability = averageLogProbability;
+ CompressionRatio = compressionRatio;
+ NoSpeechProbability = noSpeechProbability;
+ Tokens = tokens.ToList();
+ Seek = seek;
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AudioTranscriptionSegment"/>. </summary>
+ /// <param name="id"> The 0-based index of this segment within a transcription. </param>
+ /// <param name="start"> The time at which this segment started relative to the beginning of the transcribed audio. </param>
+ /// <param name="end"> The time at which this segment ended relative to the beginning of the transcribed audio. </param>
+ /// <param name="text"> The transcribed text that was part of this audio segment. </param>
+ /// <param name="temperature"> The temperature score associated with this audio segment. </param>
+ /// <param name="averageLogProbability"> The average log probability associated with this audio segment. </param>
+ /// <param name="compressionRatio"> The compression ratio of this audio segment. </param>
+ /// <param name="noSpeechProbability"> The probability of no speech detection within this audio segment. </param>
+ /// <param name="tokens"> The token IDs matching the transcribed text in this audio segment. </param>
+ /// <param name="seek">
+ /// The seek position associated with the processing of this audio segment.
+ /// Seek positions are expressed as hundredths of seconds.
+ /// The model may process several segments from a single seek position, so while the seek position will never represent
+ /// a later time than the segment's start, the segment's start may represent a significantly later time than the
+ /// segment's associated seek position.
+ /// </param>
+ /// <param name="serializedAdditionalRawData"> Keeps track of any properties unknown to the library. </param>
+ internal AudioTranscriptionSegment(int id, TimeSpan start, TimeSpan end, string text, float temperature, float averageLogProbability, float compressionRatio, float noSpeechProbability, IReadOnlyList<int> tokens, int seek, IDictionary<string, BinaryData> serializedAdditionalRawData)
+ {
+ Id = id;
+ Start = start;
+ End = end;
+ Text = text;
+ Temperature = temperature;
+ AverageLogProbability = averageLogProbability;
+ CompressionRatio = compressionRatio;
+ NoSpeechProbability = noSpeechProbability;
+ Tokens = tokens;
+ Seek = seek;
+ _serializedAdditionalRawData = serializedAdditionalRawData;
+ }
+
+ /// <summary> Initializes a new instance of <see cref="AudioTranscriptionSegment"/> for deserialization. </summary>
+ internal AudioTranscriptionSegment()
+ {
+ }
+
+ /// <summary> The 0-based index of this segment within a transcription. </summary>
+ public int Id { get; }
+ /// <summary> The time at which this segment started relative to the beginning of the transcribed audio. </summary>
+ public TimeSpan Start { get; }
+ /// <summary> The time at which this segment ended relative to the beginning of the transcribed audio. </summary>
+ public TimeSpan End { get; }
+ /// <summary> The transcribed text that was part of this audio segment. </summary>
+ public string Text { get; }
+ /// <summary> The temperature score associated with this audio segment. </summary>
+ public float Temperature { get; }
+ /// <summary> The average log probability associated with this audio segment. </summary>
+ public float AverageLogProbability { get; }
+ /// <summary> The compression ratio of this audio segment. </summary>
+ public float CompressionRatio { get; }
+ /// <summary> The probability of no speech detection within this audio segment. </summary>
+ public float NoSpeechProbability { get; }
+ /// <summary> The token IDs matching the transcribed text in this audio segment. </summary>
+ public IReadOnlyList<int> Tokens { get; }
+ /// <summary>
+ /// The seek position associated with the processing of this audio segment.
+ /// Seek positions are expressed as hundredths of seconds.
+ /// The model may process several segments from a single seek position, so while the seek position will never represent
+ /// a later time than the segment's start, the segment's start may represent a significantly later time than the
+ /// segment's associated seek position.
+ /// </summary>
+ public int Seek { get; }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionTimestampGranularity.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionTimestampGranularity.cs
new file mode 100644
index 000000000000..b0ae51145060
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionTimestampGranularity.cs
@@ -0,0 +1,57 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.AI.OpenAI
+{
+ /// <summary> Defines the timestamp granularities that can be requested on a verbose transcription response. </summary>
+ public readonly partial struct AudioTranscriptionTimestampGranularity : IEquatable<AudioTranscriptionTimestampGranularity>
+ {
+ private readonly string _value;
+
+ /// <summary> Initializes a new instance of <see cref="AudioTranscriptionTimestampGranularity"/>. </summary>
+ /// <exception cref="ArgumentNullException"> <paramref name="value"/> is null. </exception>
+ public AudioTranscriptionTimestampGranularity(string value)
+ {
+ _value = value ?? throw new ArgumentNullException(nameof(value));
+ }
+
+ private const string WordValue = "word";
+ private const string SegmentValue = "segment";
+
+ /// <summary>
+ /// Indicates that responses should include timing information about each transcribed word. Note that generating word
+ /// timestamp information will incur additional response latency.
+ /// </summary>
+ public static AudioTranscriptionTimestampGranularity Word { get; } = new AudioTranscriptionTimestampGranularity(WordValue);
+ /// <summary>
+ /// Indicates that responses should include timing and other information about each transcribed audio segment. Audio
+ /// segment timestamp information does not incur any additional latency.
+ /// </summary>
+ public static AudioTranscriptionTimestampGranularity Segment { get; } = new AudioTranscriptionTimestampGranularity(SegmentValue);
+ /// <summary> Determines if two <see cref="AudioTranscriptionTimestampGranularity"/> values are the same. </summary>
+ public static bool operator ==(AudioTranscriptionTimestampGranularity left, AudioTranscriptionTimestampGranularity right) => left.Equals(right);
+ /// <summary> Determines if two <see cref="AudioTranscriptionTimestampGranularity"/> values are not the same. </summary>
+ public static bool operator !=(AudioTranscriptionTimestampGranularity left, AudioTranscriptionTimestampGranularity right) => !left.Equals(right);
+ /// <summary> Converts a <see cref="string"/> to a <see cref="AudioTranscriptionTimestampGranularity"/>. </summary>
+ public static implicit operator AudioTranscriptionTimestampGranularity(string value) => new AudioTranscriptionTimestampGranularity(value);
+
+ /// <inheritdoc />
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override bool Equals(object obj) => obj is AudioTranscriptionTimestampGranularity other && Equals(other);
+ /// <inheritdoc />
+ public bool Equals(AudioTranscriptionTimestampGranularity other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+ /// <inheritdoc />
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0;
+ /// <inheritdoc />
+ public override string ToString() => _value;
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionWord.Serialization.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionWord.Serialization.cs
new file mode 100644
index 000000000000..fad645758a38
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionWord.Serialization.cs
@@ -0,0 +1,158 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.AI.OpenAI
+{
+ public partial class AudioTranscriptionWord : IUtf8JsonSerializable, IJsonModel
+ {
+ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ writer.WriteStartObject();
+ JsonModelWriteCore(writer, options);
+ writer.WriteEndObject();
+ }
+
+ /// The JSON writer.
+ /// The client options for reading and writing models.
+ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioTranscriptionWord)} does not support writing '{format}' format.");
+ }
+
+ writer.WritePropertyName("word"u8);
+ writer.WriteStringValue(Word);
+ writer.WritePropertyName("start"u8);
+ writer.WriteNumberValue(Convert.ToDouble(Start.ToString("s\\.FFF")));
+ writer.WritePropertyName("end"u8);
+ writer.WriteNumberValue(Convert.ToDouble(End.ToString("s\\.FFF")));
+ if (options.Format != "W" && _serializedAdditionalRawData != null)
+ {
+ foreach (var item in _serializedAdditionalRawData)
+ {
+ writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+ writer.WriteRawValue(item.Value);
+#else
+ using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions))
+ {
+ JsonSerializer.Serialize(writer, document.RootElement);
+ }
+#endif
+ }
+ }
+ }
+
+ AudioTranscriptionWord IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioTranscriptionWord)} does not support reading '{format}' format.");
+ }
+
+ using JsonDocument document = JsonDocument.ParseValue(ref reader);
+ return DeserializeAudioTranscriptionWord(document.RootElement, options);
+ }
+
+ internal static AudioTranscriptionWord DeserializeAudioTranscriptionWord(JsonElement element, ModelReaderWriterOptions options = null)
+ {
+ options ??= ModelSerializationExtensions.WireOptions;
+
+ if (element.ValueKind == JsonValueKind.Null)
+ {
+ return null;
+ }
+ string word = default;
+ TimeSpan start = default;
+ TimeSpan end = default;
+ IDictionary serializedAdditionalRawData = default;
+ Dictionary rawDataDictionary = new Dictionary();
+ foreach (var property in element.EnumerateObject())
+ {
+ if (property.NameEquals("word"u8))
+ {
+ word = property.Value.GetString();
+ continue;
+ }
+ if (property.NameEquals("start"u8))
+ {
+ start = TimeSpan.FromSeconds(property.Value.GetDouble());
+ continue;
+ }
+ if (property.NameEquals("end"u8))
+ {
+ end = TimeSpan.FromSeconds(property.Value.GetDouble());
+ continue;
+ }
+ if (options.Format != "W")
+ {
+ rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
+ }
+ }
+ serializedAdditionalRawData = rawDataDictionary;
+ return new AudioTranscriptionWord(word, start, end, serializedAdditionalRawData);
+ }
+
+ BinaryData IPersistableModel.Write(ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ return ModelReaderWriter.Write(this, options);
+ default:
+ throw new FormatException($"The model {nameof(AudioTranscriptionWord)} does not support writing '{options.Format}' format.");
+ }
+ }
+
+ AudioTranscriptionWord IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ {
+ using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioTranscriptionWord(document.RootElement, options);
+ }
+ default:
+ throw new FormatException($"The model {nameof(AudioTranscriptionWord)} does not support reading '{options.Format}' format.");
+ }
+ }
+
+ string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+ /// Deserializes the model from a raw response.
+ /// The response to deserialize the model from.
+ internal static AudioTranscriptionWord FromResponse(Response response)
+ {
+ using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioTranscriptionWord(document.RootElement);
+ }
+
+ /// Convert into a .
+ internal virtual RequestContent ToRequestContent()
+ {
+ var content = new Utf8JsonRequestContent();
+ content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions);
+ return content;
+ }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionWord.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionWord.cs
new file mode 100644
index 000000000000..9fa92dfadb66
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionWord.cs
@@ -0,0 +1,87 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.AI.OpenAI
+{
+ /// Extended information about a single transcribed word, as provided on responses when the 'word' timestamp granularity is provided.
+ public partial class AudioTranscriptionWord
+ {
+ ///
+ /// Keeps track of any properties unknown to the library.
+ ///
+ /// To assign an object to the value of this property use .
+ ///
+ ///
+ /// To assign an already formatted json string to this property use .
+ ///
+ ///
+ /// Examples:
+ ///
+ /// -
+ /// BinaryData.FromObjectAsJson("foo")
+ /// Creates a payload of "foo".
+ ///
+ /// -
+ /// BinaryData.FromString("\"foo\"")
+ /// Creates a payload of "foo".
+ ///
+ /// -
+ /// BinaryData.FromObjectAsJson(new { key = "value" })
+ /// Creates a payload of { "key": "value" }.
+ ///
+ /// -
+ /// BinaryData.FromString("{\"key\": \"value\"}")
+ /// Creates a payload of { "key": "value" }.
+ ///
+ ///
+ ///
+ ///
+ private IDictionary _serializedAdditionalRawData;
+
+ /// Initializes a new instance of .
+ /// The textual content of the word.
+ /// The start time of the word relative to the beginning of the audio, expressed in seconds.
+ /// The end time of the word relative to the beginning of the audio, expressed in seconds.
+ /// is null.
+ internal AudioTranscriptionWord(string word, TimeSpan start, TimeSpan end)
+ {
+ Argument.AssertNotNull(word, nameof(word));
+
+ Word = word;
+ Start = start;
+ End = end;
+ }
+
+ /// Initializes a new instance of .
+ /// The textual content of the word.
+ /// The start time of the word relative to the beginning of the audio, expressed in seconds.
+ /// The end time of the word relative to the beginning of the audio, expressed in seconds.
+ /// Keeps track of any properties unknown to the library.
+ internal AudioTranscriptionWord(string word, TimeSpan start, TimeSpan end, IDictionary serializedAdditionalRawData)
+ {
+ Word = word;
+ Start = start;
+ End = end;
+ _serializedAdditionalRawData = serializedAdditionalRawData;
+ }
+
+ /// Initializes a new instance of for deserialization.
+ internal AudioTranscriptionWord()
+ {
+ }
+
+ /// The textual content of the word.
+ public string Word { get; }
+ /// The start time of the word relative to the beginning of the audio, expressed in seconds.
+ public TimeSpan Start { get; }
+ /// The end time of the word relative to the beginning of the audio, expressed in seconds.
+ public TimeSpan End { get; }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslation.Serialization.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslation.Serialization.cs
new file mode 100644
index 000000000000..b97c051403e6
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslation.Serialization.cs
@@ -0,0 +1,214 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.AI.OpenAI
+{
+ public partial class AudioTranslation : IUtf8JsonSerializable, IJsonModel
+ {
+ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ writer.WriteStartObject();
+ JsonModelWriteCore(writer, options);
+ writer.WriteEndObject();
+ }
+
+ /// The JSON writer.
+ /// The client options for reading and writing models.
+ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioTranslation)} does not support writing '{format}' format.");
+ }
+
+ writer.WritePropertyName("text"u8);
+ writer.WriteStringValue(Text);
+ if (Optional.IsDefined(InternalAudioTaskLabel))
+ {
+ writer.WritePropertyName("task"u8);
+ writer.WriteStringValue(InternalAudioTaskLabel.Value.ToString());
+ }
+ if (Optional.IsDefined(Language))
+ {
+ writer.WritePropertyName("language"u8);
+ writer.WriteStringValue(Language);
+ }
+ if (Optional.IsDefined(Duration))
+ {
+ writer.WritePropertyName("duration"u8);
+ writer.WriteNumberValue(Convert.ToDouble(Duration.Value.ToString("s\\.FFF")));
+ }
+ if (Optional.IsCollectionDefined(Segments))
+ {
+ writer.WritePropertyName("segments"u8);
+ writer.WriteStartArray();
+ foreach (var item in Segments)
+ {
+ writer.WriteObjectValue(item, options);
+ }
+ writer.WriteEndArray();
+ }
+ if (options.Format != "W" && _serializedAdditionalRawData != null)
+ {
+ foreach (var item in _serializedAdditionalRawData)
+ {
+ writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+ writer.WriteRawValue(item.Value);
+#else
+ using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions))
+ {
+ JsonSerializer.Serialize(writer, document.RootElement);
+ }
+#endif
+ }
+ }
+ }
+
+ AudioTranslation IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioTranslation)} does not support reading '{format}' format.");
+ }
+
+ using JsonDocument document = JsonDocument.ParseValue(ref reader);
+ return DeserializeAudioTranslation(document.RootElement, options);
+ }
+
+ internal static AudioTranslation DeserializeAudioTranslation(JsonElement element, ModelReaderWriterOptions options = null)
+ {
+ options ??= ModelSerializationExtensions.WireOptions;
+
+ if (element.ValueKind == JsonValueKind.Null)
+ {
+ return null;
+ }
+ string text = default;
+ AudioTaskLabel? task = default;
+ string language = default;
+ TimeSpan? duration = default;
+ IReadOnlyList segments = default;
+ IDictionary serializedAdditionalRawData = default;
+ Dictionary rawDataDictionary = new Dictionary();
+ foreach (var property in element.EnumerateObject())
+ {
+ if (property.NameEquals("text"u8))
+ {
+ text = property.Value.GetString();
+ continue;
+ }
+ if (property.NameEquals("task"u8))
+ {
+ if (property.Value.ValueKind == JsonValueKind.Null)
+ {
+ continue;
+ }
+ task = new AudioTaskLabel(property.Value.GetString());
+ continue;
+ }
+ if (property.NameEquals("language"u8))
+ {
+ language = property.Value.GetString();
+ continue;
+ }
+ if (property.NameEquals("duration"u8))
+ {
+ if (property.Value.ValueKind == JsonValueKind.Null)
+ {
+ continue;
+ }
+ duration = TimeSpan.FromSeconds(property.Value.GetDouble());
+ continue;
+ }
+ if (property.NameEquals("segments"u8))
+ {
+ if (property.Value.ValueKind == JsonValueKind.Null)
+ {
+ continue;
+ }
+ List array = new List();
+ foreach (var item in property.Value.EnumerateArray())
+ {
+ array.Add(AudioTranslationSegment.DeserializeAudioTranslationSegment(item, options));
+ }
+ segments = array;
+ continue;
+ }
+ if (options.Format != "W")
+ {
+ rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
+ }
+ }
+ serializedAdditionalRawData = rawDataDictionary;
+ return new AudioTranslation(
+ text,
+ task,
+ language,
+ duration,
+ segments ?? new ChangeTrackingList(),
+ serializedAdditionalRawData);
+ }
+
+ BinaryData IPersistableModel.Write(ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ return ModelReaderWriter.Write(this, options);
+ default:
+ throw new FormatException($"The model {nameof(AudioTranslation)} does not support writing '{options.Format}' format.");
+ }
+ }
+
+ AudioTranslation IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+
+ switch (format)
+ {
+ case "J":
+ {
+ using JsonDocument document = JsonDocument.Parse(data, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioTranslation(document.RootElement, options);
+ }
+ default:
+ throw new FormatException($"The model {nameof(AudioTranslation)} does not support reading '{options.Format}' format.");
+ }
+ }
+
+ string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
+
+ /// Deserializes the model from a raw response.
+ /// The response to deserialize the model from.
+ internal static AudioTranslation FromResponse(Response response)
+ {
+ using var document = JsonDocument.Parse(response.Content, ModelSerializationExtensions.JsonDocumentOptions);
+ return DeserializeAudioTranslation(document.RootElement);
+ }
+
+ /// Convert into a .
+ internal virtual RequestContent ToRequestContent()
+ {
+ var content = new Utf8JsonRequestContent();
+ content.JsonWriter.WriteObjectValue(this, ModelSerializationExtensions.WireOptions);
+ return content;
+ }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslation.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslation.cs
new file mode 100644
index 000000000000..476c481e3824
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslation.cs
@@ -0,0 +1,98 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.Collections.Generic;
+
+namespace Azure.AI.OpenAI
+{
+ /// Result information for an operation that translated spoken audio into written text.
+ public partial class AudioTranslation
+ {
+ ///
+ /// Keeps track of any properties unknown to the library.
+ ///
+ /// To assign an object to the value of this property use .
+ ///
+ ///
+ /// To assign an already formatted json string to this property use .
+ ///
+ ///
+ /// Examples:
+ ///
+ /// -
+ /// BinaryData.FromObjectAsJson("foo")
+ /// Creates a payload of "foo".
+ ///
+ /// -
+ /// BinaryData.FromString("\"foo\"")
+ /// Creates a payload of "foo".
+ ///
+ /// -
+ /// BinaryData.FromObjectAsJson(new { key = "value" })
+ /// Creates a payload of { "key": "value" }.
+ ///
+ /// -
+ /// BinaryData.FromString("{\"key\": \"value\"}")
+ /// Creates a payload of { "key": "value" }.
+ ///
+ ///
+ ///
+ ///
+ private IDictionary _serializedAdditionalRawData;
+
+ /// Initializes a new instance of .
+ /// The translated text for the provided audio data.
+ /// is null.
+ internal AudioTranslation(string text)
+ {
+ Argument.AssertNotNull(text, nameof(text));
+
+ Text = text;
+ Segments = new ChangeTrackingList();
+ }
+
+ /// Initializes a new instance of .
+ /// The translated text for the provided audio data.
+ /// The label that describes which operation type generated the accompanying response data.
+ ///
+ /// The spoken language that was detected in the translated audio data.
+ /// This is expressed as a two-letter ISO-639-1 language code like 'en' or 'fr'.
+ ///
+ /// The total duration of the audio processed to produce accompanying translation information.
+ /// A collection of information about the timing, probabilities, and other detail of each processed audio segment.
+ /// Keeps track of any properties unknown to the library.
+ internal AudioTranslation(string text, AudioTaskLabel? internalAudioTaskLabel, string language, TimeSpan? duration, IReadOnlyList segments, IDictionary serializedAdditionalRawData)
+ {
+ Text = text;
+ InternalAudioTaskLabel = internalAudioTaskLabel;
+ Language = language;
+ Duration = duration;
+ Segments = segments;
+ _serializedAdditionalRawData = serializedAdditionalRawData;
+ }
+
+ /// Initializes a new instance of for deserialization.
+ internal AudioTranslation()
+ {
+ }
+
+ /// The translated text for the provided audio data.
+ public string Text { get; }
+ /// The label that describes which operation type generated the accompanying response data.
+ public AudioTaskLabel? InternalAudioTaskLabel { get; }
+ ///
+ /// The spoken language that was detected in the translated audio data.
+ /// This is expressed as a two-letter ISO-639-1 language code like 'en' or 'fr'.
+ ///
+ public string Language { get; }
+ /// The total duration of the audio processed to produce accompanying translation information.
+ public TimeSpan? Duration { get; }
+ /// A collection of information about the timing, probabilities, and other detail of each processed audio segment.
+ public IReadOnlyList Segments { get; }
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationFormat.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationFormat.cs
new file mode 100644
index 000000000000..0c8068cb6afe
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationFormat.cs
@@ -0,0 +1,63 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ComponentModel;
+
+namespace Azure.AI.OpenAI
+{
+ /// Defines available options for the underlying response format of output translation information.
+ public readonly partial struct AudioTranslationFormat : IEquatable
+ {
+ private readonly string _value;
+
+ /// Initializes a new instance of .
+ /// is null.
+ public AudioTranslationFormat(string value)
+ {
+ _value = value ?? throw new ArgumentNullException(nameof(value));
+ }
+
+ private const string SimpleValue = "json";
+ private const string VerboseValue = "verbose_json";
+ private const string InternalPlainTextValue = "text";
+ private const string SrtValue = "srt";
+ private const string VttValue = "vtt";
+
+ /// Use a response body that is a JSON object containing a single 'text' field for the translation.
+ public static AudioTranslationFormat Simple { get; } = new AudioTranslationFormat(SimpleValue);
+ ///
+ /// Use a response body that is a JSON object containing translation text along with timing, segments, and other
+ /// metadata.
+ ///
+ public static AudioTranslationFormat Verbose { get; } = new AudioTranslationFormat(VerboseValue);
+ /// Use a response body that is plain text containing the raw, unannotated translation.
+ public static AudioTranslationFormat InternalPlainText { get; } = new AudioTranslationFormat(InternalPlainTextValue);
+ /// Use a response body that is plain text in SubRip (SRT) format that also includes timing information.
+ public static AudioTranslationFormat Srt { get; } = new AudioTranslationFormat(SrtValue);
+ /// Use a response body that is plain text in Web Video Text Tracks (VTT) format that also includes timing information.
+ public static AudioTranslationFormat Vtt { get; } = new AudioTranslationFormat(VttValue);
+ /// Determines if two values are the same.
+ public static bool operator ==(AudioTranslationFormat left, AudioTranslationFormat right) => left.Equals(right);
+ /// Determines if two values are not the same.
+ public static bool operator !=(AudioTranslationFormat left, AudioTranslationFormat right) => !left.Equals(right);
+ /// Converts a to a .
+ public static implicit operator AudioTranslationFormat(string value) => new AudioTranslationFormat(value);
+
+ ///
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override bool Equals(object obj) => obj is AudioTranslationFormat other && Equals(other);
+ ///
+ public bool Equals(AudioTranslationFormat other) => string.Equals(_value, other._value, StringComparison.InvariantCultureIgnoreCase);
+
+ ///
+ [EditorBrowsable(EditorBrowsableState.Never)]
+ public override int GetHashCode() => _value != null ? StringComparer.InvariantCultureIgnoreCase.GetHashCode(_value) : 0;
+ ///
+ public override string ToString() => _value;
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationOptions.Serialization.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationOptions.Serialization.cs
new file mode 100644
index 000000000000..c8c33bd61eea
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationOptions.Serialization.cs
@@ -0,0 +1,264 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// <auto-generated/>
+
+#nullable disable
+
+using System;
+using System.ClientModel.Primitives;
+using System.Collections.Generic;
+using System.IO;
+using System.Text.Json;
+using Azure.Core;
+
+namespace Azure.AI.OpenAI
+{
+ public partial class AudioTranslationOptions : IUtf8JsonSerializable, IJsonModel
+ {
+ void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, ModelSerializationExtensions.WireOptions);
+
+ void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ writer.WriteStartObject();
+ JsonModelWriteCore(writer, options);
+ writer.WriteEndObject();
+ }
+
+ /// The JSON writer.
+ /// The client options for reading and writing models.
+ protected virtual void JsonModelWriteCore(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioTranslationOptions)} does not support writing '{format}' format.");
+ }
+
+ writer.WritePropertyName("file"u8);
+#if NET6_0_OR_GREATER
+ writer.WriteRawValue(global::System.BinaryData.FromStream(AudioData));
+#else
+ using (JsonDocument document = JsonDocument.Parse(BinaryData.FromStream(AudioData), ModelSerializationExtensions.JsonDocumentOptions))
+ {
+ JsonSerializer.Serialize(writer, document.RootElement);
+ }
+#endif
+ if (Optional.IsDefined(Filename))
+ {
+ writer.WritePropertyName("filename"u8);
+ writer.WriteStringValue(Filename);
+ }
+ if (Optional.IsDefined(ResponseFormat))
+ {
+ writer.WritePropertyName("response_format"u8);
+ writer.WriteStringValue(ResponseFormat.Value.ToString());
+ }
+ if (Optional.IsDefined(Prompt))
+ {
+ writer.WritePropertyName("prompt"u8);
+ writer.WriteStringValue(Prompt);
+ }
+ if (Optional.IsDefined(Temperature))
+ {
+ writer.WritePropertyName("temperature"u8);
+ writer.WriteNumberValue(Temperature.Value);
+ }
+ if (Optional.IsDefined(DeploymentName))
+ {
+ writer.WritePropertyName("model"u8);
+ writer.WriteStringValue(DeploymentName);
+ }
+ if (options.Format != "W" && _serializedAdditionalRawData != null)
+ {
+ foreach (var item in _serializedAdditionalRawData)
+ {
+ writer.WritePropertyName(item.Key);
+#if NET6_0_OR_GREATER
+ writer.WriteRawValue(item.Value);
+#else
+ using (JsonDocument document = JsonDocument.Parse(item.Value, ModelSerializationExtensions.JsonDocumentOptions))
+ {
+ JsonSerializer.Serialize(writer, document.RootElement);
+ }
+#endif
+ }
+ }
+ }
+
+ AudioTranslationOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
+ {
+ var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
+ if (format != "J")
+ {
+ throw new FormatException($"The model {nameof(AudioTranslationOptions)} does not support reading '{format}' format.");
+ }
+
+ using JsonDocument document = JsonDocument.ParseValue(ref reader);
+ return DeserializeAudioTranslationOptions(document.RootElement, options);
+ }
+
+ internal static AudioTranslationOptions DeserializeAudioTranslationOptions(JsonElement element, ModelReaderWriterOptions options = null)
+ {
+ options ??= ModelSerializationExtensions.WireOptions;
+
+ if (element.ValueKind == JsonValueKind.Null)
+ {
+ return null;
+ }
+ Stream file = default;
+ string filename = default;
+ AudioTranslationFormat? responseFormat = default;
+ string prompt = default;
+ float? temperature = default;
+ string model = default;
+ IDictionary serializedAdditionalRawData = default;
+ Dictionary rawDataDictionary = new Dictionary