
Commit 3d32477

feat: Updated OpenAPI spec
Committed by github-actions[bot] · 1 parent db9d198

25 files changed, 355 insertions(+), 127 deletions(-)

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.AudioClient.CreateSpeech.g.cs

Lines changed: 1 addition & 1 deletion
@@ -158,7 +158,7 @@ partial void ProcessCreateSpeechResponseContent(
     /// The text to generate audio for. The maximum length is 4096 characters.
     /// </param>
     /// <param name="voice">
-    /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options).
+    /// The voice to use when generating the audio. Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options).
     /// </param>
     /// <param name="responseFormat">
     /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.<br/>
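A minimal usage sketch for the expanded voice list. The `OpenAiApi` entry point, the enum names, and the return type are assumptions inferred from the doc comments, not verified against the generated client; only the voice list itself comes from this diff:

```cs
// Hypothetical sketch: OpenAiApi, enum names, and return type are assumptions.
using System;
using System.IO;
using tryAGI.OpenAI;

using var api = new OpenAiApi(Environment.GetEnvironmentVariable("OPENAI_API_KEY")!);

var audioBytes = await api.Audio.CreateSpeechAsync(
    model: CreateSpeechRequestModel.Tts1,                // assumed enum member
    input: "The quick brown fox jumped over the lazy dog.",
    voice: CreateSpeechRequestVoice.Coral,               // newly added alongside Ash and Sage
    responseFormat: CreateSpeechRequestResponseFormat.Mp3);

// Assuming the call returns the raw audio payload as bytes:
await File.WriteAllBytesAsync("speech.mp3", audioBytes);
```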

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.ChatClient.CreateChatCompletion.g.cs

Lines changed: 1 addition & 2 deletions
@@ -289,7 +289,6 @@ partial void ProcessCreateChatCompletionResponseContent(
     /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.<br/>
     /// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.<br/>
     /// - When not set, the default behavior is 'auto'.<br/>
-    /// When this parameter is set, the response body will include the `service_tier` utilized.<br/>
     /// Default Value: auto
     /// </param>
     /// <param name="stop">
@@ -354,7 +353,7 @@ partial void ProcessCreateChatCompletionResponseContent(
     global::tryAGI.OpenAI.CreateChatCompletionRequestAudio? audio = default,
     double? presencePenalty = default,
     global::tryAGI.OpenAI.ResponseFormat? responseFormat = default,
-    int? seed = default,
+    long? seed = default,
     global::tryAGI.OpenAI.CreateChatCompletionRequestServiceTier? serviceTier = default,
     global::tryAGI.OpenAI.OneOf<string, global::System.Collections.Generic.IList<string>>? stop = default,
     bool? stream = default,
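A sketch of passing the widened seed end to end. The entry point, message helper, and model identifier are assumptions; `seed` being `long?` is what this hunk changes:

```cs
// Hypothetical sketch: only the long? seed is taken from this diff.
using System;
using tryAGI.OpenAI;

using var api = new OpenAiApi(Environment.GetEnvironmentVariable("OPENAI_API_KEY")!);

var response = await api.Chat.CreateChatCompletionAsync(
    model: "gpt-4o-mini",                              // assumed identifier
    messages: new[] { "Say hello.".AsUserMessage() },  // assumed helper
    seed: 4_503_599_627_370_496L);                     // 2^52: did not fit in int?

// Determinism is best-effort; per the doc comment, compare system_fingerprint
// across calls to detect backend changes.
Console.WriteLine(response.SystemFingerprint);
```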

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.CompletionsClient.CreateCompletion.g.cs

Lines changed: 1 addition & 1 deletion
@@ -257,7 +257,7 @@ partial void ProcessCreateCompletionResponseContent(
     int? maxTokens = default,
     int? n = default,
     double? presencePenalty = default,
-    int? seed = default,
+    long? seed = default,
     global::tryAGI.OpenAI.OneOf<string, global::System.Collections.Generic.IList<string>>? stop = default,
     bool? stream = default,
     global::tryAGI.OpenAI.ChatCompletionStreamOptions? streamOptions = default,
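The same widening as the chat client, presumably because the OpenAPI spec types `seed` as a 64-bit integer; a quick illustration of why `int?` was too narrow:

```cs
using System;

// Values like this were unrepresentable through the old int? parameter:
long seed = 9_007_199_254_740_993L;       // well beyond int.MaxValue (2_147_483_647)
Console.WriteLine(seed > int.MaxValue);   // True; such a seed needs the long? overload
```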

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.EmbeddingsClient.CreateEmbedding.g.cs

Lines changed: 1 addition & 1 deletion
@@ -161,7 +161,7 @@ partial void ProcessCreateEmbeddingResponseContent(
     /// Creates an embedding vector representing the input text.
     /// </summary>
     /// <param name="input">
-    /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.<br/>
+    /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. Some models may also impose a limit on total number of tokens summed across inputs.<br/>
     /// Example: The quick brown fox jumped over the lazy dog
     /// </param>
     /// <param name="model">
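A batched-request sketch matching the constraints in the updated comment (at most 2048 inputs per array, plus the newly documented cap on summed tokens for some models). Entry point, parameter shapes, and response shape are assumptions:

```cs
// Hypothetical sketch: the 2048-entry and token limits come from the doc
// comment above; type and member names are assumptions.
using System;
using tryAGI.OpenAI;

using var api = new OpenAiApi(Environment.GetEnvironmentVariable("OPENAI_API_KEY")!);

var inputs = new[] { "first document", "second document" };  // up to 2048 entries
var response = await api.Embeddings.CreateEmbeddingAsync(
    input: inputs,                     // array form: one embedding per element
    model: "text-embedding-ada-002");  // 8192-token per-input limit per the docs
Console.WriteLine(response.Data.Count);  // assumed response shape
```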

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.IAudioClient.CreateSpeech.g.cs

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ public partial interface IAudioClient
     /// The text to generate audio for. The maximum length is 4096 characters.
     /// </param>
     /// <param name="voice">
-    /// The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options).
+    /// The voice to use when generating the audio. Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech#voice-options).
     /// </param>
     /// <param name="responseFormat">
     /// The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.<br/>

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.IChatClient.CreateChatCompletion.g.cs

Lines changed: 1 addition & 2 deletions
@@ -146,7 +146,6 @@ public partial interface IChatClient
     /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.<br/>
     /// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.<br/>
     /// - When not set, the default behavior is 'auto'.<br/>
-    /// When this parameter is set, the response body will include the `service_tier` utilized.<br/>
     /// Default Value: auto
     /// </param>
     /// <param name="stop">
@@ -211,7 +210,7 @@ public partial interface IChatClient
     global::tryAGI.OpenAI.CreateChatCompletionRequestAudio? audio = default,
     double? presencePenalty = default,
     global::tryAGI.OpenAI.ResponseFormat? responseFormat = default,
-    int? seed = default,
+    long? seed = default,
     global::tryAGI.OpenAI.CreateChatCompletionRequestServiceTier? serviceTier = default,
     global::tryAGI.OpenAI.OneOf<string, global::System.Collections.Generic.IList<string>>? stop = default,
     bool? stream = default,
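Because the interface changed too, any consumer or test double compiled against `IChatClient` must adopt `long?` as well. A hedged sketch of a wrapper that forwards the seed; the return type, message helper, and model identifier are assumptions:

```cs
// Hypothetical consumer: only the long? seed in IChatClient is from this diff.
using System.Threading.Tasks;
using tryAGI.OpenAI;

public sealed class DeterministicChat(IChatClient chat)
{
    public Task<CreateChatCompletionResponse> AskAsync(string question, long? seed = null) =>
        chat.CreateChatCompletionAsync(
            model: "gpt-4o-mini",                          // assumed identifier
            messages: new[] { question.AsUserMessage() },  // assumed helper
            seed: seed);                                   // long? per this commit
}
```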

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.ICompletionsClient.CreateCompletion.g.cs

Lines changed: 1 addition & 1 deletion
@@ -114,7 +114,7 @@ public partial interface ICompletionsClient
     int? maxTokens = default,
     int? n = default,
     double? presencePenalty = default,
-    int? seed = default,
+    long? seed = default,
     global::tryAGI.OpenAI.OneOf<string, global::System.Collections.Generic.IList<string>>? stop = default,
     bool? stream = default,
     global::tryAGI.OpenAI.ChatCompletionStreamOptions? streamOptions = default,

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.IEmbeddingsClient.CreateEmbedding.g.cs

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ public partial interface IEmbeddingsClient
     /// Creates an embedding vector representing the input text.
     /// </summary>
     /// <param name="input">
-    /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.<br/>
+    /// Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. Some models may also impose a limit on total number of tokens summed across inputs.<br/>
     /// Example: The quick brown fox jumped over the lazy dog
     /// </param>
     /// <param name="model">

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.IRealtimeClient.CreateRealtimeSession.g.cs

Lines changed: 6 additions & 3 deletions
@@ -54,10 +54,13 @@ public partial interface IRealtimeClient
     /// `shimmer` and `verse`.
     /// </param>
     /// <param name="inputAudioFormat">
-    /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+    /// The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.<br/>
+    /// For `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, <br/>
+    /// single channel (mono), and little-endian byte order.
     /// </param>
     /// <param name="outputAudioFormat">
-    /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+    /// The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.<br/>
+    /// For `pcm16`, output audio is sampled at a rate of 24kHz.
     /// </param>
     /// <param name="inputAudioTranscription">
     /// Configuration for input audio transcription, defaults to off and can be <br/>
@@ -90,8 +93,8 @@ public partial interface IRealtimeClient
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::System.InvalidOperationException"></exception>
     global::System.Threading.Tasks.Task<global::tryAGI.OpenAI.RealtimeSessionCreateResponse> CreateRealtimeSessionAsync(
-        global::tryAGI.OpenAI.RealtimeSessionCreateRequestModel model,
         global::System.Collections.Generic.IList<global::tryAGI.OpenAI.RealtimeSessionCreateRequestModalitie>? modalities = default,
+        global::tryAGI.OpenAI.RealtimeSessionCreateRequestModel? model = default,
         string? instructions = default,
         global::tryAGI.OpenAI.RealtimeSessionCreateRequestVoice? voice = default,
         global::tryAGI.OpenAI.RealtimeSessionCreateRequestInputAudioFormat? inputAudioFormat = default,
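With `model` now optional, a session can be requested with server defaults and only the audio formats pinned. A sketch; the parameter names and types come from the signature above, while the `api.Realtime` accessor and enum member spellings are assumptions:

```cs
// Hypothetical sketch: entry point and enum member spellings are assumptions.
using System;
using tryAGI.OpenAI;

using var api = new OpenAiApi(Environment.GetEnvironmentVariable("OPENAI_API_KEY")!);

var session = await api.Realtime.CreateRealtimeSessionAsync(
    // model is omitted: it is now RealtimeSessionCreateRequestModel? with a server default.
    inputAudioFormat: RealtimeSessionCreateRequestInputAudioFormat.Pcm16,
    outputAudioFormat: RealtimeSessionCreateRequestOutputAudioFormat.Pcm16);

// Per the new doc text: pcm16 input must be 16-bit, 24 kHz, mono, little-endian,
// and pcm16 output is sampled at 24 kHz.
```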

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.Models.CreateChatCompletionRequest.g.cs

Lines changed: 2 additions & 4 deletions
@@ -186,15 +186,14 @@ public sealed partial class CreateChatCompletionRequest
     /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
     /// </summary>
     [global::System.Text.Json.Serialization.JsonPropertyName("seed")]
-    public int? Seed { get; set; }
+    public long? Seed { get; set; }

     /// <summary>
     /// Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:<br/>
     /// - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted.<br/>
     /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.<br/>
     /// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.<br/>
     /// - When not set, the default behavior is 'auto'.<br/>
-    /// When this parameter is set, the response body will include the `service_tier` utilized.<br/>
     /// Default Value: auto
     /// </summary>
     [global::System.Text.Json.Serialization.JsonPropertyName("service_tier")]
@@ -424,7 +423,6 @@ public sealed partial class CreateChatCompletionRequest
     /// - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.<br/>
     /// - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee.<br/>
     /// - When not set, the default behavior is 'auto'.<br/>
-    /// When this parameter is set, the response body will include the `service_tier` utilized.<br/>
     /// Default Value: auto
     /// </param>
     /// <param name="stop">
@@ -488,7 +486,7 @@ public CreateChatCompletionRequest(
     global::tryAGI.OpenAI.CreateChatCompletionRequestAudio? audio,
     double? presencePenalty,
     global::tryAGI.OpenAI.ResponseFormat? responseFormat,
-    int? seed,
+    long? seed,
     global::tryAGI.OpenAI.CreateChatCompletionRequestServiceTier? serviceTier,
     global::tryAGI.OpenAI.OneOf<string, global::System.Collections.Generic.IList<string>>? stop,
     bool? stream,
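To show what the `int?` to `long?` change fixes at the JSON layer, a self-contained round-trip using a local stand-in class, so nothing about the generated type's other members is assumed:

```cs
// SeedProbe is a local stand-in mirroring only the property shown above.
using System;
using System.Text.Json;
using System.Text.Json.Serialization;

const string json = """{"seed": 4503599627370496}""";  // 2^52, larger than int.MaxValue

var parsed = JsonSerializer.Deserialize<SeedProbe>(json);
Console.WriteLine(parsed!.Seed);  // 4503599627370496

// With the old int? property the same payload would throw a JsonException,
// since the value cannot be converted to Int32.

sealed class SeedProbe
{
    [JsonPropertyName("seed")]
    public long? Seed { get; set; }  // was int? before this commit
}
```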
