
Commit 49fcbbf

feat: Updated
1 parent: 3cf175e

File tree: 1,405 files changed (+78,022 additions, -36,059 deletions)


docs/index.md

Lines changed: 7 additions & 7 deletions
@@ -24,12 +24,12 @@ Examples and documentation can be found here: https://tryagi.github.io/OpenAI/
 using var api = new OpenAiApi("API_KEY");
 string response = await api.Chat.CreateChatCompletionAsync(
     messages: ["Generate five random words."],
-    model: CreateChatCompletionRequestModel.Gpt4oMini);
+    model: ModelIdsEnum.Gpt4oMini);
 Console.WriteLine(response); // "apple, banana, cherry, date, elderberry"
 
 var enumerable = api.Chat.CreateChatCompletionAsStreamAsync(
     messages: ["Generate five random words."],
-    model: CreateChatCompletionRequestModel.Gpt4oMini);
+    model: ModelIdsEnum.Gpt4oMini);
 
 await foreach (string response in enumerable)
 {
@@ -96,7 +96,7 @@ var messages = new List<ChatCompletionRequestMessage>
     "You are a helpful weather assistant.".AsSystemMessage(),
     "What is the current temperature in Dubai, UAE in Celsius?".AsUserMessage(),
 };
-var model = CreateChatCompletionRequestModel.Gpt4oMini;
+var model = ModelIdsEnum.Gpt4oMini;
 var result = await api.Chat.CreateChatCompletionAsync(
     messages,
     model: model,
@@ -141,7 +141,7 @@ using var api = new OpenAiApi("API_KEY");
 
 var response = await api.Chat.CreateChatCompletionAsAsync<Weather>(
     messages: ["Generate random weather."],
-    model: CreateChatCompletionRequestModel.Gpt4oMini,
+    model: ModelIdsEnum.Gpt4oMini,
     jsonSerializerOptions: new JsonSerializerOptions
     {
         Converters = {new JsonStringEnumConverter()},
@@ -150,7 +150,7 @@ var response = await api.Chat.CreateChatCompletionAsAsync<Weather>(
 var response = await api.Chat.CreateChatCompletionAsAsync(
     jsonTypeInfo: SourceGeneratedContext.Default.Weather,
     messages: ["Generate random weather."],
-    model: CreateChatCompletionRequestModel.Gpt4oMini);
+    model: ModelIdsEnum.Gpt4oMini);
 
 // response.Value1 contains the structured output
 // response.Value2 contains the CreateChatCompletionResponse object
@@ -198,10 +198,10 @@ There are also non-try methods that throw an exception if the value is not found.
 using OpenAI;
 
 // You can try to get the enum from string using:
-var model = CreateChatCompletionRequestModelExtensions.ToEnum("gpt-4o") ?? throw new Exception("Invalid model");
+var model = ModelIdsEnumExtensions.ToEnum("gpt-4o") ?? throw new Exception("Invalid model");
 
 // Chat
-var model = CreateChatCompletionRequestModel.Gpt4oMini;
+var model = ModelIdsEnum.Gpt4oMini;
 double? priceInUsd = model.TryGetPriceInUsd(
     inputTokens: 500,
     outputTokens: 500)
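
The docs changes above are a mechanical rename: the generated CreateChatCompletionRequestModel enum is now ModelIdsEnum, and its companion CreateChatCompletionRequestModelExtensions class is now ModelIdsEnumExtensions; the member names (Gpt4o, Gpt4oMini, Gpt4o20240806) are unchanged at every call site in this commit. A minimal migration sketch, using only the API surface shown in the diff (the "gpt-4o-mini" id string is assumed to map to Gpt4oMini):

using var api = new OpenAiApi("API_KEY");

// Old: var model = CreateChatCompletionRequestModel.Gpt4oMini;
// New: only the containing type name changes.
var model = ModelIdsEnumExtensions.ToEnum("gpt-4o-mini") // null if the id string is unknown
            ?? ModelIdsEnum.Gpt4oMini;

string response = await api.Chat.CreateChatCompletionAsync(
    messages: ["Generate five random words."],
    model: model);
Console.WriteLine(response);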

docs/samples/Chat.ChatWithVision.md

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@ CreateChatCompletionResponse response = await api.Chat.CreateChatCompletionAsync
         "Please describe the following image.",
         H.Resources.images_dog_and_cat_png.AsBytes().AsUserMessage(mimeType: "image/png"),
     ],
-    model: CreateChatCompletionRequestModel.Gpt4o);
+    model: ModelIdsEnum.Gpt4o);
 
 Console.WriteLine("[ASSISTANT]:");
 Console.WriteLine($"{response.Choices[0].Message.Content}");

docs/samples/Chat.FunctionCalling.md

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ do
     requiresAction = false;
     CreateChatCompletionResponse chatCompletion = await api.Chat.CreateChatCompletionAsync(
         messages,
-        model: CreateChatCompletionRequestModel.Gpt4o20240806,
+        model: ModelIdsEnum.Gpt4o20240806,
         tools: tools);
 
     switch (chatCompletion.Choices[0].FinishReason)

docs/samples/Chat.FunctionCallingStreaming.md

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ do
     IAsyncEnumerable<CreateChatCompletionStreamResponse> chatUpdates
         = api.Chat.CreateChatCompletionAsStreamAsync(
             messages,
-            model: CreateChatCompletionRequestModel.Gpt4o20240806,
+            model: ModelIdsEnum.Gpt4o20240806,
             tools: tools);
 
     await foreach (CreateChatCompletionStreamResponse chatUpdate in chatUpdates)

docs/samples/Chat.SimpleChat.md

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ using var api = GetAuthenticatedClient();
 
 CreateChatCompletionResponse response = await api.Chat.CreateChatCompletionAsync(
     messages: ["Say 'this is a test.'"],
-    model: CreateChatCompletionRequestModel.Gpt4o);
+    model: ModelIdsEnum.Gpt4o);
 
 Console.WriteLine("[ASSISTANT]:");
 Console.WriteLine($"{response.Choices[0].Message.Content}");

docs/samples/Chat.SimpleChatStreaming.md

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ using var api = GetAuthenticatedClient();
 
 IAsyncEnumerable<CreateChatCompletionStreamResponse> enumerable = api.Chat.CreateChatCompletionAsStreamAsync(
     messages: ["Say 'this is a test.'"],
-    model: CreateChatCompletionRequestModel.Gpt4o);
+    model: ModelIdsEnum.Gpt4o);
 
 Console.WriteLine("[ASSISTANT]:");
 await foreach (CreateChatCompletionStreamResponse chatUpdate in enumerable)

docs/samples/Chat.StructuredOutputs.md

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ using var api = GetAuthenticatedClient();
 
 MathReasoning? mathReasoning = await api.Chat.CreateChatCompletionAsAsync<MathReasoning>(
     messages: ["How can I solve 8x + 7 = -23?"],
-    model: CreateChatCompletionRequestModel.Gpt4o20240806,
+    model: ModelIdsEnum.Gpt4o20240806,
     strict: true);
 
 Console.WriteLine($"Final answer: {mathReasoning?.FinalAnswer}");

docs/samples/Combination.CuriousCreatureCreator.md

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ using var api = GetAuthenticatedClient();
 
 // First, we'll use gpt-4o to have a creative helper imagine a twist on a household pet
 CreateChatCompletionResponse creativeWriterResult = await api.Chat.CreateChatCompletionAsync(
-    model: CreateChatCompletionRequestModel.Gpt4o,
+    model: ModelIdsEnum.Gpt4o,
     messages:
     [
         "You're a creative helper that specializes in brainstorming designs for concepts that fuse ordinary, mundane items with a fantastical touch. In particular, you can provide good one-paragraph descriptions of concept images.".AsSystemMessage(),

src/libs/tryAGI.OpenAI/AssistantClient.CreateRun.AsStream.cs

Lines changed: 36 additions & 27 deletions
@@ -118,6 +118,15 @@ public partial class AssistantsClient
     /// The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.<br/>
     /// Example: gpt-4o
     /// </param>
+    /// <param name="reasoningEffort">
+    /// **o-series models only** <br/>
+    /// Constrains effort on reasoning for <br/>
+    /// [reasoning models](https://platform.openai.com/docs/guides/reasoning).<br/>
+    /// Currently supported values are `low`, `medium`, and `high`. Reducing<br/>
+    /// reasoning effort can result in faster responses and fewer tokens used<br/>
+    /// on reasoning in a response.<br/>
+    /// Default Value: medium
+    /// </param>
     /// <param name="instructions">
     /// Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis.
     /// </param>
@@ -131,15 +140,20 @@ public partial class AssistantsClient
     /// Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.
     /// </param>
     /// <param name="metadata">
-    /// Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
+    /// Set of 16 key-value pairs that can be attached to an object. This can be<br/>
+    /// useful for storing additional information about the object in a structured<br/>
+    /// format, and querying for objects via API or the dashboard. <br/>
+    /// Keys are strings with a maximum length of 64 characters. Values are strings<br/>
+    /// with a maximum length of 512 characters.
     /// </param>
     /// <param name="temperature">
-    /// empty<br/>
+    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.<br/>
     /// Default Value: 1<br/>
     /// Example: 1
     /// </param>
     /// <param name="topP">
-    /// empty<br/>
+    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.<br/>
+    /// We generally recommend altering this or temperature but not both.<br/>
     /// Default Value: 1<br/>
     /// Example: 1
     /// </param>
@@ -152,52 +166,49 @@ public partial class AssistantsClient
     /// <param name="maxCompletionTokens">
     /// The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info.
     /// </param>
-    /// <param name="truncationStrategy">
-    /// Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.
-    /// </param>
-    /// <param name="toolChoice">
-    /// Controls which (if any) tool is called by the model.<br/>
-    /// `none` means the model will not call any tools and instead generates a message.<br/>
-    /// `auto` is the default value and means the model can pick between generating a message or calling one or more tools.<br/>
-    /// `required` means the model must call one or more tools before responding to the user.<br/>
-    /// Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
-    /// </param>
+    /// <param name="truncationStrategy"></param>
+    /// <param name="toolChoice"></param>
     /// <param name="parallelToolCalls">
-    /// Whether to enable [parallel function calling](/docs/guides/function-calling/parallel-function-calling) during tool use.
+    /// Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use.
    /// </param>
     /// <param name="responseFormat">
-    /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models/gpt-4o), [GPT-4 Turbo](/docs/models/gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.<br/>
-    /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which guarantees the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).<br/>
-    /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.<br/>
+    /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.<br/>
+    /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).<br/>
+    /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.<br/>
     /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
     /// </param>
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::System.InvalidOperationException"></exception>
+#if NET8_0_OR_GREATER
+    [global::System.Diagnostics.CodeAnalysis.Experimental(diagnosticId: "TRYAGI_OPENAI_BETA_001")]
+#endif
     public async IAsyncEnumerable<global::tryAGI.OpenAI.AssistantStreamEvent> CreateRunAsStreamAsync(
         string threadId,
         string assistantId,
         global::System.Collections.Generic.IList<global::tryAGI.OpenAI.CreateRunIncludeItem>? include = default,
         global::tryAGI.OpenAI.AnyOf<string, global::tryAGI.OpenAI.AssistantSupportedModels?>? model = default,
+        global::tryAGI.OpenAI.ReasoningEffort? reasoningEffort = default,
         string? instructions = default,
         string? additionalInstructions = default,
         global::System.Collections.Generic.IList<global::tryAGI.OpenAI.CreateMessageRequest>? additionalMessages = default,
-        global::System.Collections.Generic.IList<global::tryAGI.OpenAI.ToolsItem4>? tools = default,
+        global::System.Collections.Generic.IList<global::tryAGI.OpenAI.OneOf<global::tryAGI.OpenAI.AssistantToolsCode, global::tryAGI.OpenAI.AssistantToolsFileSearch, global::tryAGI.OpenAI.AssistantToolsFunction>>? tools = default,
         global::System.Collections.Generic.Dictionary<string, string>? metadata = default,
-        double? temperature = 1,
-        double? topP = 1,
+        double? temperature = default,
+        double? topP = default,
         bool? stream = default,
         int? maxPromptTokens = default,
         int? maxCompletionTokens = default,
-        global::tryAGI.OpenAI.TruncationObject? truncationStrategy = default,
-        global::tryAGI.OpenAI.AssistantsApiToolChoiceOption? toolChoice = default,
+        global::tryAGI.OpenAI.AllOf<global::tryAGI.OpenAI.TruncationObject, object>? truncationStrategy = default,
+        global::tryAGI.OpenAI.AllOf<global::tryAGI.OpenAI.AssistantsApiToolChoiceOption?, object>? toolChoice = default,
         bool? parallelToolCalls = default,
         global::tryAGI.OpenAI.AssistantsApiResponseFormatOption? responseFormat = default,
         [EnumeratorCancellation] global::System.Threading.CancellationToken cancellationToken = default)
     {
-        var request = new global::tryAGI.OpenAI.CreateRunRequest
+        var __request = new global::tryAGI.OpenAI.CreateRunRequest
         {
             AssistantId = assistantId,
             Model = model,
+            ReasoningEffort = reasoningEffort,
             Instructions = instructions,
             AdditionalInstructions = additionalInstructions,
             AdditionalMessages = additionalMessages,
@@ -208,9 +219,7 @@ public partial class AssistantsClient
             Stream = stream,
             MaxPromptTokens = maxPromptTokens,
             MaxCompletionTokens = maxCompletionTokens,
-            TruncationStrategy = truncationStrategy != null
-                ? (global::tryAGI.OpenAI.AllOf<global::tryAGI.OpenAI.TruncationObject, object>?)truncationStrategy
-                : null,
+            TruncationStrategy = truncationStrategy,
             ToolChoice = toolChoice,
             ParallelToolCalls = parallelToolCalls,
             ResponseFormat = responseFormat,
@@ -219,7 +228,7 @@ public partial class AssistantsClient
         await foreach (var response in CreateRunAsStreamAsync(
             threadId: threadId,
             include: include,
-            request: request,
+            request: __request,
             cancellationToken: cancellationToken).ConfigureAwait(false))
         {
             yield return response;
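
A brief usage sketch of the updated streaming signature. This is illustrative only: it assumes an already-configured AssistantsClient instance (client below), the thread/assistant ids are placeholders, and the ReasoningEffort.Low member name is inferred from the low/medium/high values in the doc comment above.

// Sketch under the assumptions above; only the parameter names and types
// come from the signature in this diff.
await foreach (var streamEvent in client.CreateRunAsStreamAsync(
    threadId: "thread_abc123",            // placeholder id
    assistantId: "asst_abc123",           // placeholder id
    reasoningEffort: ReasoningEffort.Low, // new in this commit; o-series models only
    cancellationToken: CancellationToken.None))
{
    Console.WriteLine(streamEvent);
}

Note that temperature and topP now default to null instead of 1, so omitting them defers to the server-side default (documented as 1) rather than pinning the value on every request from the client.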
