Commit 407f43f

fix: Fixed.
1 parent 8639b99 commit 407f43f

820 files changed: +35169 -24047 lines

src/helpers/FixOpenApiSpec/FixOpenApiSpec.csproj

Lines changed: 1 addition & 0 deletions
@@ -9,6 +9,7 @@
   </PropertyGroup>
 
   <ItemGroup>
+    <PackageReference Include="AutoSDK" Version="0.28.1-dev.47" />
     <PackageReference Include="Microsoft.OpenApi.Readers" Version="1.6.24" />
   </ItemGroup>

src/helpers/FixOpenApiSpec/Program.cs

Lines changed: 14 additions & 6 deletions
@@ -1,14 +1,22 @@
+using AutoSDK.Helpers;
 using Microsoft.OpenApi;
-using Microsoft.OpenApi.Any;
 using Microsoft.OpenApi.Extensions;
 using Microsoft.OpenApi.Models;
 using Microsoft.OpenApi.Readers;
 
 var path = args[0];
-var text = await File.ReadAllTextAsync(path);
+var jsonOrYaml = await File.ReadAllTextAsync(path);
+
+jsonOrYaml = jsonOrYaml.Replace("minimum: -9223372036854776000", "");
+
+if (OpenApi31Support.IsOpenApi31(jsonOrYaml))
+{
+    jsonOrYaml = OpenApi31Support.ConvertToOpenApi30(jsonOrYaml);
+}
+
 var realtimeText = await File.ReadAllTextAsync(path.Replace(".yaml", ".realtime.yaml"));
 
-var openApiDocument = new OpenApiStringReader().Read(text, out var diagnostics);
+var openApiDocument = new OpenApiStringReader().Read(jsonOrYaml, out var diagnostics);
 var realtimeOpenApiDocument = new OpenApiStringReader().Read(realtimeText, out var realtimeDiagnostics);
 foreach (var schema in realtimeOpenApiDocument.Components.Schemas)
 {
@@ -62,8 +70,8 @@
 // .DistinctBy(x => (x as OpenApiString)?.Value)
 // .ToList();
 
-text = openApiDocument.SerializeAsYaml(OpenApiSpecVersion.OpenApi3_0);
-_ = new OpenApiStringReader().Read(text, out diagnostics);
+jsonOrYaml = openApiDocument.SerializeAsYaml(OpenApiSpecVersion.OpenApi3_0);
+_ = new OpenApiStringReader().Read(jsonOrYaml, out diagnostics);
 
 if (diagnostics.Errors.Count > 0)
 {
@@ -75,4 +83,4 @@
     //Environment.Exit(1);
 }
 
-await File.WriteAllTextAsync(path, text);
+await File.WriteAllTextAsync(path, jsonOrYaml);
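
Taken together, the Program.cs hunks add a pre-processing step before the spec is parsed: the out-of-range `minimum: -9223372036854776000` value (it does not fit in a signed 64-bit integer) is stripped, and documents that declare OpenAPI 3.1 are downgraded to 3.0 so the Microsoft.OpenApi.Readers 1.x parser can handle them. Below is a minimal standalone sketch of the same pattern, assuming only the AutoSDK.Helpers.OpenApi31Support API that the diff itself uses; the PrepareSpec helper name is illustrative and not part of the commit.

using AutoSDK.Helpers;           // OpenApi31Support, supplied by the new AutoSDK package reference
using Microsoft.OpenApi.Readers;

// Illustrative helper (not in the commit) showing the same pre-processing pattern.
static string PrepareSpec(string jsonOrYaml)
{
    // Remove the schema minimum that does not fit into a signed 64-bit integer.
    jsonOrYaml = jsonOrYaml.Replace("minimum: -9223372036854776000", "");

    // Downgrade OpenAPI 3.1 documents to 3.0 before handing them to the 1.x reader.
    if (OpenApi31Support.IsOpenApi31(jsonOrYaml))
    {
        jsonOrYaml = OpenApi31Support.ConvertToOpenApi30(jsonOrYaml);
    }

    return jsonOrYaml;
}

var prepared = PrepareSpec(await File.ReadAllTextAsync(args[0]));
var document = new OpenApiStringReader().Read(prepared, out var diagnostics);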

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI..JsonSerializerContext.g.cs

Lines changed: 426 additions & 381 deletions
Large diffs are not rendered by default.

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.AssistantsClient.CreateAssistant.g.cs

Lines changed: 35 additions & 35 deletions
@@ -171,19 +171,26 @@ partial void ProcessCreateAssistantResponseContent(
     /// <summary>
     /// Create an assistant with a model and instructions.
     /// </summary>
+    /// <param name="description">
+    /// The description of the assistant. The maximum length is 512 characters.
+    /// </param>
+    /// <param name="instructions">
+    /// The system instructions that the assistant uses. The maximum length is 256,000 characters.
+    /// </param>
+    /// <param name="metadata">
+    /// Set of 16 key-value pairs that can be attached to an object. This can be<br/>
+    /// useful for storing additional information about the object in a structured<br/>
+    /// format, and querying for objects via API or the dashboard. <br/>
+    /// Keys are strings with a maximum length of 64 characters. Values are strings<br/>
+    /// with a maximum length of 512 characters.
+    /// </param>
     /// <param name="model">
     /// ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models) for descriptions of them.<br/>
     /// Example: gpt-4o
     /// </param>
     /// <param name="name">
     /// The name of the assistant. The maximum length is 256 characters.
     /// </param>
-    /// <param name="description">
-    /// The description of the assistant. The maximum length is 512 characters.
-    /// </param>
-    /// <param name="instructions">
-    /// The system instructions that the assistant uses. The maximum length is 256,000 characters.
-    /// </param>
     /// <param name="reasoningEffort">
     /// **o-series models only** <br/>
     /// Constrains effort on reasoning for <br/>
@@ -193,68 +200,61 @@ partial void ProcessCreateAssistantResponseContent(
     /// on reasoning in a response.<br/>
     /// Default Value: medium
     /// </param>
-    /// <param name="tools">
-    /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
-    /// </param>
-    /// <param name="toolResources">
-    /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
-    /// </param>
-    /// <param name="metadata">
-    /// Set of 16 key-value pairs that can be attached to an object. This can be<br/>
-    /// useful for storing additional information about the object in a structured<br/>
-    /// format, and querying for objects via API or the dashboard. <br/>
-    /// Keys are strings with a maximum length of 64 characters. Values are strings<br/>
-    /// with a maximum length of 512 characters.
+    /// <param name="responseFormat">
+    /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.<br/>
+    /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).<br/>
+    /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.<br/>
+    /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
     /// </param>
     /// <param name="temperature">
     /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.<br/>
     /// Default Value: 1<br/>
     /// Example: 1
     /// </param>
+    /// <param name="toolResources">
+    /// A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs.
+    /// </param>
+    /// <param name="tools">
+    /// A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`.
+    /// </param>
     /// <param name="topP">
     /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.<br/>
     /// We generally recommend altering this or temperature but not both.<br/>
     /// Default Value: 1<br/>
     /// Example: 1
     /// </param>
-    /// <param name="responseFormat">
-    /// Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.<br/>
-    /// Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs).<br/>
-    /// Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the model generates is valid JSON.<br/>
-    /// **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
-    /// </param>
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::System.InvalidOperationException"></exception>
     #if NET8_0_OR_GREATER
     [global::System.Diagnostics.CodeAnalysis.Experimental(diagnosticId: "TRYAGI_OPENAI_BETA_001")]
     #endif
     public async global::System.Threading.Tasks.Task<global::tryAGI.OpenAI.AssistantObject> CreateAssistantAsync(
         global::tryAGI.OpenAI.AnyOf<string, global::tryAGI.OpenAI.AssistantSupportedModels?> model,
-        string? name = default,
         string? description = default,
         string? instructions = default,
-        global::tryAGI.OpenAI.ReasoningEffort? reasoningEffort = default,
-        global::System.Collections.Generic.IList<global::tryAGI.OpenAI.OneOf<global::tryAGI.OpenAI.AssistantToolsCode, global::tryAGI.OpenAI.AssistantToolsFileSearch, global::tryAGI.OpenAI.AssistantToolsFunction>>? tools = default,
-        global::tryAGI.OpenAI.CreateAssistantRequestToolResources? toolResources = default,
         global::System.Collections.Generic.Dictionary<string, string>? metadata = default,
+        string? name = default,
+        global::tryAGI.OpenAI.ReasoningEffort? reasoningEffort = default,
+        global::tryAGI.OpenAI.AssistantsApiResponseFormatOption? responseFormat = default,
         double? temperature = default,
+        global::tryAGI.OpenAI.CreateAssistantRequestToolResources? toolResources = default,
+        global::System.Collections.Generic.IList<global::tryAGI.OpenAI.OneOf<global::tryAGI.OpenAI.AssistantToolsCode, global::tryAGI.OpenAI.AssistantToolsFileSearch, global::tryAGI.OpenAI.AssistantToolsFunction>>? tools = default,
         double? topP = default,
-        global::tryAGI.OpenAI.AssistantsApiResponseFormatOption? responseFormat = default,
         global::System.Threading.CancellationToken cancellationToken = default)
     {
         var __request = new global::tryAGI.OpenAI.CreateAssistantRequest
         {
-            Model = model,
-            Name = name,
             Description = description,
             Instructions = instructions,
-            ReasoningEffort = reasoningEffort,
-            Tools = tools,
-            ToolResources = toolResources,
             Metadata = metadata,
+            Model = model,
+            Name = name,
+            ReasoningEffort = reasoningEffort,
+            ResponseFormat = responseFormat,
             Temperature = temperature,
+            ToolResources = toolResources,
+            Tools = tools,
             TopP = topP,
-            ResponseFormat = responseFormat,
         };
 
         return await CreateAssistantAsync(
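
The regenerated client builds the same CreateAssistantRequest; what changes is the order of the optional parameters and of the matching <param> docs, which now appear to be sorted alphabetically. Callers that pass optional arguments positionally would bind to different parameters after such a reorder, so named arguments are the safer call pattern. A hedged usage sketch, assuming an AssistantsClient instance named assistantsClient, assuming the generated AnyOf<string, ...> model parameter accepts a plain string implicitly, and using purely illustrative values:

// Illustrative call site; named arguments keep the call stable across parameter reorders.
var assistant = await assistantsClient.CreateAssistantAsync(
    model: "gpt-4o",
    name: "Spec fixer",
    instructions: "You clean up OpenAPI specifications.",
    temperature: 0.2);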

src/libs/tryAGI.OpenAI/Generated/tryAGI.OpenAI.AssistantsClient.CreateMessage.g.cs

Lines changed: 9 additions & 9 deletions
@@ -178,41 +178,41 @@ partial void ProcessCreateMessageResponseContent(
     /// Create a message.
     /// </summary>
     /// <param name="threadId"></param>
-    /// <param name="role">
-    /// The role of the entity that is creating the message. Allowed values include:<br/>
-    /// - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.<br/>
-    /// - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.
-    /// </param>
-    /// <param name="content"></param>
     /// <param name="attachments">
     /// A list of files attached to the message, and the tools they should be added to.
     /// </param>
+    /// <param name="content"></param>
     /// <param name="metadata">
     /// Set of 16 key-value pairs that can be attached to an object. This can be<br/>
    /// useful for storing additional information about the object in a structured<br/>
     /// format, and querying for objects via API or the dashboard. <br/>
     /// Keys are strings with a maximum length of 64 characters. Values are strings<br/>
     /// with a maximum length of 512 characters.
     /// </param>
+    /// <param name="role">
+    /// The role of the entity that is creating the message. Allowed values include:<br/>
+    /// - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.<br/>
+    /// - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation.
+    /// </param>
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::System.InvalidOperationException"></exception>
     #if NET8_0_OR_GREATER
     [global::System.Diagnostics.CodeAnalysis.Experimental(diagnosticId: "TRYAGI_OPENAI_BETA_001")]
     #endif
     public async global::System.Threading.Tasks.Task<global::tryAGI.OpenAI.MessageObject> CreateMessageAsync(
         string threadId,
-        global::tryAGI.OpenAI.CreateMessageRequestRole role,
         global::tryAGI.OpenAI.OneOf<string, global::System.Collections.Generic.IList<global::tryAGI.OpenAI.OneOf<global::tryAGI.OpenAI.MessageContentImageFileObject, global::tryAGI.OpenAI.MessageContentImageUrlObject, global::tryAGI.OpenAI.MessageRequestContentTextObject>>> content,
+        global::tryAGI.OpenAI.CreateMessageRequestRole role,
         global::System.Collections.Generic.IList<global::tryAGI.OpenAI.CreateMessageRequestAttachment>? attachments = default,
         global::System.Collections.Generic.Dictionary<string, string>? metadata = default,
         global::System.Threading.CancellationToken cancellationToken = default)
     {
         var __request = new global::tryAGI.OpenAI.CreateMessageRequest
         {
-            Role = role,
-            Content = content,
             Attachments = attachments,
+            Content = content,
             Metadata = metadata,
+            Role = role,
         };
 
         return await CreateMessageAsync(
