Skip to content

Commit 748ec7c

Browse files
HavenDV and github-actions[bot] authored
feat: Updated OpenAPI spec (#238)
Co-authored-by: github-actions[bot] <[email protected]>
1 parent 0859662 commit 748ec7c

File tree

7 files changed

+0
-66
lines changed

7 files changed

+0
-66
lines changed

src/libs/Cohere/Generated/Cohere.CohereClient.Chat.g.cs

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -740,11 +740,6 @@ partial void ProcessChatResponseContent(
740740
/// - AUTO: Cohere Platform Only<br/>
741741
/// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
742742
/// </param>
743-
/// <param name="rawPrompting">
744-
/// When enabled, the user's prompt will be sent to the model without<br/>
745-
/// any pre-processing.<br/>
746-
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
747-
/// </param>
748743
/// <param name="responseFormat">
749744
/// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models.<br/>
750745
/// The model can be forced into outputting JSON objects (with up to 5 levels of nesting) by setting `{ "type": "json_object" }`.<br/>
@@ -839,7 +834,6 @@ partial void ProcessChatResponseContent(
839834
string? model = default,
840835
string? preamble = default,
841836
global::Cohere.ChatRequestPromptTruncation? promptTruncation = default,
842-
bool? rawPrompting = default,
843837
global::Cohere.ResponseFormat? responseFormat = default,
844838
global::Cohere.ChatRequestSafetyMode? safetyMode = default,
845839
bool? searchQueriesOnly = default,
@@ -868,7 +862,6 @@ partial void ProcessChatResponseContent(
868862
Preamble = preamble,
869863
PresencePenalty = presencePenalty,
870864
PromptTruncation = promptTruncation,
871-
RawPrompting = rawPrompting,
872865
ResponseFormat = responseFormat,
873866
SafetyMode = safetyMode,
874867
SearchQueriesOnly = searchQueriesOnly,

src/libs/Cohere/Generated/Cohere.CohereClient.Chatv2.g.cs

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -667,11 +667,6 @@ partial void ProcessChatv2ResponseContent(
667667
/// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.<br/>
668668
/// Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
669669
/// </param>
670-
/// <param name="rawPrompting">
671-
/// When enabled, the user's prompt will be sent to the model without<br/>
672-
/// any pre-processing.<br/>
673-
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
674-
/// </param>
675670
/// <param name="reasoningEffort">
676671
/// The reasoning effort level of the model. This affects the model's performance and the time it takes to generate a response.
677672
/// </param>
@@ -738,7 +733,6 @@ partial void ProcessChatv2ResponseContent(
738733
int? maxTokens = default,
739734
float? p = default,
740735
float? presencePenalty = default,
741-
bool? rawPrompting = default,
742736
global::Cohere.ReasoningEffort? reasoningEffort = default,
743737
global::Cohere.ResponseFormatV2? responseFormat = default,
744738
global::Cohere.Chatv2RequestSafetyMode? safetyMode = default,
@@ -763,7 +757,6 @@ partial void ProcessChatv2ResponseContent(
763757
Model = model,
764758
P = p,
765759
PresencePenalty = presencePenalty,
766-
RawPrompting = rawPrompting,
767760
ReasoningEffort = reasoningEffort,
768761
ResponseFormat = responseFormat,
769762
SafetyMode = safetyMode,

src/libs/Cohere/Generated/Cohere.ICohereClient.Chat.g.cs

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -127,11 +127,6 @@ public partial interface ICohereClient
127127
/// - AUTO: Cohere Platform Only<br/>
128128
/// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
129129
/// </param>
130-
/// <param name="rawPrompting">
131-
/// When enabled, the user's prompt will be sent to the model without<br/>
132-
/// any pre-processing.<br/>
133-
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
134-
/// </param>
135130
/// <param name="responseFormat">
136131
/// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models.<br/>
137132
/// The model can be forced into outputting JSON objects (with up to 5 levels of nesting) by setting `{ "type": "json_object" }`.<br/>
@@ -226,7 +221,6 @@ public partial interface ICohereClient
226221
string? model = default,
227222
string? preamble = default,
228223
global::Cohere.ChatRequestPromptTruncation? promptTruncation = default,
229-
bool? rawPrompting = default,
230224
global::Cohere.ResponseFormat? responseFormat = default,
231225
global::Cohere.ChatRequestSafetyMode? safetyMode = default,
232226
bool? searchQueriesOnly = default,

src/libs/Cohere/Generated/Cohere.ICohereClient.Chatv2.g.cs

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -62,11 +62,6 @@ public partial interface ICohereClient
6262
/// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.<br/>
6363
/// Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
6464
/// </param>
65-
/// <param name="rawPrompting">
66-
/// When enabled, the user's prompt will be sent to the model without<br/>
67-
/// any pre-processing.<br/>
68-
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
69-
/// </param>
7065
/// <param name="reasoningEffort">
7166
/// The reasoning effort level of the model. This affects the model's performance and the time it takes to generate a response.
7267
/// </param>
@@ -133,7 +128,6 @@ public partial interface ICohereClient
133128
int? maxTokens = default,
134129
float? p = default,
135130
float? presencePenalty = default,
136-
bool? rawPrompting = default,
137131
global::Cohere.ReasoningEffort? reasoningEffort = default,
138132
global::Cohere.ResponseFormatV2? responseFormat = default,
139133
global::Cohere.Chatv2RequestSafetyMode? safetyMode = default,

src/libs/Cohere/Generated/Cohere.Models.ChatRequest.g.cs

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -159,14 +159,6 @@ public sealed partial class ChatRequest
159159
[global::System.Text.Json.Serialization.JsonConverter(typeof(global::Cohere.JsonConverters.ChatRequestPromptTruncationJsonConverter))]
160160
public global::Cohere.ChatRequestPromptTruncation? PromptTruncation { get; set; }
161161

162-
/// <summary>
163-
/// When enabled, the user's prompt will be sent to the model without<br/>
164-
/// any pre-processing.<br/>
165-
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
166-
/// </summary>
167-
[global::System.Text.Json.Serialization.JsonPropertyName("raw_prompting")]
168-
public bool? RawPrompting { get; set; }
169-
170162
/// <summary>
171163
/// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models.<br/>
172164
/// The model can be forced into outputting JSON objects (with up to 5 levels of nesting) by setting `{ "type": "json_object" }`.<br/>
@@ -377,11 +369,6 @@ public sealed partial class ChatRequest
377369
/// - AUTO: Cohere Platform Only<br/>
378370
/// - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments
379371
/// </param>
380-
/// <param name="rawPrompting">
381-
/// When enabled, the user's prompt will be sent to the model without<br/>
382-
/// any pre-processing.<br/>
383-
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
384-
/// </param>
385372
/// <param name="responseFormat">
386373
/// Configuration for forcing the model output to adhere to the specified format. Supported on [Command R 03-2024](https://docs.cohere.com/docs/command-r), [Command R+ 04-2024](https://docs.cohere.com/docs/command-r-plus) and newer models.<br/>
387374
/// The model can be forced into outputting JSON objects (with up to 5 levels of nesting) by setting `{ "type": "json_object" }`.<br/>
@@ -474,7 +461,6 @@ public ChatRequest(
474461
string? preamble,
475462
double? presencePenalty,
476463
global::Cohere.ChatRequestPromptTruncation? promptTruncation,
477-
bool? rawPrompting,
478464
global::Cohere.ResponseFormat? responseFormat,
479465
global::Cohere.ChatRequestSafetyMode? safetyMode,
480466
bool? searchQueriesOnly,
@@ -501,7 +487,6 @@ public ChatRequest(
501487
this.Preamble = preamble;
502488
this.PresencePenalty = presencePenalty;
503489
this.PromptTruncation = promptTruncation;
504-
this.RawPrompting = rawPrompting;
505490
this.ResponseFormat = responseFormat;
506491
this.SafetyMode = safetyMode;
507492
this.SearchQueriesOnly = searchQueriesOnly;

src/libs/Cohere/Generated/Cohere.Models.Chatv2Request.g.cs

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -80,14 +80,6 @@ public sealed partial class Chatv2Request
8080
[global::System.Text.Json.Serialization.JsonPropertyName("presence_penalty")]
8181
public float? PresencePenalty { get; set; }
8282

83-
/// <summary>
84-
/// When enabled, the user's prompt will be sent to the model without<br/>
85-
/// any pre-processing.<br/>
86-
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
87-
/// </summary>
88-
[global::System.Text.Json.Serialization.JsonPropertyName("raw_prompting")]
89-
public bool? RawPrompting { get; set; }
90-
9183
/// <summary>
9284
/// The reasoning effort level of the model. This affects the model's performance and the time it takes to generate a response.
9385
/// </summary>
@@ -221,11 +213,6 @@ public sealed partial class Chatv2Request
221213
/// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.<br/>
222214
/// Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
223215
/// </param>
224-
/// <param name="rawPrompting">
225-
/// When enabled, the user's prompt will be sent to the model without<br/>
226-
/// any pre-processing.<br/>
227-
/// Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments
228-
/// </param>
229216
/// <param name="reasoningEffort">
230217
/// The reasoning effort level of the model. This affects the model's performance and the time it takes to generate a response.
231218
/// </param>
@@ -292,7 +279,6 @@ public Chatv2Request(
292279
int? maxTokens,
293280
float? p,
294281
float? presencePenalty,
295-
bool? rawPrompting,
296282
global::Cohere.ReasoningEffort? reasoningEffort,
297283
global::Cohere.ResponseFormatV2? responseFormat,
298284
global::Cohere.Chatv2RequestSafetyMode? safetyMode,
@@ -314,7 +300,6 @@ public Chatv2Request(
314300
this.MaxTokens = maxTokens;
315301
this.P = p;
316302
this.PresencePenalty = presencePenalty;
317-
this.RawPrompting = rawPrompting;
318303
this.ReasoningEffort = reasoningEffort;
319304
this.ResponseFormat = responseFormat;
320305
this.SafetyMode = safetyMode;

src/libs/Cohere/openapi.yaml

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -148,11 +148,6 @@ paths:
148148
description: "Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.\n\nDictates how the prompt will be constructed.\n\nWith `prompt_truncation` set to \"AUTO\", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.\n\nWith `prompt_truncation` set to \"AUTO_PRESERVE_ORDER\", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.\n\nWith `prompt_truncation` set to \"OFF\", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.\n\nCompatible Deployments:\n - AUTO: Cohere Platform Only\n - AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker/Bedrock, Private Deployments\n"
149149
x-fern-audiences:
150150
- public
151-
raw_prompting:
152-
type: boolean
153-
description: "When enabled, the user's prompt will be sent to the model without\nany pre-processing.\n\nCompatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments\n"
154-
x-fern-audiences:
155-
- sdk-only
156151
response_format:
157152
$ref: '#/components/schemas/ResponseFormat'
158153
safety_mode:
@@ -7445,11 +7440,6 @@ paths:
74457440
format: float
74467441
x-fern-audiences:
74477442
- public
7448-
raw_prompting:
7449-
type: boolean
7450-
description: "When enabled, the user's prompt will be sent to the model without\nany pre-processing.\n\nCompatible Deployments: Cohere Platform, Azure, AWS Sagemaker/Bedrock, Private Deployments\n"
7451-
x-fern-audiences:
7452-
- sdk-only
74537443
reasoning_effort:
74547444
$ref: '#/components/schemas/ReasoningEffort'
74557445
response_format:

0 commit comments

Comments
 (0)