
Commit d692600

test: Updated cohere spec.
1 parent b6e7541 commit d692600

File tree

589 files changed: +46741 additions, -14122 deletions


specs/cohere.yaml

Lines changed: 19626 additions & 3847 deletions
Large diffs are not rendered by default.

src/tests/AutoSDK.SnapshotTests/Snapshots/cohere/NewtonsoftJson/_#G.Api.Chat.g.verified.cs

Lines changed: 118 additions & 118 deletions
Large diffs are not rendered by default.

src/tests/AutoSDK.SnapshotTests/Snapshots/cohere/NewtonsoftJson/_#G.Api.Chatv2.g.verified.cs

Lines changed: 116 additions & 65 deletions
Large diffs are not rendered by default.

src/tests/AutoSDK.SnapshotTests/Snapshots/cohere/NewtonsoftJson/_#G.Api.Classify.g.verified.cs

Lines changed: 8 additions & 8 deletions
@@ -522,19 +522,19 @@ partial void ProcessClassifyResponseContent(
     /// Note: [Fine-tuned models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly.
     /// </summary>
     /// <param name="xClientName"></param>
+    /// <param name="examples">
+    /// An array of examples to provide context to the model. Each example is a text string and its associated label/class. Each unique label requires at least 2 examples associated with it; the maximum number of examples is 2500, and each example has a maximum length of 512 tokens. The values should be structured as `{text: "...",label: "..."}`.<br/>
+    /// Note: [Fine-tuned Models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly.<br/>
+    /// Included only in requests
+    /// </param>
     /// <param name="inputs">
     /// A list of up to 96 texts to be classified. Each one must be a non-empty string.<br/>
     /// There is, however, no consistent, universal limit to the length a particular input can be. We perform classification on the first `x` tokens of each input, and `x` varies depending on which underlying model is powering classification. The maximum token length for each model is listed in the "max tokens" column [here](https://docs.cohere.com/docs/models).<br/>
     /// Note: by default the `truncate` parameter is set to `END`, so tokens exceeding the limit will be automatically dropped. This behavior can be disabled by setting `truncate` to `NONE`, which will result in validation errors for longer texts.<br/>
     /// Included only in requests
     /// </param>
-    /// <param name="examples">
-    /// An array of examples to provide context to the model. Each example is a text string and its associated label/class. Each unique label requires at least 2 examples associated with it; the maximum number of examples is 2500, and each example has a maximum length of 512 tokens. The values should be structured as `{text: "...",label: "..."}`.<br/>
-    /// Note: [Fine-tuned Models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly.<br/>
-    /// Included only in requests
-    /// </param>
     /// <param name="model">
-    /// The identifier of the model. Currently available models are `embed-multilingual-v2.0`, `embed-english-light-v2.0`, and `embed-english-v2.0` (default). Smaller "light" models are faster, while larger models will perform better. [Fine-tuned models](https://docs.cohere.com/docs/fine-tuning) can also be supplied with their full ID.<br/>
+    /// ID of a [Fine-tuned](https://docs.cohere.com/v2/docs/classify-starting-the-training) Classify model<br/>
     /// Included only in requests
     /// </param>
     /// <param name="preset">
@@ -552,8 +552,8 @@ partial void ProcessClassifyResponseContent(
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::System.InvalidOperationException"></exception>
     public async global::System.Threading.Tasks.Task<global::G.ClassifyResponse> ClassifyAsync(
-        global::System.Collections.Generic.IList<string> inputs,
         global::System.Collections.Generic.IList<global::G.ClassifyExample> examples,
+        global::System.Collections.Generic.IList<string> inputs,
         string model,
         string preset,
         global::G.ClassifyRequestTruncate truncate,
@@ -562,8 +562,8 @@ partial void ProcessClassifyResponseContent(
     {
         var __request = new global::G.ClassifyRequest
        {
-            Inputs = inputs,
             Examples = examples,
+            Inputs = inputs,
             Model = model,
             Preset = preset,
             Truncate = truncate,
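
A minimal call sketch against the reordered ClassifyAsync overload above, assuming an async context and an already-constructed client instance named `api`. Only the parameter names and their new order come from the snapshot; the `Text`/`Label` property names on `ClassifyExample` (mirroring the documented `{text: "...", label: "..."}` shape), the fine-tuned model ID, and the `END` enum member are assumptions for illustration.

// Hypothetical usage sketch; `api`, the property names on ClassifyExample,
// the model ID, and the enum member name are assumed, not taken from the diff.
var examples = new global::System.Collections.Generic.List<global::G.ClassifyExample>
{
    // Each unique label needs at least 2 examples, so two per label here.
    new global::G.ClassifyExample { Text = "I love this product", Label = "positive" },
    new global::G.ClassifyExample { Text = "Works great, would buy again", Label = "positive" },
    new global::G.ClassifyExample { Text = "It broke after one day", Label = "negative" },
    new global::G.ClassifyExample { Text = "Never ordering from here again", Label = "negative" },
};
var response = await api.ClassifyAsync(
    examples: examples,                                  // `examples` now precedes `inputs`
    inputs: new global::System.Collections.Generic.List<string> { "How do I reset my password?" },
    model: "my-finetuned-classify-model",                // assumed ID of a fine-tuned Classify model
    preset: default,                                     // required by this overload; left unset in this sketch
    truncate: global::G.ClassifyRequestTruncate.END);    // assumed enum member, matching the documented default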

src/tests/AutoSDK.SnapshotTests/Snapshots/cohere/NewtonsoftJson/_#G.Api.Detokenize.g.verified.cs

Lines changed: 5 additions & 5 deletions
@@ -520,25 +520,25 @@ partial void ProcessDetokenizeResponseContent(
     /// This endpoint takes tokens using byte-pair encoding and returns their text representation. To learn more about tokenization and byte pair encoding, see the tokens page.
     /// </summary>
     /// <param name="xClientName"></param>
+    /// <param name="model">
+    /// An optional parameter to provide the model name. This will ensure that the detokenization is done by the tokenizer used by that model.
+    /// </param>
     /// <param name="tokens">
     /// The list of tokens to be detokenized.<br/>
     /// Included only in requests
     /// </param>
-    /// <param name="model">
-    /// An optional parameter to provide the model name. This will ensure that the detokenization is done by the tokenizer used by that model.
-    /// </param>
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::System.InvalidOperationException"></exception>
     public async global::System.Threading.Tasks.Task<global::G.DetokenizeResponse> DetokenizeAsync(
-        global::System.Collections.Generic.IList<int> tokens,
         string model,
+        global::System.Collections.Generic.IList<int> tokens,
         string? xClientName = default,
         global::System.Threading.CancellationToken cancellationToken = default)
     {
         var __request = new global::G.DetokenizeRequest
         {
-            Tokens = tokens,
             Model = model,
+            Tokens = tokens,
         };

         return await DetokenizeAsync(
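
A corresponding sketch for the reordered DetokenizeAsync overload; as above, only the parameter names and order come from the snapshot, while the `api` client instance, the model name, and the token IDs are placeholders.

// Hypothetical usage sketch; `api`, the model name, and the token IDs are placeholders.
var detokenized = await api.DetokenizeAsync(
    model: "command-r",                                                          // assumed model name, used only to select the tokenizer
    tokens: new global::System.Collections.Generic.List<int> { 10002, 2261, 2012 }, // example token IDs to turn back into text
    cancellationToken: default);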

src/tests/AutoSDK.SnapshotTests/Snapshots/cohere/NewtonsoftJson/_#G.Api.Embed.g.verified.cs

Lines changed: 36 additions & 38 deletions
@@ -25,16 +25,16 @@ partial void ProcessEmbedResponseContent(
         ref string content);

     /// <summary>
-    /// Embed<br/>
-    /// This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents.<br/>
-    /// Embeddings can be used to create text classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page.<br/>
-    /// If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](/docs/semantic-search).
+    /// Embed V1 API<br/>
+    /// This endpoint returns text and image embeddings. An embedding is a list of floating point numbers that captures semantic information about the content that it represents.<br/>
+    /// Embeddings can be used to create classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page.<br/>
+    /// If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](https://docs.cohere.com/docs/semantic-search).
     /// </summary>
     /// <param name="xClientName"></param>
     /// <param name="request"></param>
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::G.ApiException"></exception>
-    public async global::System.Threading.Tasks.Task<global::G.OneOf<global::G.EmbedFloatsResponse, global::G.EmbedByTypeResponse>> EmbedAsync(
+    public async global::System.Threading.Tasks.Task<global::G.EmbedResponse> EmbedAsync(
         global::G.EmbedRequest request,
         string? xClientName = default,
         global::System.Threading.CancellationToken cancellationToken = default)
@@ -482,7 +482,7 @@ partial void ProcessEmbedResponseContent(
             }

             return
-                global::G.OneOf<global::G.EmbedFloatsResponse, global::G.EmbedByTypeResponse>.FromJson(__content, JsonSerializerOptions) ??
+                global::G.EmbedResponse.FromJson(__content, JsonSerializerOptions) ??
                 throw new global::System.InvalidOperationException($"Response deserialization failed for \"{__content}\" ");
         }
         else
@@ -512,34 +512,31 @@ partial void ProcessEmbedResponseContent(
             ).ConfigureAwait(false);

             return
-                await global::G.OneOf<global::G.EmbedFloatsResponse, global::G.EmbedByTypeResponse>.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ??
+                await global::G.EmbedResponse.FromJsonStreamAsync(__content, JsonSerializerOptions).ConfigureAwait(false) ??
                 throw new global::System.InvalidOperationException("Response deserialization failed.");
         }
     }

     /// <summary>
-    /// Embed<br/>
-    /// This endpoint returns text embeddings. An embedding is a list of floating point numbers that captures semantic information about the text that it represents.<br/>
-    /// Embeddings can be used to create text classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page.<br/>
-    /// If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](/docs/semantic-search).
+    /// Embed V1 API<br/>
+    /// This endpoint returns text and image embeddings. An embedding is a list of floating point numbers that captures semantic information about the content that it represents.<br/>
+    /// Embeddings can be used to create classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page.<br/>
+    /// If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](https://docs.cohere.com/docs/semantic-search).
     /// </summary>
     /// <param name="xClientName"></param>
-    /// <param name="texts">
-    /// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.<br/>
+    /// <param name="embeddingTypes">
+    /// Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types.<br/>
+    /// * `"float"`: Use this when you want to get back the default float embeddings. Supported with all Embed models.<br/>
+    /// * `"int8"`: Use this when you want to get back signed int8 embeddings. Supported with Embed v3.0 and newer Embed models.<br/>
+    /// * `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Supported with Embed v3.0 and newer Embed models.<br/>
+    /// * `"binary"`: Use this when you want to get back signed binary embeddings. Supported with Embed v3.0 and newer Embed models.<br/>
+    /// * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Supported with Embed v3.0 and newer Embed models.<br/>
     /// Included only in requests
     /// </param>
-    /// <param name="model">
-    /// Defaults to embed-english-v2.0<br/>
-    /// The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.<br/>
-    /// Available models and corresponding embedding dimensions:<br/>
-    /// * `embed-english-v3.0` 1024<br/>
-    /// * `embed-multilingual-v3.0` 1024<br/>
-    /// * `embed-english-light-v3.0` 384<br/>
-    /// * `embed-multilingual-light-v3.0` 384<br/>
-    /// * `embed-english-v2.0` 4096<br/>
-    /// * `embed-english-light-v2.0` 1024<br/>
-    /// * `embed-multilingual-v2.0` 768<br/>
-    /// Included only in requests
+    /// <param name="images">
+    /// An array of image data URIs for the model to embed. Maximum number of images per call is `1`.<br/>
+    /// The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg` or `image/png` format and has a maximum size of 5MB.<br/>
+    /// Images are only supported with Embed v3.0 and newer models.
     /// </param>
     /// <param name="inputType">
     /// Specifies the type of input passed to the model. Required for embedding models v3 and higher.<br/>
@@ -549,13 +546,12 @@ partial void ProcessEmbedResponseContent(
     /// - `"clustering"`: Used for the embeddings run through a clustering algorithm.<br/>
     /// - `"image"`: Used for embeddings with image input.
     /// </param>
-    /// <param name="embeddingTypes">
-    /// Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types.<br/>
-    /// * `"float"`: Use this when you want to get back the default float embeddings. Valid for all models.<br/>
-    /// * `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models.<br/>
-    /// * `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models.<br/>
-    /// * `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.<br/>
-    /// * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.<br/>
+    /// <param name="model">
+    /// ID of one of the available [Embedding models](https://docs.cohere.com/docs/cohere-embed).<br/>
+    /// Included only in requests
+    /// </param>
+    /// <param name="texts">
+    /// An array of strings for the model to embed. Maximum number of texts per call is `96`.<br/>
     /// Included only in requests
     /// </param>
     /// <param name="truncate">
@@ -567,21 +563,23 @@ partial void ProcessEmbedResponseContent(
     /// </param>
     /// <param name="cancellationToken">The token to cancel the operation with</param>
     /// <exception cref="global::System.InvalidOperationException"></exception>
-    public async global::System.Threading.Tasks.Task<global::G.OneOf<global::G.EmbedFloatsResponse, global::G.EmbedByTypeResponse>> EmbedAsync(
-        global::System.Collections.Generic.IList<string> texts,
-        string model,
+    public async global::System.Threading.Tasks.Task<global::G.EmbedResponse> EmbedAsync(
         global::System.Collections.Generic.IList<global::G.EmbeddingType> embeddingTypes,
+        string model,
+        global::System.Collections.Generic.IList<string> texts,
         global::G.EmbedRequestTruncate truncate,
         string? xClientName = default,
+        global::System.Collections.Generic.IList<string>? images = default,
         global::G.EmbedInputType? inputType = default,
         global::System.Threading.CancellationToken cancellationToken = default)
     {
         var __request = new global::G.EmbedRequest
         {
-            Texts = texts,
-            Model = model,
-            InputType = inputType,
             EmbeddingTypes = embeddingTypes,
+            Images = images,
+            InputType = inputType,
+            Model = model,
+            Texts = texts,
             Truncate = truncate,
         };
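
A sketch of the reworked EmbedAsync overload, which now returns `global::G.EmbedResponse` instead of `OneOf<EmbedFloatsResponse, EmbedByTypeResponse>`. The parameter names, their order, and the optional `images` parameter come from the snapshot; the `api` instance, the model ID, and the exact generated enum member names (`Float`, `SearchDocument`, `END`) are assumptions.

// Hypothetical usage sketch; `api` and the enum member names are assumed.
global::G.EmbedResponse embedResponse = await api.EmbedAsync(
    embeddingTypes: new global::System.Collections.Generic.List<global::G.EmbeddingType>
    {
        global::G.EmbeddingType.Float,                        // assumed member for "float" embeddings
    },
    model: "embed-english-v3.0",                              // assumed Embedding model ID
    texts: new global::System.Collections.Generic.List<string> { "hello world" }, // up to 96 texts per call
    truncate: global::G.EmbedRequestTruncate.END,
    images: default,                                          // optional; at most 1 jpeg/png data URI, Embed v3.0+ only
    inputType: global::G.EmbedInputType.SearchDocument);      // assumed member for "search_document"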
