Skip to content
This repository was archived by the owner on Oct 8, 2024. It is now read-only.

Commit bb074cf

Browse files
Export all model interface classes (#37)
1 parent 943134f commit bb074cf

File tree

8 files changed

+45
-37
lines changed

8 files changed

+45
-37
lines changed

CHANGELOG.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,9 @@
11
# Change Log
22

3+
## UNRELEASED
4+
5+
- Export all model interface classes [#37](https://github.com/hypermodeAI/models-as/pull/37)
6+
37
## 2024-08-09 - Version 0.2.3
48

59
- Add support for Gemini models [#24](https://github.com/hypermodeAI/models-as/pull/24)

src/models/anthropic/messages.ts

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ export class AssistantMessage extends Message {
9292
* The input object for the Anthropic Messages API.
9393
*/
9494
@json
95-
class AnthropicMessagesInput {
95+
export class AnthropicMessagesInput {
9696
/**
9797
* The model that will complete your prompt.
9898
* Must be the exact string expected by the model provider.
@@ -258,7 +258,7 @@ export class Tool {
258258

259259

260260
@json
261-
class ToolChoice {
261+
export class ToolChoice {
262262
constructor(type: string, name: string | null = null) {
263263
this._type = type;
264264
this._name = name;
@@ -296,7 +296,7 @@ export const ToolChoiceTool = (name: string): ToolChoice =>
296296
* The output object for the Anthropic Messages API.
297297
*/
298298
@json
299-
class AnthropicMessagesOutput {
299+
export class AnthropicMessagesOutput {
300300
/**
301301
* Unique object identifier.
302302
*/
@@ -348,7 +348,7 @@ class AnthropicMessagesOutput {
348348

349349

350350
@json
351-
class ContentBlock {
351+
export class ContentBlock {
352352
type!: string;
353353

354354
// Text block
@@ -370,7 +370,7 @@ class ContentBlock {
370370

371371

372372
@json
373-
class Usage {
373+
export class Usage {
374374
/**
375375
* The number of input tokens which were used.
376376
*/

src/models/experimental/classification.ts

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ export class ClassificationModel extends Model<
2626
* An input object for the classification model.
2727
*/
2828
@json
29-
class ClassificationInput {
29+
export class ClassificationInput {
3030
/**
3131
* A list of one or more text strings of text to classify.
3232
*/
@@ -37,7 +37,7 @@ class ClassificationInput {
3737
* An output object for the classification model.
3838
*/
3939
@json
40-
class ClassificationOutput {
40+
export class ClassificationOutput {
4141
/**
4242
* A list of prediction results that correspond to each input text string.
4343
*/
@@ -69,7 +69,7 @@ export class ClassifierResult {
6969
* A classification label with its corresponding probability.
7070
*/
7171
@json
72-
class ClassifierLabel {
72+
export class ClassifierLabel {
7373
/**
7474
* The classification label.
7575
*/

src/models/experimental/embeddings.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ export class EmbeddingsModel extends Model<EmbeddingsInput, EmbeddingsOutput> {
2323
* An input object for the embeddings model.
2424
*/
2525
@json
26-
class EmbeddingsInput {
26+
export class EmbeddingsInput {
2727
/**
2828
* A list of one or more text strings to create vector embeddings for.
2929
*/
@@ -34,7 +34,7 @@ class EmbeddingsInput {
3434
* An output object for the embeddings model.
3535
*/
3636
@json
37-
class EmbeddingsOutput {
37+
export class EmbeddingsOutput {
3838
/**
3939
* A list of vector embeddings that correspond to each input text string.
4040
*/

src/models/gemini/generate.ts

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ export class PromptContent {
5858

5959

6060
@json
61-
class Part {
61+
export class Part {
6262
text!: string;
6363
}
6464

@@ -141,7 +141,7 @@ export class ModelTextContent extends ModelContent {
141141
* The input object for the Gemini Generate Content API.
142142
*/
143143
@json
144-
class GeminiGenerateInput {
144+
export class GeminiGenerateInput {
145145
/**
146146
* The content of the current conversation with the model.
147147
*/
@@ -257,7 +257,7 @@ export class GenerationConfig {
257257
* Safety setting, affecting the safety-blocking behavior.
258258
*/
259259
@json
260-
class SafetySetting {
260+
export class SafetySetting {
261261
category!: HarmCategory;
262262
threshold!: HarmBlockThreshold;
263263
}
@@ -312,7 +312,7 @@ export type HarmBlockThreshold = string;
312312
* The output object for the Gemini Generate Content API.
313313
*/
314314
@json
315-
class GeminiGenerateOutput {
315+
export class GeminiGenerateOutput {
316316
/**
317317
* Candidate responses from the model.
318318
*/
@@ -334,7 +334,7 @@ class GeminiGenerateOutput {
334334
*
335335
*/
336336
@json
337-
class Candidate {
337+
export class Candidate {
338338
/**
339339
* Index of the candidate in the list of candidates.
340340
*/
@@ -425,7 +425,7 @@ export type FinishReason = string;
425425
* Safety setting, affecting the safety-blocking behavior.
426426
*/
427427
@json
428-
class SafetyRating {
428+
export class SafetyRating {
429429
category!: HarmCategory;
430430
probability!: HarmProbability;
431431
}
@@ -466,15 +466,15 @@ export type HarmProbability = string;
466466
* Citation metadata that may be found on a {@link Candidate}.
467467
*/
468468
@json
469-
class CitationMetadata {
469+
export class CitationMetadata {
470470
citationSources!: CitationSource[];
471471
}
472472

473473
/**
474474
* A single citation source.
475475
*/
476476
@json
477-
class CitationSource {
477+
export class CitationSource {
478478
/**
479479
* Start of segment of the response that is attributed to this source.
480480
*/
@@ -505,7 +505,7 @@ class CitationSource {
505505
* @public
506506
*/
507507
@json
508-
class PromptFeedback {
508+
export class PromptFeedback {
509509
blockReason!: BlockReason;
510510
safetyRatings!: SafetyRating[];
511511
}
@@ -536,7 +536,7 @@ export type BlockReason = string;
536536
* Metadata on the generation request's token usage.
537537
*/
538538
@json
539-
class UsageMetadata {
539+
export class UsageMetadata {
540540
/**
541541
* Number of tokens in the prompt.
542542
*/

src/models/meta/llama.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ export class TextGenerationModel extends Model<
1818

1919

2020
@json
21-
class TextGenerationInput {
21+
export class TextGenerationInput {
2222
/**
2323
* The prompt text to pass to the model.
2424
* May contain special tokens to control the behavior of the model.
@@ -58,7 +58,7 @@ class TextGenerationInput {
5858

5959

6060
@json
61-
class TextGenerationOutput {
61+
export class TextGenerationOutput {
6262
/**
6363
* The generated text.
6464
*/

src/models/openai/chat.ts

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ export class OpenAIChatModel extends Model<OpenAIChatInput, OpenAIChatOutput> {
2323
* The input object for the OpenAI Chat API.
2424
*/
2525
@json
26-
class OpenAIChatInput {
26+
export class OpenAIChatInput {
2727
/**
2828
* The name of the model to use for the chat.
2929
* Must be the exact string expected by the model provider.
@@ -238,7 +238,7 @@ export type ServiceTier = string;
238238
* The output object for the OpenAI Chat API.
239239
*/
240240
@json
241-
class OpenAIChatOutput {
241+
export class OpenAIChatOutput {
242242
/**
243243
* A unique identifier for the chat completion.
244244
*/
@@ -317,7 +317,7 @@ export class ResponseFormat {
317317
* Additionally, if you need an array you must ask for an object that wraps the array,
318318
* because the model will not reliably produce arrays directly (ie., there is no `json_array` option).
319319
*/
320-
static Json: ResponseFormat = { type: "json_object" };
320+
static Json: ResponseFormat = { type: "json_object", jsonSchema: null };
321321

322322
/**
323323
* Enables Structured Outputs which guarantees the model will match your supplied JSON schema.
@@ -339,7 +339,7 @@ export class ResponseFormat {
339339
* @remarks
340340
* This is the default response format.
341341
*/
342-
static Text: ResponseFormat = { type: "text" };
342+
static Text: ResponseFormat = { type: "text", jsonSchema: null };
343343
}
344344

345345
// @json
@@ -459,7 +459,7 @@ export class FunctionCall {
459459
* The usage statistics for the request.
460460
*/
461461
@json
462-
class Usage {
462+
export class Usage {
463463
/**
464464
* The number of completion tokens used in the response.
465465
*/
@@ -483,7 +483,7 @@ class Usage {
483483
* A completion choice object returned in the response.
484484
*/
485485
@json
486-
class Choice {
486+
export class Choice {
487487
/**
488488
* The reason the model stopped generating tokens.
489489
*
@@ -516,7 +516,7 @@ class Choice {
516516
* Log probability information for a choice.
517517
*/
518518
@json
519-
class Logprobs {
519+
export class Logprobs {
520520
/**
521521
* A list of message content tokens with log probability information.
522522
*/
@@ -527,7 +527,7 @@ class Logprobs {
527527
* Log probability information for a message content token.
528528
*/
529529
@json
530-
class LogprobsContent {
530+
export class LogprobsContent {
531531
/**
532532
* The token.
533533
*/
@@ -560,7 +560,7 @@ class LogprobsContent {
560560
* Log probability information for the most likely tokens at a given position.
561561
*/
562562
@json
563-
class TopLogprobsContent {
563+
export class TopLogprobsContent {
564564
/**
565565
* The token.
566566
*/
@@ -713,7 +713,7 @@ export class ToolMessage extends Message {
713713
* A chat completion message generated by the model.
714714
*/
715715
@json
716-
class CompletionMessage extends Message {
716+
export class CompletionMessage extends Message {
717717
/**
718718
* Creates a new completion message object.
719719
*

src/models/openai/embeddings.ts

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ export class OpenAIEmbeddingsModel extends Model<
5656
* The input object for the OpenAI Embeddings API.
5757
*/
5858
@json
59-
class OpenAIEmbeddingsInput {
59+
export class OpenAIEmbeddingsInput {
6060
/**
6161
* The name of the model to use for the embeddings.
6262
* Must be the exact string expected by the model provider.
@@ -100,7 +100,7 @@ class OpenAIEmbeddingsInput {
100100
* The input object for the OpenAI Embeddings API.
101101
*/
102102
@json
103-
class TypedEmbeddingsInput<T> extends OpenAIEmbeddingsInput {
103+
export class TypedEmbeddingsInput<T> extends OpenAIEmbeddingsInput {
104104
/**
105105
* The input content to vectorize.
106106
*/
@@ -111,7 +111,7 @@ class TypedEmbeddingsInput<T> extends OpenAIEmbeddingsInput {
111111
* The output object for the OpenAI Embeddings API.
112112
*/
113113
@json
114-
class OpenAIEmbeddingsOutput {
114+
export class OpenAIEmbeddingsOutput {
115115
/**
116116
* The name of the output object type returned by the API.
117117
* Always `"list"`.
@@ -160,7 +160,7 @@ export type EncodingFormat = string;
160160
* The output vector embeddings data.
161161
*/
162162
@json
163-
class Embedding {
163+
export class Embedding {
164164
/**
165165
* The name of the output object type returned by the API.
166166
* Always `"embedding"`.
@@ -172,14 +172,18 @@ class Embedding {
172172
* Used when requesting embeddings for multiple texts.
173173
*/
174174
index!: i32;
175+
176+
/**
177+
* The vector embedding of the input text.
178+
*/
175179
embedding!: f32[]; // TODO: support `f32[] | string` based on input encoding format
176180
}
177181

178182
/**
179183
* The usage statistics for the request.
180184
*/
181185
@json
182-
class Usage {
186+
export class Usage {
183187
/**
184188
* The number of prompt tokens used in the request.
185189
*/

0 commit comments

Comments
 (0)