Commit ea2d471

♻️ [Tasks] JSON spec: text-generation (#468)
# TL;DR

- Update `text-generation` spec to match TGI API
- ~Add `conversational` spec, heavily inspired by TGI messages API (cc @Wauplin @osanseviero @Narsil)~
- ~Relevant related work: #457 & huggingface-internal/moon-landing#8723~
- Regenerate TypeScript code for those tasks
1 parent 839c1ce commit ea2d471

File tree: 3 files changed, +235 -7 lines changed

packages/tasks/src/tasks/text-generation/inference.ts

Lines changed: 110 additions & 4 deletions
```diff
@@ -26,11 +26,24 @@ export interface TextGenerationInput {
  */
 export interface TextGenerationParameters {
   /**
-   * Whether to use logit sampling (true) or greedy search (false).
+   * The number of sampling queries to run. Only the best one (in terms of total logprob) will
+   * be returned.
+   */
+  best_of?: number;
+  /**
+   * Whether or not to output decoder input details
+   */
+  decoder_input_details?: boolean;
+  /**
+   * Whether or not to output details
+   */
+  details?: boolean;
+  /**
+   * Whether to use logits sampling instead of greedy decoding when generating new tokens.
    */
   do_sample?: boolean;
   /**
-   * Maximum number of generated tokens.
+   * The maximum number of tokens to generate.
    */
   max_new_tokens?: number;
   /**
@@ -42,6 +55,10 @@ export interface TextGenerationParameters {
    * Whether to prepend the prompt to the generated text.
    */
   return_full_text?: boolean;
+  /**
+   * The random sampling seed.
+   */
+  seed?: number;
   /**
    * Stop generating tokens if a member of `stop_sequences` is generated.
    */
@@ -79,10 +96,99 @@ export interface TextGenerationParameters {
  * Outputs for Text Generation inference
  */
 export interface TextGenerationOutput {
-  generatedText: unknown;
+  /**
+   * When enabled, details about the generation
+   */
+  details?: TextGenerationOutputDetails;
+  /**
+   * The generated text
+   */
+  generated_text: string;
+  [property: string]: unknown;
+}
+
+/**
+ * When enabled, details about the generation
+ */
+export interface TextGenerationOutputDetails {
+  /**
+   * Details about additional sequences when best_of is provided
+   */
+  best_of_sequences?: TextGenerationSequenceDetails[];
+  /**
+   * The reason why the generation was stopped.
+   */
+  finish_reason: FinishReason;
+  /**
+   * The number of generated tokens
+   */
+  generated_tokens: number;
+  prefill: PrefillToken[];
+  /**
+   * The random seed used for generation
+   */
+  seed?: number;
+  /**
+   * The generated tokens and associated details
+   */
+  tokens: Token[];
+  [property: string]: unknown;
+}
+
+export interface TextGenerationSequenceDetails {
+  /**
+   * The reason why the generation was stopped.
+   */
+  finish_reason: FinishReason;
   /**
    * The generated text
    */
-  generated_text?: string;
+  generated_text: number;
+  /**
+   * The number of generated tokens
+   */
+  generated_tokens: number;
+  prefill: PrefillToken[];
+  /**
+   * The random seed used for generation
+   */
+  seed?: number;
+  /**
+   * The generated tokens and associated details
+   */
+  tokens: Token[];
+  [property: string]: unknown;
+}
+
+/**
+ * The generated sequence reached the maximum allowed length
+ *
+ * The model generated an end-of-sentence (EOS) token
+ *
+ * One of the sequence in stop_sequences was generated
+ */
+export type FinishReason = "length" | "eos_token" | "stop_sequence";
+
+export interface PrefillToken {
+  id: number;
+  logprob: number;
+  /**
+   * The text associated with that token
+   */
+  text: string;
+  [property: string]: unknown;
+}
+
+export interface Token {
+  id: number;
+  logprob: number;
+  /**
+   * Whether or not that token is a special one
+   */
+  special: boolean;
+  /**
+   * The text associated with that token
+   */
+  text: string;
   [property: string]: unknown;
 }
```
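
For context, here is a minimal sketch of how the regenerated parameters might be used against a TGI-style endpoint. The local interface only repeats fields visible in the diff above; the endpoint URL, the `{ inputs, parameters }` request envelope, and the use of `fetch` are illustrative assumptions, not part of this commit.

```ts
// Sketch only: parameter names mirror the TextGenerationParameters interface above;
// the endpoint URL and request envelope are assumed, not defined by this spec change.
interface TextGenerationParameters {
  best_of?: number;
  decoder_input_details?: boolean;
  details?: boolean;
  do_sample?: boolean;
  max_new_tokens?: number;
  return_full_text?: boolean;
  seed?: number;
}

const parameters: TextGenerationParameters = {
  best_of: 2,        // run two sampling queries, keep the best by total logprob
  details: true,     // ask the server to return generation details
  do_sample: true,   // sample instead of greedy decoding
  max_new_tokens: 64,
  seed: 42,          // make sampling reproducible
};

// Hypothetical TGI-style call; adjust the URL to your own endpoint.
async function generate(prompt: string): Promise<unknown> {
  const response = await fetch("http://localhost:8080/generate", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ inputs: prompt, parameters }),
  });
  return response.json();
}
```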

packages/tasks/src/tasks/text-generation/spec/input.json

Lines changed: 18 additions & 2 deletions
```diff
@@ -20,13 +20,25 @@
     "description": "Additional inference parameters for Text Generation",
     "type": "object",
     "properties": {
+      "best_of": {
+        "type": "integer",
+        "description": "The number of sampling queries to run. Only the best one (in terms of total logprob) will be returned."
+      },
+      "decoder_input_details": {
+        "type": "boolean",
+        "description": "Whether or not to output decoder input details"
+      },
+      "details": {
+        "type": "boolean",
+        "description": "Whether or not to output details"
+      },
       "do_sample": {
         "type": "boolean",
-        "description": "Whether to use logit sampling (true) or greedy search (false)."
+        "description": "Whether to use logits sampling instead of greedy decoding when generating new tokens."
       },
       "max_new_tokens": {
         "type": "integer",
-        "description": "Maximum number of generated tokens."
+        "description": "The maximum number of tokens to generate."
       },
       "repetition_penalty": {
         "type": "number",
@@ -36,6 +48,10 @@
         "type": "boolean",
         "description": "Whether to prepend the prompt to the generated text."
       },
+      "seed": {
+        "type": "integer",
+        "description": "The random sampling seed."
+      },
       "stop_sequences": {
         "type": "array",
         "items": {
```

packages/tasks/src/tasks/text-generation/spec/output.json

Lines changed: 107 additions & 1 deletion
```diff
@@ -8,7 +8,113 @@
     "generated_text": {
       "type": "string",
       "description": "The generated text"
+    },
+    "details": {
+      "description": "When enabled, details about the generation",
+      "title": "TextGenerationOutputDetails",
+      "allOf": [
+        { "$ref": "#/$defs/SequenceDetails" },
+        {
+          "type": "object",
+          "properties": {
+            "best_of_sequences": {
+              "type": "array",
+              "description": "Details about additional sequences when best_of is provided",
+              "items": {
+                "allOf": [
+                  { "$ref": "#/$defs/SequenceDetails" },
+                  {
+                    "type": "object",
+                    "properties": {
+                      "generated_text": {
+                        "type": "integer",
+                        "description": "The generated text"
+                      }
+                    },
+                    "required": ["generated_text"]
+                  }
+                ]
+              }
+            }
+          }
+        }
+      ]
     }
   },
-  "required": ["generatedText"]
+  "required": ["generated_text"],
+
+  "$defs": {
+    "Token": {
+      "type": "object",
+      "title": "Token",
+      "properties": {
+        "id": {
+          "type": "integer"
+        },
+        "logprob": {
+          "type": "number"
+        },
+        "special": {
+          "type": "boolean",
+          "description": "Whether or not that token is a special one"
+        },
+        "text": {
+          "type": "string",
+          "description": "The text associated with that token"
+        }
+      },
+      "required": ["id", "logprob", "special", "text"]
+    },
+    "SequenceDetails": {
+      "type": "object",
+      "title": "TextGenerationSequenceDetails",
+      "properties": {
+        "finish_reason": {
+          "type": "string",
+          "description": "The reason why the generation was stopped.",
+          "oneOf": [
+            { "const": "length", "description": "The generated sequence reached the maximum allowed length" },
+            { "const": "eos_token", "description": "The model generated an end-of-sentence (EOS) token" },
+            { "const": "stop_sequence", "description": "One of the sequence in stop_sequences was generated" }
+          ]
+        },
+        "generated_tokens": {
+          "type": "integer",
+          "description": "The number of generated tokens"
+        },
+        "prefill": {
+          "type": "array",
+          "items": {
+            "title": "PrefillToken",
+            "type": "object",
+            "properties": {
+              "id": {
+                "type": "integer"
+              },
+              "logprob": {
+                "type": "number"
+              },
+              "text": {
+                "type": "string",
+                "description": "The text associated with that token"
+              }
+            },
+            "required": ["id", "logprob", "text"]
+          }
+        },
+        "seed": {
+          "type": "integer",
+          "description": "The random seed used for generation"
+        },
+        "tokens": {
+          "type": "array",
+          "description": "The generated tokens and associated details",
+          "items": {
+            "$ref": "#/$defs/Token"
+          }
+        }
+      },
+      "required": ["finish_reason", "generated_tokens", "prefill", "tokens"]
+    }
+  }
 }
```
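
As a rough illustration of consuming a response shaped like the updated output spec, here is a small sketch. The local types repeat only a subset of the fields defined above, and the `summarize` helper plus the sample call are hypothetical, not part of this commit.

```ts
// Sketch: the types mirror a subset of the output spec above; summarize() is hypothetical.
type FinishReason = "length" | "eos_token" | "stop_sequence";

interface Token {
  id: number;
  logprob: number;
  special: boolean;
  text: string;
}

interface TextGenerationOutput {
  generated_text: string;
  details?: {
    finish_reason: FinishReason;
    generated_tokens: number;
    seed?: number;
    tokens: Token[];
  };
}

function summarize(output: TextGenerationOutput): string {
  if (!output.details) {
    return output.generated_text;
  }
  const { finish_reason, generated_tokens, tokens } = output.details;
  // Guard against an empty token list when averaging logprobs.
  const avgLogprob = tokens.reduce((sum, t) => sum + t.logprob, 0) / Math.max(tokens.length, 1);
  return `${generated_tokens} tokens (stopped: ${finish_reason}, avg logprob ${avgLogprob.toFixed(2)})`;
}

// Example with hand-written sample data:
console.log(
  summarize({
    generated_text: "Hello world",
    details: {
      finish_reason: "eos_token",
      generated_tokens: 2,
      tokens: [
        { id: 1, logprob: -0.1, special: false, text: "Hello" },
        { id: 2, logprob: -0.3, special: false, text: " world" },
      ],
    },
  })
);
```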

0 commit comments