Skip to content

Commit 7b857fe

Browse files
stainless-bot authored and stainless-app[bot] committed
feat(api): updates (#874)
1 parent fd70373 commit 7b857fe

File tree

15 files changed

+416
-18
lines changed

15 files changed

+416
-18
lines changed

.stats.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
configured_endpoints: 64
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-363dd904e5d6e65b3a323fc88e6b502fb23a6aa319be219273e3ee47c7530993.yml
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-0577fd0d08da6b867b002a5accd45f7116ef91c4940b41cf45dc479938c77163.yml

src/resources/batches.ts

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,9 @@ export class Batches extends APIResource {
3737
}
3838

3939
/**
40-
* Cancels an in-progress batch.
40+
* Cancels an in-progress batch. The batch will be in status `cancelling` for up to
41+
* 10 minutes, before changing to `cancelled`, where it will have partial results
42+
* (if any) available in the output file.
4143
*/
4244
cancel(batchId: string, options?: Core.RequestOptions): Core.APIPromise<Batch> {
4345
return this._client.post(`/batches/${batchId}/cancel`, options);
@@ -228,7 +230,7 @@ export interface BatchCreateParams {
228230
* for how to upload a file.
229231
*
230232
* Your input file must be formatted as a
231-
* [JSONL file](https://platform.openai.com/docs/api-reference/batch/requestInput),
233+
* [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
232234
* and must be uploaded with the purpose `batch`. The file can contain up to 50,000
233235
* requests, and can be up to 100 MB in size.
234236
*/

src/resources/beta/assistants.ts

Lines changed: 99 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -258,6 +258,7 @@ export type AssistantStreamEvent =
258258
| AssistantStreamEvent.ThreadRunInProgress
259259
| AssistantStreamEvent.ThreadRunRequiresAction
260260
| AssistantStreamEvent.ThreadRunCompleted
261+
| AssistantStreamEvent.ThreadRunIncomplete
261262
| AssistantStreamEvent.ThreadRunFailed
262263
| AssistantStreamEvent.ThreadRunCancelling
263264
| AssistantStreamEvent.ThreadRunCancelled
@@ -362,6 +363,20 @@ export namespace AssistantStreamEvent {
362363
event: 'thread.run.completed';
363364
}
364365

366+
/**
367+
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
368+
* ends with status `incomplete`.
369+
*/
370+
export interface ThreadRunIncomplete {
371+
/**
372+
* Represents an execution run on a
373+
* [thread](https://platform.openai.com/docs/api-reference/threads).
374+
*/
375+
data: RunsAPI.Run;
376+
377+
event: 'thread.run.incomplete';
378+
}
379+
365380
/**
366381
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
367382
* fails.
@@ -618,6 +633,30 @@ export interface FileSearchTool {
618633
* The type of tool being defined: `file_search`
619634
*/
620635
type: 'file_search';
636+
637+
/**
638+
* Overrides for the file search tool.
639+
*/
640+
file_search?: FileSearchTool.FileSearch;
641+
}
642+
643+
export namespace FileSearchTool {
644+
/**
645+
* Overrides for the file search tool.
646+
*/
647+
export interface FileSearch {
648+
/**
649+
* The maximum number of results the file search tool should output. The default is
650+
* 20 for gpt-4\* models and 5 for gpt-3.5-turbo. This number should be between 1
651+
* and 50 inclusive.
652+
*
653+
* Note that the file search tool may output fewer than `max_num_results` results.
654+
* See the
655+
* [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/number-of-chunks-returned)
656+
* for more information.
657+
*/
658+
max_num_results?: number;
659+
}
621660
}
622661

623662
export interface FunctionTool {
@@ -843,6 +882,7 @@ export type RunStreamEvent =
843882
| RunStreamEvent.ThreadRunInProgress
844883
| RunStreamEvent.ThreadRunRequiresAction
845884
| RunStreamEvent.ThreadRunCompleted
885+
| RunStreamEvent.ThreadRunIncomplete
846886
| RunStreamEvent.ThreadRunFailed
847887
| RunStreamEvent.ThreadRunCancelling
848888
| RunStreamEvent.ThreadRunCancelled
@@ -919,6 +959,20 @@ export namespace RunStreamEvent {
919959
event: 'thread.run.completed';
920960
}
921961

962+
/**
963+
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
964+
* ends with status `incomplete`.
965+
*/
966+
export interface ThreadRunIncomplete {
967+
/**
968+
* Represents an execution run on a
969+
* [thread](https://platform.openai.com/docs/api-reference/threads).
970+
*/
971+
data: RunsAPI.Run;
972+
973+
event: 'thread.run.incomplete';
974+
}
975+
922976
/**
923977
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
924978
* fails.
@@ -1140,6 +1194,12 @@ export namespace AssistantCreateParams {
11401194

11411195
export namespace FileSearch {
11421196
export interface VectorStore {
1197+
/**
1198+
* The chunking strategy used to chunk the file(s). If not set, will use the `auto`
1199+
* strategy.
1200+
*/
1201+
chunking_strategy?: VectorStore.Auto | VectorStore.Static;
1202+
11431203
/**
11441204
* A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
11451205
* add to the vector store. There can be a maximum of 10000 files in a vector
@@ -1155,6 +1215,45 @@ export namespace AssistantCreateParams {
11551215
*/
11561216
metadata?: unknown;
11571217
}
1218+
1219+
export namespace VectorStore {
1220+
/**
1221+
* The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
1222+
* `800` and `chunk_overlap_tokens` of `400`.
1223+
*/
1224+
export interface Auto {
1225+
/**
1226+
* Always `auto`.
1227+
*/
1228+
type: 'auto';
1229+
}
1230+
1231+
export interface Static {
1232+
static: Static.Static;
1233+
1234+
/**
1235+
* Always `static`.
1236+
*/
1237+
type: 'static';
1238+
}
1239+
1240+
export namespace Static {
1241+
export interface Static {
1242+
/**
1243+
* The number of tokens that overlap between chunks. The default value is `400`.
1244+
*
1245+
* Note that the overlap must not exceed half of `max_chunk_size_tokens`.
1246+
*/
1247+
chunk_overlap_tokens: number;
1248+
1249+
/**
1250+
* The maximum number of tokens in each chunk. The default value is `800`. The
1251+
* minimum value is `100` and the maximum value is `4096`.
1252+
*/
1253+
max_chunk_size_tokens: number;
1254+
}
1255+
}
1256+
}
11581257
}
11591258
}
11601259
}

src/resources/beta/threads/threads.ts

Lines changed: 90 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -369,6 +369,12 @@ export namespace ThreadCreateParams {
369369

370370
export namespace FileSearch {
371371
export interface VectorStore {
372+
/**
373+
* The chunking strategy used to chunk the file(s). If not set, will use the `auto`
374+
* strategy.
375+
*/
376+
chunking_strategy?: VectorStore.Auto | VectorStore.Static;
377+
372378
/**
373379
* A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
374380
* add to the vector store. There can be a maximum of 10000 files in a vector
@@ -384,6 +390,45 @@ export namespace ThreadCreateParams {
384390
*/
385391
metadata?: unknown;
386392
}
393+
394+
export namespace VectorStore {
395+
/**
396+
* The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
397+
* `800` and `chunk_overlap_tokens` of `400`.
398+
*/
399+
export interface Auto {
400+
/**
401+
* Always `auto`.
402+
*/
403+
type: 'auto';
404+
}
405+
406+
export interface Static {
407+
static: Static.Static;
408+
409+
/**
410+
* Always `static`.
411+
*/
412+
type: 'static';
413+
}
414+
415+
export namespace Static {
416+
export interface Static {
417+
/**
418+
* The number of tokens that overlap between chunks. The default value is `400`.
419+
*
420+
* Note that the overlap must not exceed half of `max_chunk_size_tokens`.
421+
*/
422+
chunk_overlap_tokens: number;
423+
424+
/**
425+
* The maximum number of tokens in each chunk. The default value is `800`. The
426+
* minimum value is `100` and the maximum value is `4096`.
427+
*/
428+
max_chunk_size_tokens: number;
429+
}
430+
}
431+
}
387432
}
388433
}
389434
}
@@ -711,6 +756,12 @@ export namespace ThreadCreateAndRunParams {
711756

712757
export namespace FileSearch {
713758
export interface VectorStore {
759+
/**
760+
* The chunking strategy used to chunk the file(s). If not set, will use the `auto`
761+
* strategy.
762+
*/
763+
chunking_strategy?: VectorStore.Auto | VectorStore.Static;
764+
714765
/**
715766
* A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
716767
* add to the vector store. There can be a maximum of 10000 files in a vector
@@ -726,6 +777,45 @@ export namespace ThreadCreateAndRunParams {
726777
*/
727778
metadata?: unknown;
728779
}
780+
781+
export namespace VectorStore {
782+
/**
783+
* The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
784+
* `800` and `chunk_overlap_tokens` of `400`.
785+
*/
786+
export interface Auto {
787+
/**
788+
* Always `auto`.
789+
*/
790+
type: 'auto';
791+
}
792+
793+
export interface Static {
794+
static: Static.Static;
795+
796+
/**
797+
* Always `static`.
798+
*/
799+
type: 'static';
800+
}
801+
802+
export namespace Static {
803+
export interface Static {
804+
/**
805+
* The number of tokens that overlap between chunks. The default value is `400`.
806+
*
807+
* Note that the overlap must not exceed half of `max_chunk_size_tokens`.
808+
*/
809+
chunk_overlap_tokens: number;
810+
811+
/**
812+
* The maximum number of tokens in each chunk. The default value is `800`. The
813+
* minimum value is `100` and the maximum value is `4096`.
814+
*/
815+
max_chunk_size_tokens: number;
816+
}
817+
}
818+
}
729819
}
730820
}
731821
}

src/resources/beta/vector-stores/file-batches.ts

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -261,6 +261,53 @@ export interface FileBatchCreateParams {
261261
* files.
262262
*/
263263
file_ids: Array<string>;
264+
265+
/**
266+
* The chunking strategy used to chunk the file(s). If not set, will use the `auto`
267+
* strategy.
268+
*/
269+
chunking_strategy?:
270+
| FileBatchCreateParams.AutoChunkingStrategyRequestParam
271+
| FileBatchCreateParams.StaticChunkingStrategyRequestParam;
272+
}
273+
274+
export namespace FileBatchCreateParams {
275+
/**
276+
* The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
277+
* `800` and `chunk_overlap_tokens` of `400`.
278+
*/
279+
export interface AutoChunkingStrategyRequestParam {
280+
/**
281+
* Always `auto`.
282+
*/
283+
type: 'auto';
284+
}
285+
286+
export interface StaticChunkingStrategyRequestParam {
287+
static: StaticChunkingStrategyRequestParam.Static;
288+
289+
/**
290+
* Always `static`.
291+
*/
292+
type: 'static';
293+
}
294+
295+
export namespace StaticChunkingStrategyRequestParam {
296+
export interface Static {
297+
/**
298+
* The number of tokens that overlap between chunks. The default value is `400`.
299+
*
300+
* Note that the overlap must not exceed half of `max_chunk_size_tokens`.
301+
*/
302+
chunk_overlap_tokens: number;
303+
304+
/**
305+
* The maximum number of tokens in each chunk. The default value is `800`. The
306+
* minimum value is `100` and the maximum value is `4096`.
307+
*/
308+
max_chunk_size_tokens: number;
309+
}
310+
}
264311
}
265312

266313
export interface FileBatchListFilesParams extends CursorPageParams {

0 commit comments

Comments (0)