@@ -346,26 +346,6 @@ export interface ThreadDeleted {
   object: 'thread.deleted';
 }
 
-/**
- * Controls for how a thread will be truncated prior to the run. Use this to
- * control the intial context window of the run.
- */
-export interface TruncationObject {
-  /**
-   * The truncation strategy to use for the thread. The default is `auto`. If set to
-   * `last_messages`, the thread will be truncated to the n most recent messages in
-   * the thread. When set to `auto`, messages in the middle of the thread will be
-   * dropped to fit the context length of the model, `max_prompt_tokens`.
-   */
-  type: 'auto' | 'last_messages';
-
-  /**
-   * The number of most recent messages from the thread when constructing the context
-   * for the run.
-   */
-  last_messages?: number | null;
-}
-
 export interface ThreadCreateParams {
   /**
    * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
@@ -754,7 +734,7 @@ export interface ThreadCreateAndRunParamsBase {
    * Controls for how a thread will be truncated prior to the run. Use this to
    * control the intial context window of the run.
    */
-  truncation_strategy?: TruncationObject | null;
+  truncation_strategy?: ThreadCreateAndRunParams.TruncationStrategy | null;
 }
 
 export namespace ThreadCreateAndRunParams {
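
For consumers who previously referenced the standalone `TruncationObject` type, a minimal migration sketch is shown below. The import path is assumed from the file being diffed (`openai/resources/beta/threads/threads`) and may differ by SDK version; the shape of the object comes straight from the interface in the hunks above.

```ts
// Sketch: reference the namespace-scoped type instead of the removed
// top-level `TruncationObject` export. Import path is an assumption.
import type { ThreadCreateAndRunParams } from 'openai/resources/beta/threads/threads';

const strategy: ThreadCreateAndRunParams.TruncationStrategy = {
  type: 'last_messages', // or 'auto'
  last_messages: 10,     // only meaningful with 'last_messages'
};
```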
@@ -985,6 +965,26 @@ export namespace ThreadCreateAndRunParams {
     }
   }
 
+  /**
+   * Controls for how a thread will be truncated prior to the run. Use this to
+   * control the intial context window of the run.
+   */
+  export interface TruncationStrategy {
+    /**
+     * The truncation strategy to use for the thread. The default is `auto`. If set to
+     * `last_messages`, the thread will be truncated to the n most recent messages in
+     * the thread. When set to `auto`, messages in the middle of the thread will be
+     * dropped to fit the context length of the model, `max_prompt_tokens`.
+     */
+    type: 'auto' | 'last_messages';
+
+    /**
+     * The number of most recent messages from the thread when constructing the context
+     * for the run.
+     */
+    last_messages?: number | null;
+  }
+
   export type ThreadCreateAndRunParamsNonStreaming = ThreadsAPI.ThreadCreateAndRunParamsNonStreaming;
   export type ThreadCreateAndRunParamsStreaming = ThreadsAPI.ThreadCreateAndRunParamsStreaming;
 }
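
As a rough usage sketch, the relocated `TruncationStrategy` still flows into `beta.threads.createAndRun` via the `truncation_strategy` field documented above; the assistant id below is a placeholder, and the surrounding client setup is assumed rather than taken from this diff.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // `auto` drops mid-thread messages to fit the model's context window;
  // `last_messages` keeps only the n most recent messages.
  const run = await client.beta.threads.createAndRun({
    assistant_id: 'asst_...', // placeholder id
    truncation_strategy: { type: 'last_messages', last_messages: 5 },
  });
  console.log(run.id);
}

main();
```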
@@ -1684,7 +1684,6 @@ export declare namespace Threads {
     type AssistantToolChoiceOption as AssistantToolChoiceOption,
     type Thread as Thread,
     type ThreadDeleted as ThreadDeleted,
-    type TruncationObject as TruncationObject,
     type ThreadCreateParams as ThreadCreateParams,
     type ThreadUpdateParams as ThreadUpdateParams,
     type ThreadCreateAndRunParams as ThreadCreateAndRunParams,