@@ -592,18 +592,81 @@ constructor(
        fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) =
            responseFormat(AssistantResponseFormatOption.ofBehavior(behavior))

+       /**
+        * Specifies the format that the model must output. Compatible with
+        * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+        * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+        * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+        *
+        * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+        * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+        * the
+        * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        *
+        * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+        * the model generates is valid JSON.
+        *
+        * **Important:** when using JSON mode, you **must** also instruct the model to produce
+        * JSON yourself via a system or user message. Without this, the model may generate an
+        * unending stream of whitespace until the generation reaches the token limit, resulting
+        * in a long-running and seemingly "stuck" request. Also note that the message content
+        * may be partially cut off if `finish_reason="length"`, which indicates the generation
+        * exceeded `max_tokens` or the conversation exceeded the max context length.
+        */
        fun responseFormat(responseFormatText: ResponseFormatText) =
            responseFormat(
                AssistantResponseFormatOption.ofResponseFormatText(responseFormatText)
            )

+       /**
+        * Specifies the format that the model must output. Compatible with
+        * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+        * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+        * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+        *
+        * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+        * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+        * the
+        * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        *
+        * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+        * the model generates is valid JSON.
+        *
+        * **Important:** when using JSON mode, you **must** also instruct the model to produce
+        * JSON yourself via a system or user message. Without this, the model may generate an
+        * unending stream of whitespace until the generation reaches the token limit, resulting
+        * in a long-running and seemingly "stuck" request. Also note that the message content
+        * may be partially cut off if `finish_reason="length"`, which indicates the generation
+        * exceeded `max_tokens` or the conversation exceeded the max context length.
+        */
        fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) =
            responseFormat(
                AssistantResponseFormatOption.ofResponseFormatJsonObject(
                    responseFormatJsonObject
                )
            )

+       /**
+        * Specifies the format that the model must output. Compatible with
+        * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+        * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+        * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+        *
+        * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+        * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+        * the
+        * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        *
+        * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+        * the model generates is valid JSON.
+        *
+        * **Important:** when using JSON mode, you **must** also instruct the model to produce
+        * JSON yourself via a system or user message. Without this, the model may generate an
+        * unending stream of whitespace until the generation reaches the token limit, resulting
+        * in a long-running and seemingly "stuck" request. Also note that the message content
+        * may be partially cut off if `finish_reason="length"`, which indicates the generation
+        * exceeded `max_tokens` or the conversation exceeded the max context length.
+        */
        fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) =
            responseFormat(
                AssistantResponseFormatOption.ofResponseFormatJsonSchema(
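
Taken together, these overloads let a caller pass any concrete format type and have it wrapped into the `AssistantResponseFormatOption` union. Below is a minimal usage sketch of selecting JSON mode; `AssistantCreateParams` and the exact builder chain are assumptions inferred from this diff, not verified against the SDK:

```kotlin
// Sketch only: `AssistantCreateParams` and its builder methods are assumed
// from the diff context; consult the SDK for the real entry point.
val params = AssistantCreateParams.builder()
    .model("gpt-4o")
    // JSON mode guarantees syntactically valid JSON, but per the KDoc above
    // the prompt must still ask for JSON via a system or user message.
    .responseFormat(ResponseFormatJsonObject.builder().build())
    .instructions("Respond only with a JSON object.")
    .build()
```
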
@@ -976,14 +1039,74 @@ constructor(
            body.responseFormat(behavior)
        }

+       /**
+        * Specifies the format that the model must output. Compatible with
+        * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+        * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+        * Turbo models since `gpt-3.5-turbo-1106`.
+        *
+        * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+        * which ensures the model will match your supplied JSON schema. Learn more in the
+        * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        *
+        * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+        * model generates is valid JSON.
+        *
+        * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+        * yourself via a system or user message. Without this, the model may generate an unending
+        * stream of whitespace until the generation reaches the token limit, resulting in a
+        * long-running and seemingly "stuck" request. Also note that the message content may be
+        * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+        * `max_tokens` or the conversation exceeded the max context length.
+        */
        fun responseFormat(responseFormatText: ResponseFormatText) = apply {
            body.responseFormat(responseFormatText)
        }

+       /**
+        * Specifies the format that the model must output. Compatible with
+        * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+        * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+        * Turbo models since `gpt-3.5-turbo-1106`.
+        *
+        * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+        * which ensures the model will match your supplied JSON schema. Learn more in the
+        * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        *
+        * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+        * model generates is valid JSON.
+        *
+        * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+        * yourself via a system or user message. Without this, the model may generate an unending
+        * stream of whitespace until the generation reaches the token limit, resulting in a
+        * long-running and seemingly "stuck" request. Also note that the message content may be
+        * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+        * `max_tokens` or the conversation exceeded the max context length.
+        */
        fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) = apply {
            body.responseFormat(responseFormatJsonObject)
        }

+       /**
+        * Specifies the format that the model must output. Compatible with
+        * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+        * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+        * Turbo models since `gpt-3.5-turbo-1106`.
+        *
+        * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+        * which ensures the model will match your supplied JSON schema. Learn more in the
+        * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+        *
+        * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+        * model generates is valid JSON.
+        *
+        * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+        * yourself via a system or user message. Without this, the model may generate an unending
+        * stream of whitespace until the generation reaches the token limit, resulting in a
+        * long-running and seemingly "stuck" request. Also note that the message content may be
+        * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+        * `max_tokens` or the conversation exceeded the max context length.
+        */
        fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) = apply {
            body.responseFormat(responseFormatJsonSchema)
        }
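
At the request-builder level these overloads are thin delegates to `body.responseFormat(...)`. A hedged sketch of enabling Structured Outputs with a JSON schema follows; the nested `JsonSchema` builder and its `name` field mirror the `{ "type": "json_schema", "json_schema": {...} }` wire shape from the KDoc and are assumptions about the generated API:

```kotlin
// Assumed builder shape for the json_schema variant; names follow the wire
// format described in the KDoc and may differ in the generated SDK.
val format =
    ResponseFormatJsonSchema.builder()
        .jsonSchema(
            ResponseFormatJsonSchema.JsonSchema.builder()
                .name("weather_report") // hypothetical schema name
                .build()
        )
        .build()

// Delegates straight through to body.responseFormat(format).
builder.responseFormat(format)
```
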
@@ -1805,6 +1928,10 @@ constructor(
                )
            )

+       /**
+        * The chunking strategy used to chunk the file(s). If not set, will use the
+        * `auto` strategy. Only applicable if `file_ids` is non-empty.
+        */
        fun chunkingStrategy(
            staticFileChunkingStrategyParam: StaticFileChunkingStrategyParam
        ) =
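
The added KDoc pins down two behaviors: omitting `chunkingStrategy` falls back to the `auto` strategy, and the setting is ignored unless `file_ids` is non-empty. A sketch of supplying an explicit static strategy; the field names below (`static_`, `maxChunkSizeTokens`, `chunkOverlapTokens`) are assumptions based on the documented static strategy, not confirmed against this file:

```kotlin
// Assumed builder fields for the static strategy; verify against the SDK.
val strategy =
    StaticFileChunkingStrategyParam.builder()
        .static_(
            StaticFileChunkingStrategy.builder()
                .maxChunkSizeTokens(800) // hypothetical values
                .chunkOverlapTokens(400)
                .build()
        )
        .build()

// Only takes effect when file_ids is non-empty; otherwise `auto` is used.
vectorStoreBuilder.chunkingStrategy(strategy)
```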