
Commit 4e02164

docs: update some builder method javadocs (#99)
1 parent 4dc8acb · commit 4e02164

23 files changed: 1049 additions & 80 deletions

openai-java-core/src/main/kotlin/com/openai/models/Assistant.kt

Lines changed: 60 additions & 0 deletions
@@ -500,14 +500,74 @@ private constructor(
         fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) =
             responseFormat(AssistantResponseFormatOption.ofBehavior(behavior))

+        /**
+         * Specifies the format that the model must output. Compatible with
+         * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+         * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+         * Turbo models since `gpt-3.5-turbo-1106`.
+         *
+         * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+         * which ensures the model will match your supplied JSON schema. Learn more in the
+         * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+         *
+         * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+         * model generates is valid JSON.
+         *
+         * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+         * yourself via a system or user message. Without this, the model may generate an unending
+         * stream of whitespace until the generation reaches the token limit, resulting in a
+         * long-running and seemingly "stuck" request. Also note that the message content may be
+         * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+         * `max_tokens` or the conversation exceeded the max context length.
+         */
         fun responseFormat(responseFormatText: ResponseFormatText) =
             responseFormat(AssistantResponseFormatOption.ofResponseFormatText(responseFormatText))

+        /**
+         * Specifies the format that the model must output. Compatible with
+         * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+         * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+         * Turbo models since `gpt-3.5-turbo-1106`.
+         *
+         * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+         * which ensures the model will match your supplied JSON schema. Learn more in the
+         * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+         *
+         * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+         * model generates is valid JSON.
+         *
+         * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+         * yourself via a system or user message. Without this, the model may generate an unending
+         * stream of whitespace until the generation reaches the token limit, resulting in a
+         * long-running and seemingly "stuck" request. Also note that the message content may be
+         * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+         * `max_tokens` or the conversation exceeded the max context length.
+         */
         fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) =
             responseFormat(
                 AssistantResponseFormatOption.ofResponseFormatJsonObject(responseFormatJsonObject)
             )

+        /**
+         * Specifies the format that the model must output. Compatible with
+         * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+         * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+         * Turbo models since `gpt-3.5-turbo-1106`.
+         *
+         * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+         * which ensures the model will match your supplied JSON schema. Learn more in the
+         * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+         *
+         * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+         * model generates is valid JSON.
+         *
+         * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+         * yourself via a system or user message. Without this, the model may generate an unending
+         * stream of whitespace until the generation reaches the token limit, resulting in a
+         * long-running and seemingly "stuck" request. Also note that the message content may be
+         * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+         * `max_tokens` or the conversation exceeded the max context length.
+         */
         fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) =
             responseFormat(
                 AssistantResponseFormatOption.ofResponseFormatJsonSchema(responseFormatJsonSchema)
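The three overloads above are thin wrappers that lift a concrete format into the `AssistantResponseFormatOption` union via its `of*` factories. A minimal sketch of that equivalence, assuming the enclosing builder type is `Assistant.Builder` and that `ResponseFormatJsonObject.builder()` builds with no required fields; only the overloads and factories themselves appear in this diff:

import com.openai.models.Assistant
import com.openai.models.AssistantResponseFormatOption
import com.openai.models.ResponseFormatJsonObject

// Sketch only: ResponseFormatJsonObject.builder().build() with no required
// fields is assumed, not confirmed by this diff.
fun enableJsonMode(builder: Assistant.Builder) {
    val jsonMode = ResponseFormatJsonObject.builder().build()

    // Equivalent calls: the convenience overload documented above simply
    // delegates to the corresponding AssistantResponseFormatOption factory.
    builder.responseFormat(jsonMode)
    builder.responseFormat(AssistantResponseFormatOption.ofResponseFormatJsonObject(jsonMode))
}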

openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantCreateParams.kt

Lines changed: 127 additions & 0 deletions
@@ -592,18 +592,81 @@ constructor(
             fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) =
                 responseFormat(AssistantResponseFormatOption.ofBehavior(behavior))

+            /**
+             * Specifies the format that the model must output. Compatible with
+             * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+             * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+             * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+             *
+             * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+             * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+             * the
+             * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+             *
+             * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+             * the model generates is valid JSON.
+             *
+             * **Important:** when using JSON mode, you **must** also instruct the model to produce
+             * JSON yourself via a system or user message. Without this, the model may generate an
+             * unending stream of whitespace until the generation reaches the token limit, resulting
+             * in a long-running and seemingly "stuck" request. Also note that the message content
+             * may be partially cut off if `finish_reason="length"`, which indicates the generation
+             * exceeded `max_tokens` or the conversation exceeded the max context length.
+             */
             fun responseFormat(responseFormatText: ResponseFormatText) =
                 responseFormat(
                     AssistantResponseFormatOption.ofResponseFormatText(responseFormatText)
                 )

+            /**
+             * Specifies the format that the model must output. Compatible with
+             * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+             * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+             * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+             *
+             * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+             * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+             * the
+             * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+             *
+             * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+             * the model generates is valid JSON.
+             *
+             * **Important:** when using JSON mode, you **must** also instruct the model to produce
+             * JSON yourself via a system or user message. Without this, the model may generate an
+             * unending stream of whitespace until the generation reaches the token limit, resulting
+             * in a long-running and seemingly "stuck" request. Also note that the message content
+             * may be partially cut off if `finish_reason="length"`, which indicates the generation
+             * exceeded `max_tokens` or the conversation exceeded the max context length.
+             */
             fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) =
                 responseFormat(
                     AssistantResponseFormatOption.ofResponseFormatJsonObject(
                         responseFormatJsonObject
                     )
                 )

+            /**
+             * Specifies the format that the model must output. Compatible with
+             * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+             * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+             * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+             *
+             * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+             * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+             * the
+             * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+             *
+             * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+             * the model generates is valid JSON.
+             *
+             * **Important:** when using JSON mode, you **must** also instruct the model to produce
+             * JSON yourself via a system or user message. Without this, the model may generate an
+             * unending stream of whitespace until the generation reaches the token limit, resulting
+             * in a long-running and seemingly "stuck" request. Also note that the message content
+             * may be partially cut off if `finish_reason="length"`, which indicates the generation
+             * exceeded `max_tokens` or the conversation exceeded the max context length.
+             */
             fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) =
                 responseFormat(
                     AssistantResponseFormatOption.ofResponseFormatJsonSchema(
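The `{ "type": "json_schema", "json_schema": {...} }` shape the javadoc describes corresponds to the `ResponseFormatJsonSchema` overload. A hedged sketch of constructing one: the nested `JsonSchema` builder, its `name()`/`schema()` setters, and `JsonValue.from(...)` are assumptions about the SDK surface, not shown in this diff.

import com.openai.core.JsonValue
import com.openai.models.ResponseFormatJsonSchema

// Sketch only: everything below except the ResponseFormatJsonSchema type and
// the responseFormat(ResponseFormatJsonSchema) overload is an assumed API shape.
val schemaFormat = ResponseFormatJsonSchema.builder()
    .jsonSchema(
        ResponseFormatJsonSchema.JsonSchema.builder()
            .name("assistant_reply")
            .schema(
                JsonValue.from(
                    mapOf(
                        "type" to "object",
                        "properties" to mapOf("answer" to mapOf("type" to "string")),
                        "required" to listOf("answer"),
                    )
                )
            )
            .build()
    )
    .build()

Once built, the value feeds the `responseFormat(responseFormatJsonSchema)` overload documented in the hunk above, which wraps it via `AssistantResponseFormatOption.ofResponseFormatJsonSchema(...)`.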
@@ -976,14 +1039,74 @@ constructor(
         fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) = apply {
             body.responseFormat(behavior)
         }

+        /**
+         * Specifies the format that the model must output. Compatible with
+         * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+         * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+         * Turbo models since `gpt-3.5-turbo-1106`.
+         *
+         * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+         * which ensures the model will match your supplied JSON schema. Learn more in the
+         * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+         *
+         * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+         * model generates is valid JSON.
+         *
+         * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+         * yourself via a system or user message. Without this, the model may generate an unending
+         * stream of whitespace until the generation reaches the token limit, resulting in a
+         * long-running and seemingly "stuck" request. Also note that the message content may be
+         * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+         * `max_tokens` or the conversation exceeded the max context length.
+         */
         fun responseFormat(responseFormatText: ResponseFormatText) = apply {
             body.responseFormat(responseFormatText)
         }

+        /**
+         * Specifies the format that the model must output. Compatible with
+         * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+         * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+         * Turbo models since `gpt-3.5-turbo-1106`.
+         *
+         * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+         * which ensures the model will match your supplied JSON schema. Learn more in the
+         * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+         *
+         * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+         * model generates is valid JSON.
+         *
+         * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+         * yourself via a system or user message. Without this, the model may generate an unending
+         * stream of whitespace until the generation reaches the token limit, resulting in a
+         * long-running and seemingly "stuck" request. Also note that the message content may be
+         * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+         * `max_tokens` or the conversation exceeded the max context length.
+         */
         fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) = apply {
             body.responseFormat(responseFormatJsonObject)
         }

+        /**
+         * Specifies the format that the model must output. Compatible with
+         * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+         * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+         * Turbo models since `gpt-3.5-turbo-1106`.
+         *
+         * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+         * which ensures the model will match your supplied JSON schema. Learn more in the
+         * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+         *
+         * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+         * model generates is valid JSON.
+         *
+         * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+         * yourself via a system or user message. Without this, the model may generate an unending
+         * stream of whitespace until the generation reaches the token limit, resulting in a
+         * long-running and seemingly "stuck" request. Also note that the message content may be
+         * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+         * `max_tokens` or the conversation exceeded the max context length.
+         */
         fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) = apply {
             body.responseFormat(responseFormatJsonSchema)
         }
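This hunk shows the params-level overloads forwarding to `body.responseFormat(...)`, so callers can set the format directly while building create params. A hedged sketch that also honors the javadoc's **Important** caveat by asking for JSON explicitly; `model(...)` and `instructions(...)` are assumed setters not shown in this diff:

import com.openai.models.BetaAssistantCreateParams
import com.openai.models.ResponseFormatJsonObject

val params = BetaAssistantCreateParams.builder()
    .model("gpt-4o")                                             // assumed setter
    // Per the "Important" note: JSON mode without an explicit request for JSON
    // can stall into whitespace until the token limit is reached.
    .instructions("Answer with a single valid JSON object.")     // assumed setter
    .responseFormat(ResponseFormatJsonObject.builder().build())  // overload from this diff
    .build()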
@@ -1805,6 +1928,10 @@ constructor(
                     )
                 )

+            /**
+             * The chunking strategy used to chunk the file(s). If not set, will use the
+             * `auto` strategy. Only applicable if `file_ids` is non-empty.
+             */
             fun chunkingStrategy(
                 staticFileChunkingStrategyParam: StaticFileChunkingStrategyParam
             ) =
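The new javadoc pins down the default: leave `chunkingStrategy` unset to get the `auto` strategy, and it only matters when `file_ids` is non-empty. A hedged sketch of the static alternative; the `static_(...)` wrapper and the token setters are assumed API shapes (800/400 mirror the platform's documented static defaults), and only the `chunkingStrategy(StaticFileChunkingStrategyParam)` overload appears in this diff:

import com.openai.models.StaticFileChunkingStrategy
import com.openai.models.StaticFileChunkingStrategyParam

// Sketch only: builder shapes below are assumed. Omitting chunkingStrategy(...)
// entirely falls back to the `auto` strategy per the new javadoc.
val chunking = StaticFileChunkingStrategyParam.builder()
    .static_(
        StaticFileChunkingStrategy.builder()
            .maxChunkSizeTokens(800)
            .chunkOverlapTokens(400)
            .build()
    )
    .build()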
