diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index f7014c353..e82003f4e 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.11.0"
+ ".": "0.11.1"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 86f76d768..6e3315239 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 60
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-02200a58ed631064b6419711da99fefd6e97bdbbeb577a80a1a6e0c8dbcb18f5.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b5b0e2c794b012919701c3fd43286af10fa25d33ceb8a881bec2636028f446e0.yml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 12ea0de21..dd00d75e0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
# Changelog
+## 0.11.1 (2025-01-09)
+
+Full Changelog: [v0.11.0...v0.11.1](https://github.com/openai/openai-java/compare/v0.11.0...v0.11.1)
+
+### Chores
+
+* **internal:** add some missing newlines between methods ([#100](https://github.com/openai/openai-java/issues/100)) ([afc2998](https://github.com/openai/openai-java/commit/afc2998ac124a26fe3ec92207f5ff4c9614ff673))
+* **internal:** spec update ([#97](https://github.com/openai/openai-java/issues/97)) ([0cff792](https://github.com/openai/openai-java/commit/0cff79271c63be46f5502a138ce1ad67a146724f))
+
+
+### Documentation
+
+* update some builder method javadocs ([#99](https://github.com/openai/openai-java/issues/99)) ([192965a](https://github.com/openai/openai-java/commit/192965abf73b9868d808c407bfc9fb73a507def7))
+
## 0.11.0 (2025-01-08)
Full Changelog: [v0.10.0...v0.11.0](https://github.com/openai/openai-java/compare/v0.10.0...v0.11.0)
diff --git a/README.md b/README.md
index 65bd21338..a644cef5c 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,7 @@
-[](https://central.sonatype.com/artifact/com.openai/openai-java/0.11.0)
+[](https://central.sonatype.com/artifact/com.openai/openai-java/0.11.1)
@@ -32,7 +32,7 @@ The REST API documentation can be found on [platform.openai.com](https://platfo
```kotlin
-implementation("com.openai:openai-java:0.11.0")
+implementation("com.openai:openai-java:0.11.1")
```
#### Maven
@@ -41,7 +41,7 @@ implementation("com.openai:openai-java:0.11.0")
<dependency>
    <groupId>com.openai</groupId>
    <artifactId>openai-java</artifactId>
-    <version>0.11.0</version>
+    <version>0.11.1</version>
</dependency>
```
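For orientation, a minimal sketch of first use after adding the dependency, assuming the client entry points from the project README (`OpenAIOkHttpClient.fromEnv()` reads configuration such as `OPENAI_API_KEY` from the environment):

```kotlin
import com.openai.client.OpenAIClient
import com.openai.client.okhttp.OpenAIOkHttpClient

// Builds a client from environment variables (OPENAI_API_KEY etc.).
val client: OpenAIClient = OpenAIOkHttpClient.fromEnv()
```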
diff --git a/build.gradle.kts b/build.gradle.kts
index c8bce5fe2..0c44ebecf 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -4,7 +4,7 @@ plugins {
allprojects {
group = "com.openai"
- version = "0.11.0" // x-release-please-version
+ version = "0.11.1" // x-release-please-version
}
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/Annotation.kt b/openai-java-core/src/main/kotlin/com/openai/models/Annotation.kt
index fba511f1d..d7226af69 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/Annotation.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/Annotation.kt
@@ -41,6 +41,7 @@ private constructor(
*/
fun fileCitationAnnotation(): Optional<FileCitationAnnotation> =
Optional.ofNullable(fileCitationAnnotation)
+
/**
* A URL for the file that's generated when the assistant used the `code_interpreter` tool to
* generate a file.
@@ -58,6 +59,7 @@ private constructor(
*/
fun asFileCitationAnnotation(): FileCitationAnnotation =
fileCitationAnnotation.getOrThrow("fileCitationAnnotation")
+
/**
* A URL for the file that's generated when the assistant used the `code_interpreter` tool to
* generate a file.
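The blank lines added above separate the SDK's standard union-accessor pairs: `fileCitationAnnotation()` returns an `Optional` for safe branching, while `asFileCitationAnnotation()` unwraps and throws when the union holds a different variant. A minimal sketch of the pattern (the `Annotation` value is assumed to come from an API response):

```kotlin
// Branch on a union via its Optional-returning accessor; the as* accessor is
// only safe once the variant is known to be present.
fun describe(annotation: Annotation): String {
    val citation = annotation.fileCitationAnnotation() // Optional<FileCitationAnnotation>
    return if (citation.isPresent) {
        "file citation: ${annotation.asFileCitationAnnotation()}"
    } else {
        "another annotation variant"
    }
}
```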
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/AnnotationDelta.kt b/openai-java-core/src/main/kotlin/com/openai/models/AnnotationDelta.kt
index 71c89ad4f..eacfccadc 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/AnnotationDelta.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/AnnotationDelta.kt
@@ -41,6 +41,7 @@ private constructor(
*/
fun fileCitationDeltaAnnotation(): Optional<FileCitationDeltaAnnotation> =
Optional.ofNullable(fileCitationDeltaAnnotation)
+
/**
* A URL for the file that's generated when the assistant used the `code_interpreter` tool to
* generate a file.
@@ -59,6 +60,7 @@ private constructor(
*/
fun asFileCitationDeltaAnnotation(): FileCitationDeltaAnnotation =
fileCitationDeltaAnnotation.getOrThrow("fileCitationDeltaAnnotation")
+
/**
* A URL for the file that's generated when the assistant used the `code_interpreter` tool to
* generate a file.
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/Assistant.kt b/openai-java-core/src/main/kotlin/com/openai/models/Assistant.kt
index 9a83bf522..b489595b6 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/Assistant.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/Assistant.kt
@@ -500,14 +500,74 @@ private constructor(
fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) =
responseFormat(AssistantResponseFormatOption.ofBehavior(behavior))
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatText: ResponseFormatText) =
responseFormat(AssistantResponseFormatOption.ofResponseFormatText(responseFormatText))
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonObject(responseFormatJsonObject)
)
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonSchema(responseFormatJsonSchema)
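The three overloads documented above are thin wrappers: each wraps its argument in an `AssistantResponseFormatOption` and delegates to the canonical setter, which is why the long javadoc is duplicated verbatim on every entry point. A sketch of the equivalence, using only the factory methods visible in this hunk (`builder` and `jsonSchema` are hypothetical values):

```kotlin
// The convenience overload...
builder.responseFormat(jsonSchema)
// ...is equivalent to wrapping manually:
builder.responseFormat(AssistantResponseFormatOption.ofResponseFormatJsonSchema(jsonSchema))
```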
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/AssistantStreamEvent.kt b/openai-java-core/src/main/kotlin/com/openai/models/AssistantStreamEvent.kt
index 9aef75ab8..281dc564f 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/AssistantStreamEvent.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/AssistantStreamEvent.kt
@@ -87,58 +87,70 @@ private constructor(
* created.
*/
fun threadCreated(): Optional<ThreadCreated> = Optional.ofNullable(threadCreated)
+
/**
* Occurs when a new [run](https://platform.openai.com/docs/api-reference/runs/object) is
* created.
*/
fun threadRunCreated(): Optional<ThreadRunCreated> = Optional.ofNullable(threadRunCreated)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a
* `queued` status.
*/
fun threadRunQueued(): Optional<ThreadRunQueued> = Optional.ofNullable(threadRunQueued)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to an
* `in_progress` status.
*/
fun threadRunInProgress(): Optional<ThreadRunInProgress> =
Optional.ofNullable(threadRunInProgress)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a
* `requires_action` status.
*/
fun threadRunRequiresAction(): Optional<ThreadRunRequiresAction> =
Optional.ofNullable(threadRunRequiresAction)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is completed.
*/
fun threadRunCompleted(): Optional<ThreadRunCompleted> = Optional.ofNullable(threadRunCompleted)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) ends with
* status `incomplete`.
*/
fun threadRunIncomplete(): Optional<ThreadRunIncomplete> =
Optional.ofNullable(threadRunIncomplete)
+
/** Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) fails. */
fun threadRunFailed(): Optional<ThreadRunFailed> = Optional.ofNullable(threadRunFailed)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a
* `cancelling` status.
*/
fun threadRunCancelling(): Optional<ThreadRunCancelling> =
Optional.ofNullable(threadRunCancelling)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is cancelled.
*/
fun threadRunCancelled(): Optional<ThreadRunCancelled> = Optional.ofNullable(threadRunCancelled)
+
/** Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) expires. */
fun threadRunExpired(): Optional<ThreadRunExpired> = Optional.ofNullable(threadRunExpired)
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is created.
*/
fun threadRunStepCreated(): Optional<ThreadRunStepCreated> =
Optional.ofNullable(threadRunStepCreated)
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) moves to an
@@ -146,12 +158,14 @@ private constructor(
*/
fun threadRunStepInProgress(): Optional<ThreadRunStepInProgress> =
Optional.ofNullable(threadRunStepInProgress)
+
/**
* Occurs when parts of a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) are being
* streamed.
*/
fun threadRunStepDelta(): Optional<ThreadRunStepDelta> = Optional.ofNullable(threadRunStepDelta)
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is
@@ -159,12 +173,14 @@ private constructor(
*/
fun threadRunStepCompleted(): Optional<ThreadRunStepCompleted> =
Optional.ofNullable(threadRunStepCompleted)
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) fails.
*/
fun threadRunStepFailed(): Optional<ThreadRunStepFailed> =
Optional.ofNullable(threadRunStepFailed)
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is
@@ -172,41 +188,48 @@ private constructor(
*/
fun threadRunStepCancelled(): Optional<ThreadRunStepCancelled> =
Optional.ofNullable(threadRunStepCancelled)
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) expires.
*/
fun threadRunStepExpired(): Optional<ThreadRunStepExpired> =
Optional.ofNullable(threadRunStepExpired)
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is
* created.
*/
fun threadMessageCreated(): Optional<ThreadMessageCreated> =
Optional.ofNullable(threadMessageCreated)
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) moves
* to an `in_progress` state.
*/
fun threadMessageInProgress(): Optional<ThreadMessageInProgress> =
Optional.ofNullable(threadMessageInProgress)
+
/**
* Occurs when parts of a
* [Message](https://platform.openai.com/docs/api-reference/messages/object) are being streamed.
*/
fun threadMessageDelta(): Optional<ThreadMessageDelta> = Optional.ofNullable(threadMessageDelta)
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is
* completed.
*/
fun threadMessageCompleted(): Optional<ThreadMessageCompleted> =
Optional.ofNullable(threadMessageCompleted)
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) ends
* before it is completed.
*/
fun threadMessageIncomplete(): Optional<ThreadMessageIncomplete> =
Optional.ofNullable(threadMessageIncomplete)
+
/**
* Occurs when an [error](https://platform.openai.com/docs/guides/error-codes#api-errors)
* occurs. This can happen due to an internal server error or a timeout.
@@ -266,60 +289,72 @@ private constructor(
* created.
*/
fun asThreadCreated(): ThreadCreated = threadCreated.getOrThrow("threadCreated")
+
/**
* Occurs when a new [run](https://platform.openai.com/docs/api-reference/runs/object) is
* created.
*/
fun asThreadRunCreated(): ThreadRunCreated = threadRunCreated.getOrThrow("threadRunCreated")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a
* `queued` status.
*/
fun asThreadRunQueued(): ThreadRunQueued = threadRunQueued.getOrThrow("threadRunQueued")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to an
* `in_progress` status.
*/
fun asThreadRunInProgress(): ThreadRunInProgress =
threadRunInProgress.getOrThrow("threadRunInProgress")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a
* `requires_action` status.
*/
fun asThreadRunRequiresAction(): ThreadRunRequiresAction =
threadRunRequiresAction.getOrThrow("threadRunRequiresAction")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is completed.
*/
fun asThreadRunCompleted(): ThreadRunCompleted =
threadRunCompleted.getOrThrow("threadRunCompleted")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) ends with
* status `incomplete`.
*/
fun asThreadRunIncomplete(): ThreadRunIncomplete =
threadRunIncomplete.getOrThrow("threadRunIncomplete")
+
/** Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) fails. */
fun asThreadRunFailed(): ThreadRunFailed = threadRunFailed.getOrThrow("threadRunFailed")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a
* `cancelling` status.
*/
fun asThreadRunCancelling(): ThreadRunCancelling =
threadRunCancelling.getOrThrow("threadRunCancelling")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is cancelled.
*/
fun asThreadRunCancelled(): ThreadRunCancelled =
threadRunCancelled.getOrThrow("threadRunCancelled")
+
/** Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) expires. */
fun asThreadRunExpired(): ThreadRunExpired = threadRunExpired.getOrThrow("threadRunExpired")
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is created.
*/
fun asThreadRunStepCreated(): ThreadRunStepCreated =
threadRunStepCreated.getOrThrow("threadRunStepCreated")
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) moves to an
@@ -327,6 +362,7 @@ private constructor(
*/
fun asThreadRunStepInProgress(): ThreadRunStepInProgress =
threadRunStepInProgress.getOrThrow("threadRunStepInProgress")
+
/**
* Occurs when parts of a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) are being
@@ -334,6 +370,7 @@ private constructor(
*/
fun asThreadRunStepDelta(): ThreadRunStepDelta =
threadRunStepDelta.getOrThrow("threadRunStepDelta")
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is
@@ -341,12 +378,14 @@ private constructor(
*/
fun asThreadRunStepCompleted(): ThreadRunStepCompleted =
threadRunStepCompleted.getOrThrow("threadRunStepCompleted")
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) fails.
*/
fun asThreadRunStepFailed(): ThreadRunStepFailed =
threadRunStepFailed.getOrThrow("threadRunStepFailed")
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is
@@ -354,42 +393,49 @@ private constructor(
*/
fun asThreadRunStepCancelled(): ThreadRunStepCancelled =
threadRunStepCancelled.getOrThrow("threadRunStepCancelled")
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) expires.
*/
fun asThreadRunStepExpired(): ThreadRunStepExpired =
threadRunStepExpired.getOrThrow("threadRunStepExpired")
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is
* created.
*/
fun asThreadMessageCreated(): ThreadMessageCreated =
threadMessageCreated.getOrThrow("threadMessageCreated")
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) moves
* to an `in_progress` state.
*/
fun asThreadMessageInProgress(): ThreadMessageInProgress =
threadMessageInProgress.getOrThrow("threadMessageInProgress")
+
/**
* Occurs when parts of a
* [Message](https://platform.openai.com/docs/api-reference/messages/object) are being streamed.
*/
fun asThreadMessageDelta(): ThreadMessageDelta =
threadMessageDelta.getOrThrow("threadMessageDelta")
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is
* completed.
*/
fun asThreadMessageCompleted(): ThreadMessageCompleted =
threadMessageCompleted.getOrThrow("threadMessageCompleted")
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) ends
* before it is completed.
*/
fun asThreadMessageIncomplete(): ThreadMessageIncomplete =
threadMessageIncomplete.getOrThrow("threadMessageIncomplete")
+
/**
* Occurs when an [error](https://platform.openai.com/docs/guides/error-codes#api-errors)
* occurs. This can happen due to an internal server error or a timeout.
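Every stream-event variant receives the same accessor pair, so a consumer can branch on the `Optional` accessors and only call the throwing `as*` form once presence is established. A hedged sketch of dispatching on a few of the variants above (the event source is assumed):

```kotlin
// Dispatch on a handful of the union's variants; unhandled events fall through.
fun onEvent(event: AssistantStreamEvent) {
    when {
        event.threadRunCreated().isPresent -> println("run created")
        event.threadRunCompleted().isPresent -> println("run completed")
        event.threadMessageDelta().isPresent -> {
            // Safe: the Optional above is known to be present.
            val delta = event.asThreadMessageDelta()
            println("message delta: $delta")
        }
        else -> println("unhandled event")
    }
}
```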
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/AssistantToolChoiceOption.kt b/openai-java-core/src/main/kotlin/com/openai/models/AssistantToolChoiceOption.kt
index a868b55ac..66ad0c65b 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/AssistantToolChoiceOption.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/AssistantToolChoiceOption.kt
@@ -45,6 +45,7 @@ private constructor(
* means the model must call one or more tools before responding to the user.
*/
fun behavior(): Optional<Behavior> = Optional.ofNullable(behavior)
+
/** Specifies a tool the model should use. Use to force the model to call a specific tool. */
fun assistantToolChoice(): Optional<AssistantToolChoice> =
Optional.ofNullable(assistantToolChoice)
@@ -59,6 +60,7 @@ private constructor(
* means the model must call one or more tools before responding to the user.
*/
fun asBehavior(): Behavior = behavior.getOrThrow("behavior")
+
/** Specifies a tool the model should use. Use to force the model to call a specific tool. */
fun asAssistantToolChoice(): AssistantToolChoice =
assistantToolChoice.getOrThrow("assistantToolChoice")
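This union holds either a plain `Behavior` (`none`, `auto`, or `required`) or a concrete `AssistantToolChoice` that forces one tool. A small sketch of exhaustive handling using only the accessors shown above:

```kotlin
// Exactly one of the two accessors yields a value for a well-formed option.
fun render(option: AssistantToolChoiceOption): String =
    when {
        option.behavior().isPresent -> "behavior: ${option.asBehavior()}"
        option.assistantToolChoice().isPresent ->
            "forced tool: ${option.asAssistantToolChoice()}"
        else -> "unset"
    }
```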
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantCreateParams.kt
index a6a3427f0..3b9e0e21e 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantCreateParams.kt
@@ -592,11 +592,53 @@ constructor(
fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) =
responseFormat(AssistantResponseFormatOption.ofBehavior(behavior))
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+ * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatText: ResponseFormatText) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatText(responseFormatText)
)
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+ * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonObject(
@@ -604,6 +646,27 @@ constructor(
)
)
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+ * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonSchema(
@@ -976,14 +1039,74 @@ constructor(
body.responseFormat(behavior)
}
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatText: ResponseFormatText) = apply {
body.responseFormat(responseFormatText)
}
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) = apply {
body.responseFormat(responseFormatJsonObject)
}
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) = apply {
body.responseFormat(responseFormatJsonSchema)
}
@@ -1805,6 +1928,10 @@ constructor(
)
)
+ /**
+ * The chunking strategy used to chunk the file(s). If not set, will use the
+ * `auto` strategy. Only applicable if `file_ids` is non-empty.
+ */
fun chunkingStrategy(
staticFileChunkingStrategyParam: StaticFileChunkingStrategyParam
) =
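The new javadoc stresses that JSON mode must be paired with an explicit instruction to emit JSON. A hedged sketch of honoring that on assistant creation; apart from the `responseFormat(...)` overload documented above, the builder methods (`builder()`, `model()`, `instructions()`, `build()`) and the no-arg `ResponseFormatJsonObject` builder are assumptions about the generated classes:

```kotlin
// Pair JSON mode with instructions that mention JSON, per the javadoc warning,
// to avoid the "unending stream of whitespace" failure mode.
val params = BetaAssistantCreateParams.builder()
    .model("gpt-4o")
    .instructions("You respond only with valid JSON objects.")
    .responseFormat(ResponseFormatJsonObject.builder().build()) // overload documented above
    .build()
```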
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantUpdateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantUpdateParams.kt
index e7fd3ae87..b45113a89 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantUpdateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantUpdateParams.kt
@@ -594,11 +594,53 @@ constructor(
fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) =
responseFormat(AssistantResponseFormatOption.ofBehavior(behavior))
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+ * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatText: ResponseFormatText) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatText(responseFormatText)
)
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+ * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonObject(
@@ -606,6 +648,27 @@ constructor(
)
)
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+ * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonSchema(
@@ -974,14 +1037,74 @@ constructor(
body.responseFormat(behavior)
}
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatText: ResponseFormatText) = apply {
body.responseFormat(responseFormatText)
}
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) = apply {
body.responseFormat(responseFormatJsonObject)
}
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) = apply {
body.responseFormat(responseFormatJsonSchema)
}
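For completeness, the `Behavior` overload at the top of this hunk covers the plain string settings. A sketch, where the `assistantId(...)` setter and the `AUTO` constant are assumptions about the generated code:

```kotlin
// Reset an assistant's response format to the default `auto` behavior.
val params = BetaAssistantUpdateParams.builder()
    .assistantId("asst_...") // hypothetical placeholder id
    .responseFormat(AssistantResponseFormatOption.Behavior.AUTO)
    .build()
```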
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateAndRunParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateAndRunParams.kt
index 66febe860..2a6dacc1f 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateAndRunParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateAndRunParams.kt
@@ -914,11 +914,53 @@ constructor(
fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) =
responseFormat(AssistantResponseFormatOption.ofBehavior(behavior))
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+ * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatText: ResponseFormatText) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatText(responseFormatText)
)
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+ * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonObject(
@@ -926,6 +968,27 @@ constructor(
)
)
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+ * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonSchema(
@@ -1479,14 +1542,74 @@ constructor(
body.responseFormat(behavior)
}
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatText: ResponseFormatText) = apply {
body.responseFormat(responseFormatText)
}
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) = apply {
body.responseFormat(responseFormatJsonObject)
}
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) = apply {
body.responseFormat(responseFormatJsonSchema)
}
@@ -2246,6 +2369,7 @@ constructor(
/** The text contents of the message. */
fun textContent(): Optional<String> = Optional.ofNullable(textContent)
+
/**
* An array of content parts with a defined type, each can be of type `text` or
* images can be passed with `image_url` or `image_file`. Image types are only
@@ -2260,6 +2384,7 @@ constructor(
/** The text contents of the message. */
fun asTextContent(): String = textContent.getOrThrow("textContent")
+
/**
* An array of content parts with a defined type, each can be of type `text` or
* images can be passed with `image_url` or `image_file`. Image types are only
@@ -3448,6 +3573,10 @@ constructor(
)
)
+ /**
+ * The chunking strategy used to chunk the file(s). If not set, will use the
+ * `auto` strategy. Only applicable if `file_ids` is non-empty.
+ */
fun chunkingStrategy(
staticFileChunkingStrategyParam: StaticFileChunkingStrategyParam
) =
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateParams.kt
index 6f0ea5a49..6e8143226 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateParams.kt
@@ -699,6 +699,7 @@ constructor(
/** The text contents of the message. */
fun textContent(): Optional<String> = Optional.ofNullable(textContent)
+
/**
* An array of content parts with a defined type, each can be of type `text` or images
* can be passed with `image_url` or `image_file`. Image types are only supported on
@@ -713,6 +714,7 @@ constructor(
/** The text contents of the message. */
fun asTextContent(): String = textContent.getOrThrow("textContent")
+
/**
* An array of content parts with a defined type, each can be of type `text` or images
* can be passed with `image_url` or `image_file`. Image types are only supported on
@@ -1886,6 +1888,10 @@ constructor(
)
)
+ /**
+ * The chunking strategy used to chunk the file(s). If not set, will use the
+ * `auto` strategy. Only applicable if `file_ids` is non-empty.
+ */
fun chunkingStrategy(
staticFileChunkingStrategyParam: StaticFileChunkingStrategyParam
) =
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageCreateParams.kt
index 79fdee0c7..678c8e3ac 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageCreateParams.kt
@@ -537,6 +537,7 @@ constructor(
/** The text contents of the message. */
fun textContent(): Optional<String> = Optional.ofNullable(textContent)
+
/**
* An array of content parts with a defined type, each can be of type `text` or images can
* be passed with `image_url` or `image_file`. Image types are only supported on
@@ -551,6 +552,7 @@ constructor(
/** The text contents of the message. */
fun asTextContent(): String = textContent.getOrThrow("textContent")
+
/**
* An array of content parts with a defined type, each can be of type `text` or images can
* be passed with `image_url` or `image_file`. Image types are only supported on
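The message-content union above pairs `textContent(): Optional<String>` with a throwing `asTextContent()`, mirroring the annotation and event unions earlier in this diff. A sketch of reading it (the union value `content` and its concrete class name are assumed):

```kotlin
// Plain-text case via the Optional accessor; orElse(null) avoids the throw.
val text: String? = content.textContent().orElse(null)

// Or, once the plain-text variant is known to be present:
val known: String = content.asTextContent()
```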
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunCreateParams.kt
index 9aac95b9e..3f142cf0f 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunCreateParams.kt
@@ -1002,11 +1002,53 @@ constructor(
fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) =
responseFormat(AssistantResponseFormatOption.ofBehavior(behavior))
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+ * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatText: ResponseFormatText) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatText(responseFormatText)
)
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+ * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonObject(
@@ -1014,6 +1056,27 @@ constructor(
)
)
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all
+ * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonSchema(
@@ -1628,14 +1691,74 @@ constructor(
body.responseFormat(behavior)
}
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatText: ResponseFormatText) = apply {
body.responseFormat(responseFormatText)
}
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) = apply {
body.responseFormat(responseFormatJsonObject)
}
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) = apply {
body.responseFormat(responseFormatJsonSchema)
}
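
The JSON-mode warning above is easy to miss in practice, so here is a minimal sketch of honoring it. The enclosing params class is not named in this hunk, so `BetaThreadRunCreateParams`, its id setters, `additionalInstructions`, and the `type` setter on `ResponseFormatJsonObject` are assumptions; only the `responseFormat` overloads themselves come from this patch.

    // Hedged sketch: enable JSON mode and, per the javadoc above, also instruct
    // the model to produce JSON so the request cannot appear "stuck".
    val runParams = BetaThreadRunCreateParams.builder()             // assumed params class
        .threadId("thread_abc123")                                  // assumed setter
        .assistantId("asst_abc123")                                 // assumed setter
        .responseFormat(
            ResponseFormatJsonObject.builder()
                .type(ResponseFormatJsonObject.Type.JSON_OBJECT)    // assumed required in this version
                .build()
        )
        .additionalInstructions("Reply with a single JSON object.") // satisfies the JSON-mode requirement
        .build()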
@@ -2161,6 +2284,7 @@ constructor(
/** The text contents of the message. */
        fun textContent(): Optional<String> = Optional.ofNullable(textContent)
+
/**
* An array of content parts with a defined type, each can be of type `text` or images
* can be passed with `image_url` or `image_file`. Image types are only supported on
@@ -2175,6 +2299,7 @@ constructor(
/** The text contents of the message. */
fun asTextContent(): String = textContent.getOrThrow("textContent")
+
/**
* An array of content parts with a defined type, each can be of type `text` or images
* can be passed with `image_url` or `image_file`. Image types are only supported on
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreCreateParams.kt
index 70bf2d606..93add8157 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreCreateParams.kt
@@ -223,6 +223,10 @@ constructor(
)
)
+ /**
+ * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+ * strategy. Only applicable if `file_ids` is non-empty.
+ */
fun chunkingStrategy(staticFileChunkingStrategyParam: StaticFileChunkingStrategyParam) =
chunkingStrategy(
FileChunkingStrategyParam.ofStaticFileChunkingStrategyParam(
@@ -375,6 +379,10 @@ constructor(
body.chunkingStrategy(autoFileChunkingStrategyParam)
}
+ /**
+ * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+ * strategy. Only applicable if `file_ids` is non-empty.
+ */
fun chunkingStrategy(staticFileChunkingStrategyParam: StaticFileChunkingStrategyParam) =
apply {
body.chunkingStrategy(staticFileChunkingStrategyParam)
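
Since the same `auto`-default note now appears on each overload, a short sketch may show when the static overload is worth reaching for. Only `chunkingStrategy(...)` on `BetaVectorStoreCreateParams.Builder` is confirmed by this patch; the `name`/`addFileId` setters and the nested `StaticFileChunkingStrategy` builder with its token setters are assumptions inferred from the parameter types named in this hunk.

    // Hedged sketch: override the default `auto` strategy with fixed-size chunks.
    val vectorStoreParams = BetaVectorStoreCreateParams.builder()
        .name("support-docs")       // assumed setter
        .addFileId("file-abc123")   // assumed setter; strategy only applies when file_ids is non-empty
        .chunkingStrategy(
            StaticFileChunkingStrategyParam.builder()
                .static_(                                // assumed builder method name
                    StaticFileChunkingStrategy.builder()
                        .maxChunkSizeTokens(800L)        // assumed field names
                        .chunkOverlapTokens(400L)
                        .build()
                )
                .build()
        )
        .build()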
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileBatchCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileBatchCreateParams.kt
index f657977ad..c54ff4f70 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileBatchCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileBatchCreateParams.kt
@@ -207,6 +207,10 @@ constructor(
)
)
+ /**
+ * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+ * strategy. Only applicable if `file_ids` is non-empty.
+ */
fun chunkingStrategy(staticFileChunkingStrategyParam: StaticFileChunkingStrategyParam) =
chunkingStrategy(
FileChunkingStrategyParam.ofStaticFileChunkingStrategyParam(
@@ -331,6 +335,10 @@ constructor(
body.chunkingStrategy(autoFileChunkingStrategyParam)
}
+ /**
+ * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+ * strategy. Only applicable if `file_ids` is non-empty.
+ */
fun chunkingStrategy(staticFileChunkingStrategyParam: StaticFileChunkingStrategyParam) =
apply {
body.chunkingStrategy(staticFileChunkingStrategyParam)
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileCreateParams.kt
index 6c7260625..0e944fc59 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileCreateParams.kt
@@ -191,6 +191,10 @@ constructor(
)
)
+ /**
+ * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+ * strategy. Only applicable if `file_ids` is non-empty.
+ */
fun chunkingStrategy(staticFileChunkingStrategyParam: StaticFileChunkingStrategyParam) =
chunkingStrategy(
FileChunkingStrategyParam.ofStaticFileChunkingStrategyParam(
@@ -307,6 +311,10 @@ constructor(
body.chunkingStrategy(autoFileChunkingStrategyParam)
}
+ /**
+ * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+ * strategy. Only applicable if `file_ids` is non-empty.
+ */
fun chunkingStrategy(staticFileChunkingStrategyParam: StaticFileChunkingStrategyParam) =
apply {
body.chunkingStrategy(staticFileChunkingStrategyParam)
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionAssistantMessageParam.kt b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionAssistantMessageParam.kt
index debbeecf1..f80f88ca0 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionAssistantMessageParam.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionAssistantMessageParam.kt
@@ -495,6 +495,7 @@ private constructor(
/** The contents of the assistant message. */
        fun textContent(): Optional<String> = Optional.ofNullable(textContent)
+
/**
* An array of content parts with a defined type. Can be one or more of type `text`, or
* exactly one of type `refusal`.
@@ -509,6 +510,7 @@ private constructor(
/** The contents of the assistant message. */
fun asTextContent(): String = textContent.getOrThrow("textContent")
+
/**
* An array of content parts with a defined type. Can be one or more of type `text`, or
* exactly one of type `refusal`.
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionContentPart.kt b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionContentPart.kt
index 837263b2b..1cc5869ac 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionContentPart.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionContentPart.kt
@@ -34,9 +34,11 @@ private constructor(
/** Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation). */
    fun chatCompletionContentPartText(): Optional<ChatCompletionContentPartText> =
        Optional.ofNullable(chatCompletionContentPartText)
+
    /** Learn about [image inputs](https://platform.openai.com/docs/guides/vision). */
    fun chatCompletionContentPartImage(): Optional<ChatCompletionContentPartImage> =
        Optional.ofNullable(chatCompletionContentPartImage)
+
    /** Learn about [audio inputs](https://platform.openai.com/docs/guides/audio). */
    fun chatCompletionContentPartInputAudio(): Optional<ChatCompletionContentPartInputAudio> =
        Optional.ofNullable(chatCompletionContentPartInputAudio)
@@ -51,9 +53,11 @@ private constructor(
/** Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation). */
fun asChatCompletionContentPartText(): ChatCompletionContentPartText =
chatCompletionContentPartText.getOrThrow("chatCompletionContentPartText")
+
/** Learn about [image inputs](https://platform.openai.com/docs/guides/vision). */
fun asChatCompletionContentPartImage(): ChatCompletionContentPartImage =
chatCompletionContentPartImage.getOrThrow("chatCompletionContentPartImage")
+
/** Learn about [audio inputs](https://platform.openai.com/docs/guides/audio). */
fun asChatCompletionContentPartInputAudio(): ChatCompletionContentPartInputAudio =
chatCompletionContentPartInputAudio.getOrThrow("chatCompletionContentPartInputAudio")
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionCreateParams.kt
index 12c3aba7d..e16d9f493 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionCreateParams.kt
@@ -1357,12 +1357,9 @@ constructor(
}
/**
- * A list of messages comprising the conversation so far. Depending on the
- * [model](https://platform.openai.com/docs/models) you use, different message types
- * (modalities) are supported, like
- * [text](https://platform.openai.com/docs/guides/text-generation),
- * [images](https://platform.openai.com/docs/guides/vision), and
- * [audio](https://platform.openai.com/docs/guides/audio).
+ * Developer-provided instructions that the model should follow, regardless of messages
+ * sent by the user. With o1 models and newer, `developer` messages replace the previous
+ * `system` messages.
*/
fun addMessage(
chatCompletionDeveloperMessageParam: ChatCompletionDeveloperMessageParam
@@ -1374,12 +1371,9 @@ constructor(
)
/**
- * A list of messages comprising the conversation so far. Depending on the
- * [model](https://platform.openai.com/docs/models) you use, different message types
- * (modalities) are supported, like
- * [text](https://platform.openai.com/docs/guides/text-generation),
- * [images](https://platform.openai.com/docs/guides/vision), and
- * [audio](https://platform.openai.com/docs/guides/audio).
+ * Developer-provided instructions that the model should follow, regardless of messages
+ * sent by the user. With o1 models and newer, use `developer` messages for this purpose
+ * instead.
*/
fun addMessage(chatCompletionSystemMessageParam: ChatCompletionSystemMessageParam) =
addMessage(
@@ -1389,12 +1383,7 @@ constructor(
)
/**
- * A list of messages comprising the conversation so far. Depending on the
- * [model](https://platform.openai.com/docs/models) you use, different message types
- * (modalities) are supported, like
- * [text](https://platform.openai.com/docs/guides/text-generation),
- * [images](https://platform.openai.com/docs/guides/vision), and
- * [audio](https://platform.openai.com/docs/guides/audio).
+ * Messages sent by an end user, containing prompts or additional context information.
*/
fun addMessage(chatCompletionUserMessageParam: ChatCompletionUserMessageParam) =
addMessage(
@@ -1403,14 +1392,7 @@ constructor(
)
)
- /**
- * A list of messages comprising the conversation so far. Depending on the
- * [model](https://platform.openai.com/docs/models) you use, different message types
- * (modalities) are supported, like
- * [text](https://platform.openai.com/docs/guides/text-generation),
- * [images](https://platform.openai.com/docs/guides/vision), and
- * [audio](https://platform.openai.com/docs/guides/audio).
- */
+ /** Messages sent by the model in response to user messages. */
fun addMessage(
chatCompletionAssistantMessageParam: ChatCompletionAssistantMessageParam
) =
@@ -2004,12 +1986,66 @@ constructor(
this.responseFormat = responseFormat
}
+ /**
+ * An object specifying the format that the model must output.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatText: ResponseFormatText) =
responseFormat(ResponseFormat.ofResponseFormatText(responseFormatText))
+ /**
+ * An object specifying the format that the model must output.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) =
responseFormat(ResponseFormat.ofResponseFormatJsonObject(responseFormatJsonObject))
+ /**
+ * An object specifying the format that the model must output.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
+ * Outputs which ensures the model will match your supplied JSON schema. Learn more in
+ * the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message
+ * the model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce
+ * JSON yourself via a system or user message. Without this, the model may generate an
+ * unending stream of whitespace until the generation reaches the token limit, resulting
+ * in a long-running and seemingly "stuck" request. Also note that the message content
+ * may be partially cut off if `finish_reason="length"`, which indicates the generation
+ * exceeded `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) =
responseFormat(ResponseFormat.ofResponseFormatJsonSchema(responseFormatJsonSchema))
@@ -2107,8 +2143,10 @@ constructor(
/** Up to 4 sequences where the API will stop generating further tokens. */
            fun stop(stop: JsonField<Stop>) = apply { this.stop = stop }
+ /** Up to 4 sequences where the API will stop generating further tokens. */
            fun stop(string: String) = stop(Stop.ofString(string))
+ /** Up to 4 sequences where the API will stop generating further tokens. */
            fun stopOfStrings(strings: List<String>) = stop(Stop.ofStrings(strings))
/**
@@ -2473,12 +2511,9 @@ constructor(
fun addMessage(message: ChatCompletionMessageParam) = apply { body.addMessage(message) }
/**
- * A list of messages comprising the conversation so far. Depending on the
- * [model](https://platform.openai.com/docs/models) you use, different message types
- * (modalities) are supported, like
- * [text](https://platform.openai.com/docs/guides/text-generation),
- * [images](https://platform.openai.com/docs/guides/vision), and
- * [audio](https://platform.openai.com/docs/guides/audio).
+ * Developer-provided instructions that the model should follow, regardless of messages sent
+ * by the user. With o1 models and newer, `developer` messages replace the previous `system`
+ * messages.
*/
fun addMessage(chatCompletionDeveloperMessageParam: ChatCompletionDeveloperMessageParam) =
apply {
@@ -2486,37 +2521,19 @@ constructor(
}
/**
- * A list of messages comprising the conversation so far. Depending on the
- * [model](https://platform.openai.com/docs/models) you use, different message types
- * (modalities) are supported, like
- * [text](https://platform.openai.com/docs/guides/text-generation),
- * [images](https://platform.openai.com/docs/guides/vision), and
- * [audio](https://platform.openai.com/docs/guides/audio).
+ * Developer-provided instructions that the model should follow, regardless of messages sent
+ * by the user. With o1 models and newer, use `developer` messages for this purpose instead.
*/
fun addMessage(chatCompletionSystemMessageParam: ChatCompletionSystemMessageParam) = apply {
body.addMessage(chatCompletionSystemMessageParam)
}
- /**
- * A list of messages comprising the conversation so far. Depending on the
- * [model](https://platform.openai.com/docs/models) you use, different message types
- * (modalities) are supported, like
- * [text](https://platform.openai.com/docs/guides/text-generation),
- * [images](https://platform.openai.com/docs/guides/vision), and
- * [audio](https://platform.openai.com/docs/guides/audio).
- */
+ /** Messages sent by an end user, containing prompts or additional context information. */
fun addMessage(chatCompletionUserMessageParam: ChatCompletionUserMessageParam) = apply {
body.addMessage(chatCompletionUserMessageParam)
}
- /**
- * A list of messages comprising the conversation so far. Depending on the
- * [model](https://platform.openai.com/docs/models) you use, different message types
- * (modalities) are supported, like
- * [text](https://platform.openai.com/docs/guides/text-generation),
- * [images](https://platform.openai.com/docs/guides/vision), and
- * [audio](https://platform.openai.com/docs/guides/audio).
- */
+ /** Messages sent by the model in response to user messages. */
fun addMessage(chatCompletionAssistantMessageParam: ChatCompletionAssistantMessageParam) =
apply {
body.addMessage(chatCompletionAssistantMessageParam)
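
The role-specific `addMessage` overloads above read best side by side. In this sketch only the `addMessage(...)` overloads are confirmed by the patch; `ChatModel.GPT_4O` and the role/content factory names are assumptions modeled on the `textContent` accessors shown elsewhere in this diff.

    // Hedged sketch: a system instruction followed by a user prompt.
    val chatParams = ChatCompletionCreateParams.builder()
        .model(ChatModel.GPT_4O)                                        // assumed constant
        .addMessage(
            ChatCompletionSystemMessageParam.builder()
                .role(ChatCompletionSystemMessageParam.Role.SYSTEM)     // assumed
                .content(ChatCompletionSystemMessageParam.Content.ofTextContent("Be terse.")) // assumed factory
                .build()
        )
        .addMessage(
            ChatCompletionUserMessageParam.builder()
                .role(ChatCompletionUserMessageParam.Role.USER)         // assumed
                .content(ChatCompletionUserMessageParam.Content.ofTextContent("Hello!"))      // assumed factory
                .build()
        )
        .build()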
@@ -3071,14 +3088,65 @@ constructor(
body.responseFormat(responseFormat)
}
+ /**
+ * An object specifying the format that the model must output.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatText: ResponseFormatText) = apply {
body.responseFormat(responseFormatText)
}
+ /**
+ * An object specifying the format that the model must output.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) = apply {
body.responseFormat(responseFormatJsonObject)
}
+ /**
+ * An object specifying the format that the model must output.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) = apply {
body.responseFormat(responseFormatJsonSchema)
}
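
For the Structured Outputs path described above, a sketch of the schema-backed overload may help; only `responseFormat(ResponseFormatJsonSchema)` is confirmed by this patch, while the nested `JsonSchema` builder and its setters are assumptions.

    // Hedged sketch: request output that must match a named JSON schema.
    val builderWithSchema = ChatCompletionCreateParams.builder()
        .model(ChatModel.GPT_4O)                                  // assumed constant
        .responseFormat(
            ResponseFormatJsonSchema.builder()
                .type(ResponseFormatJsonSchema.Type.JSON_SCHEMA)  // assumed required in this version
                .jsonSchema(
                    ResponseFormatJsonSchema.JsonSchema.builder() // assumed nested builder
                        .name("weather_report")
                        .build()
                )
                .build()
        )
    // add messages as in the earlier sketch, then call build()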
@@ -3172,8 +3240,10 @@ constructor(
/** Up to 4 sequences where the API will stop generating further tokens. */
        fun stop(stop: JsonField<Stop>) = apply { body.stop(stop) }
+ /** Up to 4 sequences where the API will stop generating further tokens. */
        fun stop(string: String) = apply { body.stop(string) }
+ /** Up to 4 sequences where the API will stop generating further tokens. */
        fun stopOfStrings(strings: List<String>) = apply { body.stopOfStrings(strings) }
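
Since the convenience overloads above now carry the same javadoc, a two-line sketch is clearer than prose; other required builder fields are elided here, and only `stop`/`stopOfStrings` are taken from this patch.

    // Hedged sketch: the two stop-sequence overloads documented above.
    val builder = ChatCompletionCreateParams.builder()
    builder.stop("END")                          // a single stop sequence
    builder.stopOfStrings(listOf("END", "\n\n")) // or up to 4 sequences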
/**
@@ -3552,6 +3622,7 @@ constructor(
* means the model can pick between generating a message or calling a function.
*/
    fun behavior(): Optional<Behavior> = Optional.ofNullable(behavior)
+
/**
* Specifying a particular function via `{"name": "my_function"}` forces the model to call
* that function.
@@ -3568,6 +3639,7 @@ constructor(
* means the model can pick between generating a message or calling a function.
*/
fun asBehavior(): Behavior = behavior.getOrThrow("behavior")
+
/**
* Specifying a particular function via `{"name": "my_function"}` forces the model to call
* that function.
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionDeveloperMessageParam.kt b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionDeveloperMessageParam.kt
index a240e9a97..1792b836c 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionDeveloperMessageParam.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionDeveloperMessageParam.kt
@@ -184,6 +184,7 @@ private constructor(
/** The contents of the developer message. */
        fun textContent(): Optional<String> = Optional.ofNullable(textContent)
+
/**
* An array of content parts with a defined type. For developer messages, only type `text`
* is supported.
@@ -197,6 +198,7 @@ private constructor(
/** The contents of the developer message. */
fun asTextContent(): String = textContent.getOrThrow("textContent")
+
/**
* An array of content parts with a defined type. For developer messages, only type `text`
* is supported.
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionMessageParam.kt b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionMessageParam.kt
index 6efb9f977..396935cb6 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionMessageParam.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionMessageParam.kt
@@ -44,15 +44,18 @@ private constructor(
*/
    fun chatCompletionDeveloperMessageParam(): Optional<ChatCompletionDeveloperMessageParam> =
        Optional.ofNullable(chatCompletionDeveloperMessageParam)
+
    /**
     * Developer-provided instructions that the model should follow, regardless of messages sent by
     * the user. With o1 models and newer, use `developer` messages for this purpose instead.
     */
    fun chatCompletionSystemMessageParam(): Optional<ChatCompletionSystemMessageParam> =
        Optional.ofNullable(chatCompletionSystemMessageParam)
+
    /** Messages sent by an end user, containing prompts or additional context information. */
    fun chatCompletionUserMessageParam(): Optional<ChatCompletionUserMessageParam> =
        Optional.ofNullable(chatCompletionUserMessageParam)
+
    /** Messages sent by the model in response to user messages. */
    fun chatCompletionAssistantMessageParam(): Optional<ChatCompletionAssistantMessageParam> =
        Optional.ofNullable(chatCompletionAssistantMessageParam)
@@ -84,15 +87,18 @@ private constructor(
*/
fun asChatCompletionDeveloperMessageParam(): ChatCompletionDeveloperMessageParam =
chatCompletionDeveloperMessageParam.getOrThrow("chatCompletionDeveloperMessageParam")
+
/**
* Developer-provided instructions that the model should follow, regardless of messages sent by
* the user. With o1 models and newer, use `developer` messages for this purpose instead.
*/
fun asChatCompletionSystemMessageParam(): ChatCompletionSystemMessageParam =
chatCompletionSystemMessageParam.getOrThrow("chatCompletionSystemMessageParam")
+
/** Messages sent by an end user, containing prompts or additional context information. */
fun asChatCompletionUserMessageParam(): ChatCompletionUserMessageParam =
chatCompletionUserMessageParam.getOrThrow("chatCompletionUserMessageParam")
+
/** Messages sent by the model in response to user messages. */
fun asChatCompletionAssistantMessageParam(): ChatCompletionAssistantMessageParam =
chatCompletionAssistantMessageParam.getOrThrow("chatCompletionAssistantMessageParam")
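
The paired `Optional` accessors and throwing `as...` accessors above suggest a simple way to branch on the union; this sketch uses only the accessors shown in this hunk.

    // Hedged sketch: branch on whichever arm of the message union is populated.
    // The as...() variants throw (via getOrThrow) if the wrong arm is read.
    fun roleOf(message: ChatCompletionMessageParam): String =
        when {
            message.chatCompletionDeveloperMessageParam().isPresent -> "developer"
            message.chatCompletionSystemMessageParam().isPresent -> "system"
            message.chatCompletionUserMessageParam().isPresent -> "user"
            message.chatCompletionAssistantMessageParam().isPresent -> "assistant"
            else -> "other (e.g. tool message)"
        }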
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionPredictionContent.kt b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionPredictionContent.kt
index 0ced5b585..7c86e6ddc 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionPredictionContent.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionPredictionContent.kt
@@ -187,6 +187,7 @@ private constructor(
* regenerating with minor changes.
*/
    fun textContent(): Optional<String> = Optional.ofNullable(textContent)
+
/**
* An array of content parts with a defined type. Supported options differ based on the
* [model](https://platform.openai.com/docs/models) being used to generate the response. Can
@@ -204,6 +205,7 @@ private constructor(
* regenerating with minor changes.
*/
fun asTextContent(): String = textContent.getOrThrow("textContent")
+
/**
* An array of content parts with a defined type. Supported options differ based on the
* [model](https://platform.openai.com/docs/models) being used to generate the response. Can
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionSystemMessageParam.kt b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionSystemMessageParam.kt
index 976cf18d2..05252671d 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionSystemMessageParam.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionSystemMessageParam.kt
@@ -183,6 +183,7 @@ private constructor(
/** The contents of the system message. */
        fun textContent(): Optional<String> = Optional.ofNullable(textContent)
+
/**
* An array of content parts with a defined type. For system messages, only type `text` is
* supported.
@@ -196,6 +197,7 @@ private constructor(
/** The contents of the system message. */
fun asTextContent(): String = textContent.getOrThrow("textContent")
+
/**
* An array of content parts with a defined type. For system messages, only type `text` is
* supported.
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionToolChoiceOption.kt b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionToolChoiceOption.kt
index 46614d52d..0e899d0be 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionToolChoiceOption.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionToolChoiceOption.kt
@@ -46,6 +46,7 @@ private constructor(
* means the model must call one or more tools.
*/
    fun behavior(): Optional<Behavior> = Optional.ofNullable(behavior)
+
/**
* Specifies a tool the model should use. Use to force the model to call a specific function.
*/
@@ -62,6 +63,7 @@ private constructor(
* means the model must call one or more tools.
*/
fun asBehavior(): Behavior = behavior.getOrThrow("behavior")
+
/**
* Specifies a tool the model should use. Use to force the model to call a specific function.
*/
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionToolMessageParam.kt b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionToolMessageParam.kt
index 7e779a833..0621c3cbf 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionToolMessageParam.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionToolMessageParam.kt
@@ -168,6 +168,7 @@ private constructor(
/** The contents of the tool message. */
        fun textContent(): Optional<String> = Optional.ofNullable(textContent)
+
/**
* An array of content parts with a defined type. For tool messages, only type `text` is
* supported.
@@ -181,6 +182,7 @@ private constructor(
/** The contents of the tool message. */
fun asTextContent(): String = textContent.getOrThrow("textContent")
+
/**
* An array of content parts with a defined type. For tool messages, only type `text` is
* supported.
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionUserMessageParam.kt b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionUserMessageParam.kt
index 1f4587ff2..70f9091db 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionUserMessageParam.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionUserMessageParam.kt
@@ -180,6 +180,7 @@ private constructor(
/** The text contents of the message. */
        fun textContent(): Optional<String> = Optional.ofNullable(textContent)
+
/**
* An array of content parts with a defined type. Supported options differ based on the
* [model](https://platform.openai.com/docs/models) being used to generate the response. Can
@@ -194,6 +195,7 @@ private constructor(
/** The text contents of the message. */
fun asTextContent(): String = textContent.getOrThrow("textContent")
+
/**
* An array of content parts with a defined type. Supported options differ based on the
* [model](https://platform.openai.com/docs/models) being used to generate the response. Can
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/CodeInterpreterToolCall.kt b/openai-java-core/src/main/kotlin/com/openai/models/CodeInterpreterToolCall.kt
index b56b77ea4..3419dc403 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/CodeInterpreterToolCall.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/CodeInterpreterToolCall.kt
@@ -269,11 +269,7 @@ private constructor(
}
}
- /**
- * The outputs from the Code Interpreter tool call. Code Interpreter can output one or
- * more items, including text (`logs`) or images (`image`). Each of these are
- * represented by a different object type.
- */
+ /** Text output from the Code Interpreter tool call as part of a run step. */
fun addOutput(logs: Output.LogsOutput) = addOutput(Output.ofLogs(logs))
/**
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/CodeInterpreterToolCallDelta.kt b/openai-java-core/src/main/kotlin/com/openai/models/CodeInterpreterToolCallDelta.kt
index 74637564e..8019d5df7 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/CodeInterpreterToolCallDelta.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/CodeInterpreterToolCallDelta.kt
@@ -338,11 +338,7 @@ private constructor(
}
}
- /**
- * The outputs from the Code Interpreter tool call. Code Interpreter can output one or
- * more items, including text (`logs`) or images (`image`). Each of these are
- * represented by a different object type.
- */
+ /** Text output from the Code Interpreter tool call as part of a run step. */
fun addOutput(codeInterpreterLogs: CodeInterpreterLogs) =
addOutput(Output.ofCodeInterpreterLogs(codeInterpreterLogs))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/CompletionCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/CompletionCreateParams.kt
index fcd5573f8..c33884b0f 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/CompletionCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/CompletionCreateParams.kt
@@ -835,14 +835,46 @@ constructor(
*/
            fun prompt(prompt: JsonField<Prompt>) = apply { this.prompt = prompt }
+ /**
+ * The prompt(s) to generate completions for, encoded as a string, array of strings,
+ * array of tokens, or array of token arrays.
+ *
+ * Note that <|endoftext|> is the document separator that the model sees during
+ * training, so if a prompt is not specified the model will generate as if from the
+ * beginning of a new document.
+ */
fun prompt(string: String) = prompt(Prompt.ofString(string))
+ /**
+ * The prompt(s) to generate completions for, encoded as a string, array of strings,
+ * array of tokens, or array of token arrays.
+ *
+ * Note that <|endoftext|> is the document separator that the model sees during
+ * training, so if a prompt is not specified the model will generate as if from the
+ * beginning of a new document.
+ */
            fun promptOfArrayOfStrings(arrayOfStrings: List<String>) =
prompt(Prompt.ofArrayOfStrings(arrayOfStrings))
+ /**
+ * The prompt(s) to generate completions for, encoded as a string, array of strings,
+ * array of tokens, or array of token arrays.
+ *
+ * Note that <|endoftext|> is the document separator that the model sees during
+ * training, so if a prompt is not specified the model will generate as if from the
+ * beginning of a new document.
+ */
            fun promptOfArrayOfTokens(arrayOfTokens: List<Long>) =
prompt(Prompt.ofArrayOfTokens(arrayOfTokens))
+ /**
+ * The prompt(s) to generate completions for, encoded as a string, array of strings,
+ * array of tokens, or array of token arrays.
+ *
+ * Note that <|endoftext|> is the document separator that the model sees during
+ * training, so if a prompt is not specified the model will generate as if from the
+ * beginning of a new document.
+ */
            fun promptOfArrayOfTokenArrays(arrayOfTokenArrays: List<List<Long>>) =
prompt(Prompt.ofArrayOfTokenArrays(arrayOfTokenArrays))
@@ -1230,8 +1262,16 @@ constructor(
*/
            fun stop(stop: JsonField<Stop>) = apply { this.stop = stop }
+ /**
+ * Up to 4 sequences where the API will stop generating further tokens. The returned
+ * text will not contain the stop sequence.
+ */
fun stop(string: String) = stop(Stop.ofString(string))
+ /**
+ * Up to 4 sequences where the API will stop generating further tokens. The returned
+ * text will not contain the stop sequence.
+ */
            fun stopOfStrings(strings: List<String>) = stop(Stop.ofStrings(strings))
/** Options for streaming response. Only set this when you set `stream: true`. */
@@ -1494,16 +1534,48 @@ constructor(
*/
        fun prompt(prompt: JsonField<Prompt>) = apply { body.prompt(prompt) }
+ /**
+ * The prompt(s) to generate completions for, encoded as a string, array of strings, array
+ * of tokens, or array of token arrays.
+ *
+ * Note that <|endoftext|> is the document separator that the model sees during training, so
+ * if a prompt is not specified the model will generate as if from the beginning of a new
+ * document.
+ */
fun prompt(string: String) = apply { body.prompt(string) }
+ /**
+ * The prompt(s) to generate completions for, encoded as a string, array of strings, array
+ * of tokens, or array of token arrays.
+ *
+ * Note that <|endoftext|> is the document separator that the model sees during training, so
+ * if a prompt is not specified the model will generate as if from the beginning of a new
+ * document.
+ */
        fun promptOfArrayOfStrings(arrayOfStrings: List<String>) = apply {
body.promptOfArrayOfStrings(arrayOfStrings)
}
+ /**
+ * The prompt(s) to generate completions for, encoded as a string, array of strings, array
+ * of tokens, or array of token arrays.
+ *
+ * Note that <|endoftext|> is the document separator that the model sees during training, so
+ * if a prompt is not specified the model will generate as if from the beginning of a new
+ * document.
+ */
        fun promptOfArrayOfTokens(arrayOfTokens: List<Long>) = apply {
body.promptOfArrayOfTokens(arrayOfTokens)
}
+ /**
+ * The prompt(s) to generate completions for, encoded as a string, array of strings, array
+ * of tokens, or array of token arrays.
+ *
+ * Note that <|endoftext|> is the document separator that the model sees during training, so
+ * if a prompt is not specified the model will generate as if from the beginning of a new
+ * document.
+ */
        fun promptOfArrayOfTokenArrays(arrayOfTokenArrays: List<List<Long>>) = apply {
body.promptOfArrayOfTokenArrays(arrayOfTokenArrays)
}
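
The four prompt encodings documented above map onto four builder calls. A compact sketch, assuming `CompletionCreateParams.Model.GPT_3_5_TURBO_INSTRUCT` exists as a model constant; only the prompt overloads themselves are confirmed by this patch.

    // Hedged sketch: the same request expressed with each prompt encoding.
    val params = CompletionCreateParams.builder()
        .model(CompletionCreateParams.Model.GPT_3_5_TURBO_INSTRUCT)   // assumed constant
        .prompt("Say this is a test")                                 // single string
        // .promptOfArrayOfStrings(listOf("A", "B"))                  // several prompts in one call
        // .promptOfArrayOfTokens(listOf(1212L, 318L))                // pre-tokenized prompt
        // .promptOfArrayOfTokenArrays(listOf(listOf(1212L, 318L)))   // batch of token arrays
        .build()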
@@ -1885,8 +1957,16 @@ constructor(
*/
        fun stop(stop: JsonField<Stop>) = apply { body.stop(stop) }
+ /**
+ * Up to 4 sequences where the API will stop generating further tokens. The returned text
+ * will not contain the stop sequence.
+ */
fun stop(string: String) = apply { body.stop(string) }
+ /**
+ * Up to 4 sequences where the API will stop generating further tokens. The returned text
+ * will not contain the stop sequence.
+ */
        fun stopOfStrings(strings: List<String>) = apply { body.stopOfStrings(strings) }
/** Options for streaming response. Only set this when you set `stream: true`. */
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/EmbeddingCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/EmbeddingCreateParams.kt
index 7119e6a38..19bf5a24e 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/EmbeddingCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/EmbeddingCreateParams.kt
@@ -690,10 +690,13 @@ constructor(
/** The string that will be turned into an embedding. */
        fun string(): Optional<String> = Optional.ofNullable(string)
+
        /** The array of strings that will be turned into an embedding. */
        fun arrayOfStrings(): Optional<List<String>> = Optional.ofNullable(arrayOfStrings)
+
        /** The array of integers that will be turned into an embedding. */
        fun arrayOfTokens(): Optional<List<Long>> = Optional.ofNullable(arrayOfTokens)
+
        /** The array of arrays containing integers that will be turned into an embedding. */
        fun arrayOfTokenArrays(): Optional<List<List<Long>>> =
            Optional.ofNullable(arrayOfTokenArrays)
@@ -708,10 +711,13 @@ constructor(
/** The string that will be turned into an embedding. */
fun asString(): String = string.getOrThrow("string")
+
/** The array of strings that will be turned into an embedding. */
        fun asArrayOfStrings(): List<String> = arrayOfStrings.getOrThrow("arrayOfStrings")
+
        /** The array of integers that will be turned into an embedding. */
        fun asArrayOfTokens(): List<Long> = arrayOfTokens.getOrThrow("arrayOfTokens")
+
        /** The array of arrays containing integers that will be turned into an embedding. */
        fun asArrayOfTokenArrays(): List<List<Long>> =
            arrayOfTokenArrays.getOrThrow("arrayOfTokenArrays")
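
For the embedding input union above, the builder presumably mirrors these accessors; the `input`/`inputOfArrayOfStrings` setter names and the model constant are assumptions inferred from the prompt-setter pattern elsewhere in this patch.

    // Hedged sketch: the Input union accepts a string, string array, token
    // array, or array of token arrays, mirroring the accessors above.
    val embeddingParams = EmbeddingCreateParams.builder()
        .model(EmbeddingModel.TEXT_EMBEDDING_3_SMALL)         // assumed constant
        .input("The food was delicious and the service...")   // string arm; assumed overload name
        // .inputOfArrayOfStrings(listOf("first", "second"))  // assumed overload name
        .build()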
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/FileChunkingStrategy.kt b/openai-java-core/src/main/kotlin/com/openai/models/FileChunkingStrategy.kt
index a10e07a94..d2c9b38d2 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/FileChunkingStrategy.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/FileChunkingStrategy.kt
@@ -32,6 +32,7 @@ private constructor(
    fun staticFileChunkingStrategyObject(): Optional<StaticFileChunkingStrategyObject> =
Optional.ofNullable(staticFileChunkingStrategyObject)
+
/**
* This is returned when the chunking strategy is unknown. Typically, this is because the file
* was indexed before the `chunking_strategy` concept was introduced in the API.
@@ -45,6 +46,7 @@ private constructor(
fun asStaticFileChunkingStrategyObject(): StaticFileChunkingStrategyObject =
staticFileChunkingStrategyObject.getOrThrow("staticFileChunkingStrategyObject")
+
/**
* This is returned when the chunking strategy is unknown. Typically, this is because the file
* was indexed before the `chunking_strategy` concept was introduced in the API.
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJob.kt b/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJob.kt
index 87ab524b2..6772af370 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJob.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJob.kt
@@ -964,8 +964,16 @@ private constructor(
*/
            fun batchSize(batchSize: JsonField<BatchSize>) = apply { this.batchSize = batchSize }
+ /**
+ * Number of examples in each batch. A larger batch size means that model parameters are
+ * updated less frequently, but with lower variance.
+ */
fun batchSize(auto: BatchSize.Auto) = batchSize(BatchSize.ofAuto(auto))
+ /**
+ * Number of examples in each batch. A larger batch size means that model parameters are
+ * updated less frequently, but with lower variance.
+ */
fun batchSize(manual: Long) = batchSize(BatchSize.ofManual(manual))
/**
@@ -984,9 +992,17 @@ private constructor(
this.learningRateMultiplier = learningRateMultiplier
}
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful to avoid
+ * overfitting.
+ */
fun learningRateMultiplier(auto: LearningRateMultiplier.Auto) =
learningRateMultiplier(LearningRateMultiplier.ofAuto(auto))
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful to avoid
+ * overfitting.
+ */
fun learningRateMultiplier(number: Double) =
learningRateMultiplier(LearningRateMultiplier.ofNumber(number))
@@ -1002,8 +1018,16 @@ private constructor(
*/
            fun nEpochs(nEpochs: JsonField<NEpochs>) = apply { this.nEpochs = nEpochs }
+ /**
+ * The number of epochs to train the model for. An epoch refers to one full cycle
+ * through the training dataset.
+ */
fun nEpochs(behavior: NEpochs.Behavior) = nEpochs(NEpochs.ofBehavior(behavior))
+ /**
+ * The number of epochs to train the model for. An epoch refers to one full cycle
+ * through the training dataset.
+ */
fun nEpochs(integer: Long) = nEpochs(NEpochs.ofInteger(integer))
            fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
@@ -2027,8 +2051,16 @@ private constructor(
this.batchSize = batchSize
}
+ /**
+ * Number of examples in each batch. A larger batch size means that model
+ * parameters are updated less frequently, but with lower variance.
+ */
fun batchSize(auto: BatchSize.Auto) = batchSize(BatchSize.ofAuto(auto))
+ /**
+ * Number of examples in each batch. A larger batch size means that model
+ * parameters are updated less frequently, but with lower variance.
+ */
fun batchSize(manual: Long) = batchSize(BatchSize.ofManual(manual))
/**
@@ -2043,8 +2075,16 @@ private constructor(
*/
                fun beta(beta: JsonField<Beta>) = apply { this.beta = beta }
+ /**
+ * The beta value for the DPO method. A higher beta value will increase the
+ * weight of the penalty between the policy and reference model.
+ */
fun beta(auto: Beta.Auto) = beta(Beta.ofAuto(auto))
+ /**
+ * The beta value for the DPO method. A higher beta value will increase the
+ * weight of the penalty between the policy and reference model.
+ */
fun beta(manual: Double) = beta(Beta.ofManual(manual))
/**
@@ -2062,9 +2102,17 @@ private constructor(
                    learningRateMultiplier: JsonField<LearningRateMultiplier>
) = apply { this.learningRateMultiplier = learningRateMultiplier }
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful
+ * to avoid overfitting.
+ */
fun learningRateMultiplier(auto: LearningRateMultiplier.Auto) =
learningRateMultiplier(LearningRateMultiplier.ofAuto(auto))
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful
+ * to avoid overfitting.
+ */
fun learningRateMultiplier(manual: Double) =
learningRateMultiplier(LearningRateMultiplier.ofManual(manual))
@@ -2080,8 +2128,16 @@ private constructor(
*/
                fun nEpochs(nEpochs: JsonField<NEpochs>) = apply { this.nEpochs = nEpochs }
+ /**
+ * The number of epochs to train the model for. An epoch refers to one full
+ * cycle through the training dataset.
+ */
fun nEpochs(auto: NEpochs.Auto) = nEpochs(NEpochs.ofAuto(auto))
+ /**
+ * The number of epochs to train the model for. An epoch refers to one full
+ * cycle through the training dataset.
+ */
fun nEpochs(manual: Long) = nEpochs(NEpochs.ofManual(manual))
                fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
@@ -3036,8 +3092,16 @@ private constructor(
this.batchSize = batchSize
}
+ /**
+ * Number of examples in each batch. A larger batch size means that model
+ * parameters are updated less frequently, but with lower variance.
+ */
fun batchSize(auto: BatchSize.Auto) = batchSize(BatchSize.ofAuto(auto))
+ /**
+ * Number of examples in each batch. A larger batch size means that model
+ * parameters are updated less frequently, but with lower variance.
+ */
fun batchSize(manual: Long) = batchSize(BatchSize.ofManual(manual))
/**
@@ -3055,9 +3119,17 @@ private constructor(
                    learningRateMultiplier: JsonField<LearningRateMultiplier>
) = apply { this.learningRateMultiplier = learningRateMultiplier }
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful
+ * to avoid overfitting.
+ */
fun learningRateMultiplier(auto: LearningRateMultiplier.Auto) =
learningRateMultiplier(LearningRateMultiplier.ofAuto(auto))
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful
+ * to avoid overfitting.
+ */
fun learningRateMultiplier(manual: Double) =
learningRateMultiplier(LearningRateMultiplier.ofManual(manual))
@@ -3073,8 +3145,16 @@ private constructor(
*/
                fun nEpochs(nEpochs: JsonField<NEpochs>) = apply { this.nEpochs = nEpochs }
+ /**
+ * The number of epochs to train the model for. An epoch refers to one full
+ * cycle through the training dataset.
+ */
fun nEpochs(auto: NEpochs.Auto) = nEpochs(NEpochs.ofAuto(auto))
+ /**
+ * The number of epochs to train the model for. An epoch refers to one full
+ * cycle through the training dataset.
+ */
fun nEpochs(manual: Long) = nEpochs(NEpochs.ofManual(manual))
                fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobCreateParams.kt
index 1d3f2f2f0..d61ddcd50 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobCreateParams.kt
@@ -1218,8 +1218,16 @@ constructor(
*/
            fun batchSize(batchSize: JsonField<BatchSize>) = apply { this.batchSize = batchSize }
+ /**
+ * Number of examples in each batch. A larger batch size means that model parameters are
+ * updated less frequently, but with lower variance.
+ */
fun batchSize(behavior: BatchSize.Behavior) = batchSize(BatchSize.ofBehavior(behavior))
+ /**
+ * Number of examples in each batch. A larger batch size means that model parameters are
+ * updated less frequently, but with lower variance.
+ */
fun batchSize(integer: Long) = batchSize(BatchSize.ofInteger(integer))
/**
@@ -1238,9 +1246,17 @@ constructor(
this.learningRateMultiplier = learningRateMultiplier
}
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful to avoid
+ * overfitting.
+ */
fun learningRateMultiplier(behavior: LearningRateMultiplier.Behavior) =
learningRateMultiplier(LearningRateMultiplier.ofBehavior(behavior))
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful to avoid
+ * overfitting.
+ */
fun learningRateMultiplier(number: Double) =
learningRateMultiplier(LearningRateMultiplier.ofNumber(number))
@@ -1256,8 +1272,16 @@ constructor(
*/
            fun nEpochs(nEpochs: JsonField<NEpochs>) = apply { this.nEpochs = nEpochs }
+ /**
+ * The number of epochs to train the model for. An epoch refers to one full cycle
+ * through the training dataset.
+ */
fun nEpochs(behavior: NEpochs.Behavior) = nEpochs(NEpochs.ofBehavior(behavior))
+ /**
+ * The number of epochs to train the model for. An epoch refers to one full cycle
+ * through the training dataset.
+ */
fun nEpochs(integer: Long) = nEpochs(NEpochs.ofInteger(integer))
            fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
@@ -2587,8 +2611,16 @@ constructor(
this.batchSize = batchSize
}
+ /**
+ * Number of examples in each batch. A larger batch size means that model
+ * parameters are updated less frequently, but with lower variance.
+ */
fun batchSize(auto: BatchSize.Auto) = batchSize(BatchSize.ofAuto(auto))
+ /**
+ * Number of examples in each batch. A larger batch size means that model
+ * parameters are updated less frequently, but with lower variance.
+ */
fun batchSize(manual: Long) = batchSize(BatchSize.ofManual(manual))
/**
@@ -2603,8 +2635,16 @@ constructor(
*/
                fun beta(beta: JsonField<Beta>) = apply { this.beta = beta }
+ /**
+ * The beta value for the DPO method. A higher beta value will increase the
+ * weight of the penalty between the policy and reference model.
+ */
fun beta(auto: Beta.Auto) = beta(Beta.ofAuto(auto))
+ /**
+ * The beta value for the DPO method. A higher beta value will increase the
+ * weight of the penalty between the policy and reference model.
+ */
fun beta(manual: Double) = beta(Beta.ofManual(manual))
/**
@@ -2622,9 +2662,17 @@ constructor(
                    learningRateMultiplier: JsonField<LearningRateMultiplier>
) = apply { this.learningRateMultiplier = learningRateMultiplier }
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful
+ * to avoid overfitting.
+ */
fun learningRateMultiplier(auto: LearningRateMultiplier.Auto) =
learningRateMultiplier(LearningRateMultiplier.ofAuto(auto))
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful
+ * to avoid overfitting.
+ */
fun learningRateMultiplier(manual: Double) =
learningRateMultiplier(LearningRateMultiplier.ofManual(manual))
@@ -2640,8 +2688,16 @@ constructor(
*/
                fun nEpochs(nEpochs: JsonField<NEpochs>) = apply { this.nEpochs = nEpochs }
+ /**
+ * The number of epochs to train the model for. An epoch refers to one full
+ * cycle through the training dataset.
+ */
fun nEpochs(auto: NEpochs.Auto) = nEpochs(NEpochs.ofAuto(auto))
+ /**
+ * The number of epochs to train the model for. An epoch refers to one full
+ * cycle through the training dataset.
+ */
fun nEpochs(manual: Long) = nEpochs(NEpochs.ofManual(manual))
                fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
@@ -3596,8 +3652,16 @@ constructor(
this.batchSize = batchSize
}
+ /**
+ * Number of examples in each batch. A larger batch size means that model
+ * parameters are updated less frequently, but with lower variance.
+ */
fun batchSize(auto: BatchSize.Auto) = batchSize(BatchSize.ofAuto(auto))
+ /**
+ * Number of examples in each batch. A larger batch size means that model
+ * parameters are updated less frequently, but with lower variance.
+ */
fun batchSize(manual: Long) = batchSize(BatchSize.ofManual(manual))
/**
@@ -3615,9 +3679,17 @@ constructor(
                    learningRateMultiplier: JsonField<LearningRateMultiplier>
) = apply { this.learningRateMultiplier = learningRateMultiplier }
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful
+ * to avoid overfitting.
+ */
fun learningRateMultiplier(auto: LearningRateMultiplier.Auto) =
learningRateMultiplier(LearningRateMultiplier.ofAuto(auto))
+ /**
+ * Scaling factor for the learning rate. A smaller learning rate may be useful
+ * to avoid overfitting.
+ */
fun learningRateMultiplier(manual: Double) =
learningRateMultiplier(LearningRateMultiplier.ofManual(manual))
@@ -3633,8 +3705,16 @@ constructor(
*/
                fun nEpochs(nEpochs: JsonField<NEpochs>) = apply { this.nEpochs = nEpochs }
+ /**
+ * The number of epochs to train the model for. An epoch refers to one full
+ * cycle through the training dataset.
+ */
fun nEpochs(auto: NEpochs.Auto) = nEpochs(NEpochs.ofAuto(auto))
+ /**
+ * The number of epochs to train the model for. An epoch refers to one full
+ * cycle through the training dataset.
+ */
fun nEpochs(manual: Long) = nEpochs(NEpochs.ofManual(manual))
                fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
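
Given the repeated auto-versus-manual overloads above, one sketch of a hyperparameters block may help. The overload shapes (`nEpochs(Long)`, `batchSize(BatchSize.Behavior)`, `learningRateMultiplier(Double)`) are taken from these hunks; the nested builder path and the `Behavior.AUTO` constant are assumptions.

    // Hedged sketch: mixing `auto` and manual hyperparameter values.
    val hyperparameters = FineTuningJobCreateParams.Hyperparameters.builder() // assumed path
        .nEpochs(3L)                 // manual: three full passes through the training data
        .batchSize(FineTuningJobCreateParams.Hyperparameters.BatchSize.Behavior.AUTO) // assumed constant
        .learningRateMultiplier(0.1) // manual: smaller multiplier to curb overfitting
        .build()
    // then pass via FineTuningJobCreateParams.builder().hyperparameters(...) — assumed setter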
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/Message.kt b/openai-java-core/src/main/kotlin/com/openai/models/Message.kt
index da3bf8ebd..8164402a9 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/Message.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/Message.kt
@@ -356,19 +356,22 @@ private constructor(
}
}
- /** The content of the message in array of text and/or images. */
+ /**
+ * References an image [File](https://platform.openai.com/docs/api-reference/files) in the
+ * content of a message.
+ */
fun addContent(imageFileContentBlock: ImageFileContentBlock) =
addContent(MessageContent.ofImageFileContentBlock(imageFileContentBlock))
- /** The content of the message in array of text and/or images. */
+ /** References an image URL in the content of a message. */
fun addContent(imageUrlContentBlock: ImageUrlContentBlock) =
addContent(MessageContent.ofImageUrlContentBlock(imageUrlContentBlock))
- /** The content of the message in array of text and/or images. */
+ /** The text content that is part of a message. */
fun addContent(textContentBlock: TextContentBlock) =
addContent(MessageContent.ofTextContentBlock(textContentBlock))
- /** The content of the message in array of text and/or images. */
+ /** The refusal content generated by the assistant. */
fun addContent(refusalContentBlock: RefusalContentBlock) =
addContent(MessageContent.ofRefusalContentBlock(refusalContentBlock))
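
With each `addContent` overload now documented by block type, a consuming-side sketch may help; it uses only the `MessageContent` accessors shown later in this patch, plus an assumed `text().value()` path on `TextContentBlock`.

    // Hedged sketch: render one content block according to its concrete type.
    fun describe(content: MessageContent): String =
        when {
            content.textContentBlock().isPresent ->
                "text: " + content.asTextContentBlock().text().value() // assumed accessors
            content.imageFileContentBlock().isPresent -> "image file reference"
            content.imageUrlContentBlock().isPresent -> "image URL reference"
            content.refusalContentBlock().isPresent -> "assistant refusal"
            else -> "unknown content type"
        }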
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/MessageContent.kt b/openai-java-core/src/main/kotlin/com/openai/models/MessageContent.kt
index 89be4f37f..295fc5aaa 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/MessageContent.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/MessageContent.kt
@@ -41,11 +41,14 @@ private constructor(
*/
    fun imageFileContentBlock(): Optional<ImageFileContentBlock> =
        Optional.ofNullable(imageFileContentBlock)
+
    /** References an image URL in the content of a message. */
    fun imageUrlContentBlock(): Optional<ImageUrlContentBlock> =
        Optional.ofNullable(imageUrlContentBlock)
+
    /** The text content that is part of a message. */
    fun textContentBlock(): Optional<TextContentBlock> = Optional.ofNullable(textContentBlock)
+
    /** The refusal content generated by the assistant. */
    fun refusalContentBlock(): Optional<RefusalContentBlock> =
        Optional.ofNullable(refusalContentBlock)
@@ -64,11 +67,14 @@ private constructor(
*/
fun asImageFileContentBlock(): ImageFileContentBlock =
imageFileContentBlock.getOrThrow("imageFileContentBlock")
+
/** References an image URL in the content of a message. */
fun asImageUrlContentBlock(): ImageUrlContentBlock =
imageUrlContentBlock.getOrThrow("imageUrlContentBlock")
+
/** The text content that is part of a message. */
fun asTextContentBlock(): TextContentBlock = textContentBlock.getOrThrow("textContentBlock")
+
/** The refusal content generated by the assistant. */
fun asRefusalContentBlock(): RefusalContentBlock =
refusalContentBlock.getOrThrow("refusalContentBlock")
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/MessageContentDelta.kt b/openai-java-core/src/main/kotlin/com/openai/models/MessageContentDelta.kt
index 3aac3e7a7..b39c5030f 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/MessageContentDelta.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/MessageContentDelta.kt
@@ -41,10 +41,13 @@ private constructor(
*/
    fun imageFileDeltaBlock(): Optional<ImageFileDeltaBlock> =
        Optional.ofNullable(imageFileDeltaBlock)
+
    /** The text content that is part of a message. */
    fun textDeltaBlock(): Optional<TextDeltaBlock> = Optional.ofNullable(textDeltaBlock)
+
    /** The refusal content that is part of a message. */
    fun refusalDeltaBlock(): Optional<RefusalDeltaBlock> = Optional.ofNullable(refusalDeltaBlock)
+
    /** References an image URL in the content of a message. */
    fun imageUrlDeltaBlock(): Optional<ImageUrlDeltaBlock> = Optional.ofNullable(imageUrlDeltaBlock)
@@ -62,10 +65,13 @@ private constructor(
*/
fun asImageFileDeltaBlock(): ImageFileDeltaBlock =
imageFileDeltaBlock.getOrThrow("imageFileDeltaBlock")
+
/** The text content that is part of a message. */
fun asTextDeltaBlock(): TextDeltaBlock = textDeltaBlock.getOrThrow("textDeltaBlock")
+
/** The refusal content that is part of a message. */
fun asRefusalDeltaBlock(): RefusalDeltaBlock = refusalDeltaBlock.getOrThrow("refusalDeltaBlock")
+
/** References an image URL in the content of a message. */
fun asImageUrlDeltaBlock(): ImageUrlDeltaBlock =
imageUrlDeltaBlock.getOrThrow("imageUrlDeltaBlock")
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/MessageContentPartParam.kt b/openai-java-core/src/main/kotlin/com/openai/models/MessageContentPartParam.kt
index 8fcdebd30..d1647799a 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/MessageContentPartParam.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/MessageContentPartParam.kt
@@ -40,9 +40,11 @@ private constructor(
*/
fun imageFileContentBlock(): Optional =
Optional.ofNullable(imageFileContentBlock)
+
/** References an image URL in the content of a message. */
fun imageUrlContentBlock(): Optional =
Optional.ofNullable(imageUrlContentBlock)
+
/** The text content that is part of a message. */
fun textContentBlockParam(): Optional =
Optional.ofNullable(textContentBlockParam)
@@ -59,9 +61,11 @@ private constructor(
*/
fun asImageFileContentBlock(): ImageFileContentBlock =
imageFileContentBlock.getOrThrow("imageFileContentBlock")
+
/** References an image URL in the content of a message. */
fun asImageUrlContentBlock(): ImageUrlContentBlock =
imageUrlContentBlock.getOrThrow("imageUrlContentBlock")
+
/** The text content that is part of a message. */
fun asTextContentBlockParam(): TextContentBlockParam =
textContentBlockParam.getOrThrow("textContentBlockParam")
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/MessageDelta.kt b/openai-java-core/src/main/kotlin/com/openai/models/MessageDelta.kt
index 3e2f35200..6944a2571 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/MessageDelta.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/MessageDelta.kt
@@ -101,19 +101,22 @@ private constructor(
}
}
- /** The content of the message in array of text and/or images. */
+ /**
+ * References an image [File](https://platform.openai.com/docs/api-reference/files) in the
+ * content of a message.
+ */
fun addContent(imageFileDeltaBlock: ImageFileDeltaBlock) =
addContent(MessageContentDelta.ofImageFileDeltaBlock(imageFileDeltaBlock))
- /** The content of the message in array of text and/or images. */
+ /** The text content that is part of a message. */
fun addContent(textDeltaBlock: TextDeltaBlock) =
addContent(MessageContentDelta.ofTextDeltaBlock(textDeltaBlock))
- /** The content of the message in array of text and/or images. */
+ /** The refusal content that is part of a message. */
fun addContent(refusalDeltaBlock: RefusalDeltaBlock) =
addContent(MessageContentDelta.ofRefusalDeltaBlock(refusalDeltaBlock))
- /** The content of the message in array of text and/or images. */
+ /** References an image URL in the content of a message. */
fun addContent(imageUrlDeltaBlock: ImageUrlDeltaBlock) =
addContent(MessageContentDelta.ofImageUrlDeltaBlock(imageUrlDeltaBlock))
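`MessageContentDelta` mirrors the same union shape for streamed message fragments. A sketch that keeps only the text deltas from a streamed sequence, assuming a hypothetical `deltas: List<MessageContentDelta>` collected elsewhere:

```kotlin
// Hypothetical: `deltas` is a List<MessageContentDelta> gathered from a
// streaming response elsewhere; only accessors shown above are used.
val textDeltas: List<TextDeltaBlock> =
    deltas
        .filter { it.textDeltaBlock().isPresent }
        .map { it.asTextDeltaBlock() }
```
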
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/MessageStreamEvent.kt b/openai-java-core/src/main/kotlin/com/openai/models/MessageStreamEvent.kt
index d22276f4d..eab72e98a 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/MessageStreamEvent.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/MessageStreamEvent.kt
@@ -53,23 +53,27 @@ private constructor(
*/
fun threadMessageCreated(): Optional =
Optional.ofNullable(threadMessageCreated)
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) moves
* to an `in_progress` state.
*/
fun threadMessageInProgress(): Optional =
Optional.ofNullable(threadMessageInProgress)
+
/**
* Occurs when parts of a
* [Message](https://platform.openai.com/docs/api-reference/messages/object) are being streamed.
*/
fun threadMessageDelta(): Optional = Optional.ofNullable(threadMessageDelta)
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is
* completed.
*/
fun threadMessageCompleted(): Optional =
Optional.ofNullable(threadMessageCompleted)
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) ends
* before it is completed.
@@ -93,24 +97,28 @@ private constructor(
*/
fun asThreadMessageCreated(): ThreadMessageCreated =
threadMessageCreated.getOrThrow("threadMessageCreated")
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) moves
* to an `in_progress` state.
*/
fun asThreadMessageInProgress(): ThreadMessageInProgress =
threadMessageInProgress.getOrThrow("threadMessageInProgress")
+
/**
* Occurs when parts of a
* [Message](https://platform.openai.com/docs/api-reference/messages/object) are being streamed.
*/
fun asThreadMessageDelta(): ThreadMessageDelta =
threadMessageDelta.getOrThrow("threadMessageDelta")
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is
* completed.
*/
fun asThreadMessageCompleted(): ThreadMessageCompleted =
threadMessageCompleted.getOrThrow("threadMessageCompleted")
+
/**
* Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) ends
* before it is completed.
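Each message lifecycle stage is a separate `MessageStreamEvent` variant with the same `Optional`/`as*` access pair. A sketch of routing one event, assuming a hypothetical `event: MessageStreamEvent` taken from a streaming run:

```kotlin
// Hypothetical event routing; `event` comes from a streaming response
// elsewhere. The `else` branch covers the remaining variant (a message
// that ends before it is completed).
when {
    event.threadMessageCreated().isPresent -> println("message created")
    event.threadMessageInProgress().isPresent -> println("message in progress")
    event.threadMessageDelta().isPresent ->
        println("delta: ${event.asThreadMessageDelta()}")
    event.threadMessageCompleted().isPresent -> println("message completed")
    else -> println("message ended before completing")
}
```
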
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ModerationCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/ModerationCreateParams.kt
index e8dca93ea..7279968a2 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ModerationCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ModerationCreateParams.kt
@@ -449,8 +449,10 @@ constructor(
/** A string of text to classify for moderation. */
fun string(): Optional = Optional.ofNullable(string)
+
/** An array of strings to classify for moderation. */
fun strings(): Optional> = Optional.ofNullable(strings)
+
/** An array of multi-modal inputs to the moderation model. */
fun moderationMultiModalArray(): Optional> =
Optional.ofNullable(moderationMultiModalArray)
@@ -463,8 +465,10 @@ constructor(
/** A string of text to classify for moderation. */
fun asString(): String = string.getOrThrow("string")
+
/** An array of strings to classify for moderation. */
fun asStrings(): List = strings.getOrThrow("strings")
+
/** An array of multi-modal inputs to the moderation model. */
fun asModerationMultiModalArray(): List =
moderationMultiModalArray.getOrThrow("moderationMultiModalArray")
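The moderation input union carries exactly one of a single string, a string array, or a multi-modal array. A sketch normalizing the three shapes to an item count, assuming a hypothetical `input` value of the nested union type (its name is not visible in this hunk):

```kotlin
// Hypothetical; `input` is the moderation input union documented above.
val itemCount =
    when {
        input.string().isPresent -> 1
        input.strings().isPresent -> input.asStrings().size
        else -> input.asModerationMultiModalArray().size
    }
```
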
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ModerationMultiModalInput.kt b/openai-java-core/src/main/kotlin/com/openai/models/ModerationMultiModalInput.kt
index 3e3146654..c92747aad 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ModerationMultiModalInput.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ModerationMultiModalInput.kt
@@ -33,6 +33,7 @@ private constructor(
/** An object describing an image to classify. */
fun moderationImageUrlInput(): Optional =
Optional.ofNullable(moderationImageUrlInput)
+
/** An object describing text to classify. */
fun moderationTextInput(): Optional =
Optional.ofNullable(moderationTextInput)
@@ -44,6 +45,7 @@ private constructor(
/** An object describing an image to classify. */
fun asModerationImageUrlInput(): ModerationImageUrlInput =
moderationImageUrlInput.getOrThrow("moderationImageUrlInput")
+
/** An object describing text to classify. */
fun asModerationTextInput(): ModerationTextInput =
moderationTextInput.getOrThrow("moderationTextInput")
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/Run.kt b/openai-java-core/src/main/kotlin/com/openai/models/Run.kt
index 3439a2e04..7414982b4 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/Run.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/Run.kt
@@ -832,14 +832,74 @@ private constructor(
fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) =
responseFormat(AssistantResponseFormatOption.ofBehavior(behavior))
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatText: ResponseFormatText) =
responseFormat(AssistantResponseFormatOption.ofResponseFormatText(responseFormatText))
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonObject: ResponseFormatJsonObject) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonObject(responseFormatJsonObject)
)
+ /**
+ * Specifies the format that the model must output. Compatible with
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4
+ * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5
+ * Turbo models since `gpt-3.5-turbo-1106`.
+ *
+ * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs
+ * which ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the
+ * model generates is valid JSON.
+ *
+ * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
+ * yourself via a system or user message. Without this, the model may generate an unending
+ * stream of whitespace until the generation reaches the token limit, resulting in a
+ * long-running and seemingly "stuck" request. Also note that the message content may be
+ * partially cut off if `finish_reason="length"`, which indicates the generation exceeded
+ * `max_tokens` or the conversation exceeded the max context length.
+ */
fun responseFormat(responseFormatJsonSchema: ResponseFormatJsonSchema) =
responseFormat(
AssistantResponseFormatOption.ofResponseFormatJsonSchema(responseFormatJsonSchema)
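All three `responseFormat` overloads above share one javadoc because they set the same underlying `AssistantResponseFormatOption`; only the union variant differs. A sketch of the JSON-mode case, assuming a hypothetical `builder` (the `Run` builder this hunk sits in) and a `jsonObjectFormat: ResponseFormatJsonObject` built elsewhere:

```kotlin
// Hypothetical; construction of `builder` and `jsonObjectFormat` is not
// shown in this diff.

// Convenience overload from the hunk above...
builder.responseFormat(jsonObjectFormat)

// ...is equivalent to wrapping in the union explicitly.
builder.responseFormat(
    AssistantResponseFormatOption.ofResponseFormatJsonObject(jsonObjectFormat)
)

// Per the javadoc: with JSON mode you must also instruct the model to
// produce JSON via a system or user message, or it may emit whitespace
// until the token limit and the request will look stuck.
```
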
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/RunStep.kt b/openai-java-core/src/main/kotlin/com/openai/models/RunStep.kt
index 46c436ed3..238091af8 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/RunStep.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/RunStep.kt
@@ -828,6 +828,7 @@ private constructor(
/** Details of the message creation by the run step. */
fun messageCreationStepDetails(): Optional =
Optional.ofNullable(messageCreationStepDetails)
+
/** Details of the tool call. */
fun toolCallsStepDetails(): Optional =
Optional.ofNullable(toolCallsStepDetails)
@@ -839,6 +840,7 @@ private constructor(
/** Details of the message creation by the run step. */
fun asMessageCreationStepDetails(): MessageCreationStepDetails =
messageCreationStepDetails.getOrThrow("messageCreationStepDetails")
+
/** Details of the tool call. */
fun asToolCallsStepDetails(): ToolCallsStepDetails =
toolCallsStepDetails.getOrThrow("toolCallsStepDetails")
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/RunStepDelta.kt b/openai-java-core/src/main/kotlin/com/openai/models/RunStepDelta.kt
index 942c59c58..bff00a954 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/RunStepDelta.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/RunStepDelta.kt
@@ -132,6 +132,7 @@ private constructor(
/** Details of the message creation by the run step. */
fun runStepDeltaMessageDelta(): Optional =
Optional.ofNullable(runStepDeltaMessageDelta)
+
/** Details of the tool call. */
fun toolCallDeltaObject(): Optional =
Optional.ofNullable(toolCallDeltaObject)
@@ -143,6 +144,7 @@ private constructor(
/** Details of the message creation by the run step. */
fun asRunStepDeltaMessageDelta(): RunStepDeltaMessageDelta =
runStepDeltaMessageDelta.getOrThrow("runStepDeltaMessageDelta")
+
/** Details of the tool call. */
fun asToolCallDeltaObject(): ToolCallDeltaObject =
toolCallDeltaObject.getOrThrow("toolCallDeltaObject")
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/RunStepStreamEvent.kt b/openai-java-core/src/main/kotlin/com/openai/models/RunStepStreamEvent.kt
index 746d68a6f..d5819f523 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/RunStepStreamEvent.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/RunStepStreamEvent.kt
@@ -55,6 +55,7 @@ private constructor(
*/
fun threadRunStepCreated(): Optional =
Optional.ofNullable(threadRunStepCreated)
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) moves to an
@@ -62,12 +63,14 @@ private constructor(
*/
fun threadRunStepInProgress(): Optional =
Optional.ofNullable(threadRunStepInProgress)
+
/**
* Occurs when parts of a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) are being
* streamed.
*/
fun threadRunStepDelta(): Optional = Optional.ofNullable(threadRunStepDelta)
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is
@@ -75,12 +78,14 @@ private constructor(
*/
fun threadRunStepCompleted(): Optional =
Optional.ofNullable(threadRunStepCompleted)
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) fails.
*/
fun threadRunStepFailed(): Optional =
Optional.ofNullable(threadRunStepFailed)
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is
@@ -88,6 +93,7 @@ private constructor(
*/
fun threadRunStepCancelled(): Optional =
Optional.ofNullable(threadRunStepCancelled)
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) expires.
@@ -115,6 +121,7 @@ private constructor(
*/
fun asThreadRunStepCreated(): ThreadRunStepCreated =
threadRunStepCreated.getOrThrow("threadRunStepCreated")
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) moves to an
@@ -122,6 +129,7 @@ private constructor(
*/
fun asThreadRunStepInProgress(): ThreadRunStepInProgress =
threadRunStepInProgress.getOrThrow("threadRunStepInProgress")
+
/**
* Occurs when parts of a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) are being
@@ -129,6 +137,7 @@ private constructor(
*/
fun asThreadRunStepDelta(): ThreadRunStepDelta =
threadRunStepDelta.getOrThrow("threadRunStepDelta")
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is
@@ -136,12 +145,14 @@ private constructor(
*/
fun asThreadRunStepCompleted(): ThreadRunStepCompleted =
threadRunStepCompleted.getOrThrow("threadRunStepCompleted")
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) fails.
*/
fun asThreadRunStepFailed(): ThreadRunStepFailed =
threadRunStepFailed.getOrThrow("threadRunStepFailed")
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is
@@ -149,6 +160,7 @@ private constructor(
*/
fun asThreadRunStepCancelled(): ThreadRunStepCancelled =
threadRunStepCancelled.getOrThrow("threadRunStepCancelled")
+
/**
* Occurs when a
* [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) expires.
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/RunStreamEvent.kt b/openai-java-core/src/main/kotlin/com/openai/models/RunStreamEvent.kt
index 3708f0f79..4a60b7bbc 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/RunStreamEvent.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/RunStreamEvent.kt
@@ -56,45 +56,54 @@ private constructor(
* created.
*/
fun threadRunCreated(): Optional = Optional.ofNullable(threadRunCreated)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a
* `queued` status.
*/
fun threadRunQueued(): Optional = Optional.ofNullable(threadRunQueued)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to an
* `in_progress` status.
*/
fun threadRunInProgress(): Optional =
Optional.ofNullable(threadRunInProgress)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a
* `requires_action` status.
*/
fun threadRunRequiresAction(): Optional =
Optional.ofNullable(threadRunRequiresAction)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is completed.
*/
fun threadRunCompleted(): Optional = Optional.ofNullable(threadRunCompleted)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) ends with
* status `incomplete`.
*/
fun threadRunIncomplete(): Optional =
Optional.ofNullable(threadRunIncomplete)
+
/** Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) fails. */
fun threadRunFailed(): Optional = Optional.ofNullable(threadRunFailed)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a
* `cancelling` status.
*/
fun threadRunCancelling(): Optional =
Optional.ofNullable(threadRunCancelling)
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is cancelled.
*/
fun threadRunCancelled(): Optional = Optional.ofNullable(threadRunCancelled)
+
/** Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) expires. */
fun threadRunExpired(): Optional = Optional.ofNullable(threadRunExpired)
@@ -123,47 +132,56 @@ private constructor(
* created.
*/
fun asThreadRunCreated(): ThreadRunCreated = threadRunCreated.getOrThrow("threadRunCreated")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a
* `queued` status.
*/
fun asThreadRunQueued(): ThreadRunQueued = threadRunQueued.getOrThrow("threadRunQueued")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to an
* `in_progress` status.
*/
fun asThreadRunInProgress(): ThreadRunInProgress =
threadRunInProgress.getOrThrow("threadRunInProgress")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a
* `requires_action` status.
*/
fun asThreadRunRequiresAction(): ThreadRunRequiresAction =
threadRunRequiresAction.getOrThrow("threadRunRequiresAction")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is completed.
*/
fun asThreadRunCompleted(): ThreadRunCompleted =
threadRunCompleted.getOrThrow("threadRunCompleted")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) ends with
* status `incomplete`.
*/
fun asThreadRunIncomplete(): ThreadRunIncomplete =
threadRunIncomplete.getOrThrow("threadRunIncomplete")
+
/** Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) fails. */
fun asThreadRunFailed(): ThreadRunFailed = threadRunFailed.getOrThrow("threadRunFailed")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a
* `cancelling` status.
*/
fun asThreadRunCancelling(): ThreadRunCancelling =
threadRunCancelling.getOrThrow("threadRunCancelling")
+
/**
* Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is cancelled.
*/
fun asThreadRunCancelled(): ThreadRunCancelled =
threadRunCancelled.getOrThrow("threadRunCancelled")
+
/** Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) expires. */
fun asThreadRunExpired(): ThreadRunExpired = threadRunExpired.getOrThrow("threadRunExpired")
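The run-level events partition into transitional states (`queued`, `in_progress`, `requires_action`, `cancelling`) and terminal ones. A sketch of a terminal-state check built from the accessors shown above:

```kotlin
// Uses only accessor names visible in the hunks above; RunStreamEvent
// values would come from a streaming response elsewhere.
fun isTerminal(event: RunStreamEvent): Boolean =
    event.threadRunCompleted().isPresent ||
        event.threadRunIncomplete().isPresent ||
        event.threadRunFailed().isPresent ||
        event.threadRunCancelled().isPresent ||
        event.threadRunExpired().isPresent
```
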
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/Text.kt b/openai-java-core/src/main/kotlin/com/openai/models/Text.kt
index c762222db..b6aca81b8 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/Text.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/Text.kt
@@ -91,9 +91,18 @@ private constructor(
}
}
+ /**
+ * A citation within the message that points to a specific quote from a specific File
+ * associated with the assistant or the message. Generated when the assistant uses the
+ * "file_search" tool to search files.
+ */
fun addAnnotation(fileCitationAnnotation: FileCitationAnnotation) =
addAnnotation(Annotation.ofFileCitationAnnotation(fileCitationAnnotation))
+ /**
+ * A URL for the file that's generated when the assistant used the `code_interpreter` tool
+ * to generate a file.
+ */
fun addAnnotation(filePathAnnotation: FilePathAnnotation) =
addAnnotation(Annotation.ofFilePathAnnotation(filePathAnnotation))
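The two `addAnnotation` overloads distinguish the annotation's origin: file citations come from the `file_search` tool, file paths from `code_interpreter` output. A minimal sketch, assuming a hypothetical `textBuilder` (a `Text.Builder`) and annotation values built elsewhere:

```kotlin
// Hypothetical; neither the builder nor the annotation values are
// constructed in this diff.

// Citation produced by the "file_search" tool.
textBuilder.addAnnotation(fileCitationAnnotation)

// URL for a file generated by the `code_interpreter` tool; equivalent to
// wrapping in the Annotation union explicitly:
textBuilder.addAnnotation(Annotation.ofFilePathAnnotation(filePathAnnotation))
```
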
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/TextDelta.kt b/openai-java-core/src/main/kotlin/com/openai/models/TextDelta.kt
index 15487d569..2754a33cf 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/TextDelta.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/TextDelta.kt
@@ -93,11 +93,20 @@ private constructor(
}
}
+ /**
+ * A citation within the message that points to a specific quote from a specific File
+ * associated with the assistant or the message. Generated when the assistant uses the
+ * "file_search" tool to search files.
+ */
fun addAnnotation(fileCitationDeltaAnnotation: FileCitationDeltaAnnotation) =
addAnnotation(
AnnotationDelta.ofFileCitationDeltaAnnotation(fileCitationDeltaAnnotation)
)
+ /**
+ * A URL for the file that's generated when the assistant used the `code_interpreter` tool
+ * to generate a file.
+ */
fun addAnnotation(filePathDeltaAnnotation: FilePathDeltaAnnotation) =
addAnnotation(AnnotationDelta.ofFilePathDeltaAnnotation(filePathDeltaAnnotation))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ToolCallDeltaObject.kt b/openai-java-core/src/main/kotlin/com/openai/models/ToolCallDeltaObject.kt
index 0c4d10167..92f9b532c 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ToolCallDeltaObject.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ToolCallDeltaObject.kt
@@ -122,10 +122,7 @@ private constructor(
}
}
- /**
- * An array of tool calls the run step was involved in. These can be associated with one of
- * three types of tools: `code_interpreter`, `file_search`, or `function`.
- */
+ /** Details of the Code Interpreter tool call the run step was involved in. */
fun addToolCall(codeInterpreterToolCallDelta: CodeInterpreterToolCallDelta) =
addToolCall(ToolCallDelta.ofCodeInterpreterToolCallDelta(codeInterpreterToolCallDelta))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ToolCallsStepDetails.kt b/openai-java-core/src/main/kotlin/com/openai/models/ToolCallsStepDetails.kt
index 6e50d167b..6cdd32583 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ToolCallsStepDetails.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ToolCallsStepDetails.kt
@@ -114,10 +114,7 @@ private constructor(
}
}
- /**
- * An array of tool calls the run step was involved in. These can be associated with one of
- * three types of tools: `code_interpreter`, `file_search`, or `function`.
- */
+ /** Details of the Code Interpreter tool call the run step was involved in. */
fun addToolCall(codeInterpreterToolCall: CodeInterpreterToolCall) =
addToolCall(ToolCall.ofCodeInterpreterToolCall(codeInterpreterToolCall))
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/VectorStoreFile.kt b/openai-java-core/src/main/kotlin/com/openai/models/VectorStoreFile.kt
index 058cbd0f4..ec7a4ad96 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/VectorStoreFile.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/VectorStoreFile.kt
@@ -266,6 +266,7 @@ private constructor(
this.chunkingStrategy = chunkingStrategy
}
+ /** The strategy used to chunk the file. */
fun chunkingStrategy(staticFileChunkingStrategyObject: StaticFileChunkingStrategyObject) =
chunkingStrategy(
FileChunkingStrategy.ofStaticFileChunkingStrategyObject(