From 8a8f2f9ac5f6555c372a81acc196b3ab04fd9555 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 7 Jan 2025 15:36:38 +0000
Subject: [PATCH 1/4] feat(client): allow passing null or optional for nullable fields (#84)

---
 .../client/okhttp/OpenAIOkHttpClient.kt       |   9 +-
 .../client/okhttp/OpenAIOkHttpClientAsync.kt  |   9 +-
 .../kotlin/com/openai/core/ClientOptions.kt   |   9 +-
 .../com/openai/models/BatchCreateParams.kt    |  10 +-
 .../com/openai/models/BatchListParams.kt      |  25 +-
 .../models/BetaAssistantCreateParams.kt       | 296 +++++-
 .../openai/models/BetaAssistantListParams.kt  |  43 +-
 .../models/BetaAssistantUpdateParams.kt       | 274 +++++-
 .../models/BetaThreadCreateAndRunParams.kt    | 585 ++++++++++--
 .../openai/models/BetaThreadCreateParams.kt   | 161 +++-
 .../models/BetaThreadMessageCreateParams.kt   |  42 +-
 .../models/BetaThreadMessageListParams.kt     |  48 +-
 .../models/BetaThreadMessageUpdateParams.kt   |  18 +-
 .../models/BetaThreadRunCreateParams.kt       | 461 +++++++++-
 .../openai/models/BetaThreadRunListParams.kt  |  43 +-
 .../models/BetaThreadRunStepListParams.kt     |  58 +-
 .../models/BetaThreadRunStepRetrieveParams.kt |  15 +-
 .../BetaThreadRunSubmitToolOutputsParams.kt   |  13 +-
 .../models/BetaThreadRunUpdateParams.kt       |  18 +-
 .../openai/models/BetaThreadUpdateParams.kt   |  73 +-
 .../models/BetaVectorStoreCreateParams.kt     |  74 +-
 .../BetaVectorStoreFileBatchCreateParams.kt   |  18 +-
 ...BetaVectorStoreFileBatchListFilesParams.kt |  48 +-
 .../models/BetaVectorStoreFileCreateParams.kt |  18 +-
 .../models/BetaVectorStoreFileListParams.kt   |  48 +-
 .../models/BetaVectorStoreListParams.kt       |  43 +-
 .../models/BetaVectorStoreUpdateParams.kt     |  40 +-
 .../models/ChatCompletionCreateParams.kt      | 840 ++++++++++++++++--
 .../openai/models/CompletionCreateParams.kt   | 561 +++++++++++-
 .../openai/models/EmbeddingCreateParams.kt    |  67 +-
 .../com/openai/models/FileListParams.kt       |  38 +-
 .../FineTuningJobCheckpointListParams.kt      |  14 +-
 .../models/FineTuningJobCreateParams.kt       | 272 +++++-
 .../models/FineTuningJobListEventsParams.kt   |  14 +-
 .../openai/models/FineTuningJobListParams.kt  |  14 +-
 .../com/openai/models/ImageGenerateParams.kt  | 131 ++-
 .../openai/models/ModerationCreateParams.kt   |  18 +-
 .../com/openai/models/UploadCompleteParams.kt |  16 +-
 38 files changed, 4067 insertions(+), 417 deletions(-)

diff --git a/openai-java-client-okhttp/src/main/kotlin/com/openai/client/okhttp/OpenAIOkHttpClient.kt b/openai-java-client-okhttp/src/main/kotlin/com/openai/client/okhttp/OpenAIOkHttpClient.kt
index 0a01dc68e..025640fd1 100644
--- a/openai-java-client-okhttp/src/main/kotlin/com/openai/client/okhttp/OpenAIOkHttpClient.kt
+++ b/openai-java-client-okhttp/src/main/kotlin/com/openai/client/okhttp/OpenAIOkHttpClient.kt
@@ -13,6 +13,7 @@ import com.openai.credential.Credential
 import java.net.Proxy
 import java.time.Clock
 import java.time.Duration
+import java.util.Optional
 
 class OpenAIOkHttpClient private constructor() {
 
@@ -138,9 +139,13 @@ class OpenAIOkHttpClient private constructor() {
         clientOptions.azureServiceVersion(azureServiceVersion)
     }
 
-    fun organization(organization: String) = apply { clientOptions.organization(organization) }
+    fun organization(organization: String?) = apply { clientOptions.organization(organization) }
 
-    fun project(project: String) = apply { clientOptions.project(project) }
+    fun organization(organization: Optional<String>) = organization(organization.orElse(null))
+
+    fun project(project: String?) = apply { clientOptions.project(project) }
+
+    fun project(project: Optional<String>) = project(project.orElse(null))
 
     fun fromEnv() = apply { clientOptions.fromEnv() }
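For context on what this change enables, here is a minimal Kotlin sketch of how a caller
might use the new overloads (illustrative only, not part of the patch; `builder()` and
`build()` are assumed from the SDK's existing builder pattern, while `fromEnv`,
`organization`, and `project` appear in the hunk above):

    import com.openai.client.okhttp.OpenAIOkHttpClient
    import java.util.Optional

    fun main() {
        val client = OpenAIOkHttpClient.builder()
            .fromEnv()                 // read OPENAI_API_KEY etc. from the environment
            .organization(null)        // String? overload: explicitly unset the field
            .project(Optional.empty()) // Optional<String> overload: unwraps via orElse(null)
            .build()
    }

Previously both setters required a non-null String, so there was no way to pass an
explicitly absent value.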
diff --git a/openai-java-client-okhttp/src/main/kotlin/com/openai/client/okhttp/OpenAIOkHttpClientAsync.kt b/openai-java-client-okhttp/src/main/kotlin/com/openai/client/okhttp/OpenAIOkHttpClientAsync.kt
index e88af6969..20753d76c 100644
--- a/openai-java-client-okhttp/src/main/kotlin/com/openai/client/okhttp/OpenAIOkHttpClientAsync.kt
+++ b/openai-java-client-okhttp/src/main/kotlin/com/openai/client/okhttp/OpenAIOkHttpClientAsync.kt
@@ -13,6 +13,7 @@ import com.openai.credential.Credential
 import java.net.Proxy
 import java.time.Clock
 import java.time.Duration
+import java.util.Optional
 
 class OpenAIOkHttpClientAsync private constructor() {
 
@@ -138,9 +139,13 @@ class OpenAIOkHttpClientAsync private constructor() {
         clientOptions.azureServiceVersion(azureServiceVersion)
     }
 
-    fun organization(organization: String) = apply { clientOptions.organization(organization) }
+    fun organization(organization: String?) = apply { clientOptions.organization(organization) }
 
-    fun project(project: String) = apply { clientOptions.project(project) }
+    fun organization(organization: Optional<String>) = organization(organization.orElse(null))
+
+    fun project(project: String?) = apply { clientOptions.project(project) }
+
+    fun project(project: Optional<String>) = project(project.orElse(null))
 
     fun fromEnv() = apply { clientOptions.fromEnv() }

diff --git a/openai-java-core/src/main/kotlin/com/openai/core/ClientOptions.kt b/openai-java-core/src/main/kotlin/com/openai/core/ClientOptions.kt
index aa08c1c77..44832b458 100644
--- a/openai-java-core/src/main/kotlin/com/openai/core/ClientOptions.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/core/ClientOptions.kt
@@ -14,6 +14,7 @@ import com.openai.core.http.RetryingHttpClient
 import com.openai.credential.BearerTokenCredential
 import com.openai.credential.Credential
 import java.time.Clock
+import java.util.Optional
 import java.util.concurrent.Executor
 import java.util.concurrent.Executors
 import java.util.concurrent.ThreadFactory
@@ -187,9 +188,13 @@ private constructor(
         this.azureServiceVersion = azureServiceVersion
     }
 
-    fun organization(organization: String) = apply { this.organization = organization }
+    fun organization(organization: String?) = apply { this.organization = organization }
 
-    fun project(project: String) = apply { this.project = project }
+    fun organization(organization: Optional<String>) = organization(organization.orElse(null))
+
+    fun project(project: String?) = apply { this.project = project }
+
+    fun project(project: Optional<String>) = project(project.orElse(null))
 
     fun fromEnv() = apply {
         val openAIKey = System.getenv("OPENAI_API_KEY")
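The same shape repeats in every params class below: a nullable setter owns the state, and
an Optional overload simply unwraps into it. A reduced, self-contained sketch of the
pattern (hypothetical `ExampleBuilder`, not a class from this SDK):

    import java.util.Optional

    class ExampleBuilder {
        private var organization: String? = null

        // Nullable setter: the single source of truth for the field.
        fun organization(organization: String?) = apply { this.organization = organization }

        // Optional overload: lets Java callers pass Optional.empty() to clear the field.
        fun organization(organization: Optional<String>) = organization(organization.orElse(null))

        fun build(): String = "organization=$organization"
    }

    fun main() {
        println(ExampleBuilder().organization("acme").build())           // organization=acme
        println(ExampleBuilder().organization(Optional.empty()).build()) // organization=null
    }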
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BatchCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BatchCreateParams.kt
index 8c62580c5..72229e73f 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BatchCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BatchCreateParams.kt
@@ -167,7 +167,10 @@ constructor(
         fun inputFileId(inputFileId: String) = apply { this.inputFileId = inputFileId }
 
         /** Optional custom metadata for the batch. */
-        fun metadata(metadata: Metadata) = apply { this.metadata = metadata }
+        fun metadata(metadata: Metadata?) = apply { this.metadata = metadata }
+
+        /** Optional custom metadata for the batch. */
+        fun metadata(metadata: Optional<Metadata>) = metadata(metadata.orElse(null))
 
         fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
             this.additionalProperties.clear()
@@ -269,7 +272,10 @@ constructor(
         fun inputFileId(inputFileId: String) = apply { body.inputFileId(inputFileId) }
 
         /** Optional custom metadata for the batch. */
-        fun metadata(metadata: Metadata) = apply { body.metadata(metadata) }
+        fun metadata(metadata: Metadata?) = apply { body.metadata(metadata) }
+
+        /** Optional custom metadata for the batch. */
+        fun metadata(metadata: Optional<Metadata>) = metadata(metadata.orElse(null))
 
         fun additionalHeaders(additionalHeaders: Headers) = apply {
             this.additionalHeaders.clear()

diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BatchListParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BatchListParams.kt
index 8e9d7d33b..2c2bc05c6 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BatchListParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BatchListParams.kt
@@ -73,13 +73,34 @@ constructor(
          * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page
          * of the list.
          */
-        fun after(after: String) = apply { this.after = after }
+        fun after(after: String?) = apply { this.after = after }
+
+        /**
+         * A cursor for use in pagination. `after` is an object ID that defines your place in the
+         * list. For instance, if you make a list request and receive 100 objects, ending with
+         * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page
+         * of the list.
+         */
+        fun after(after: Optional<String>) = after(after.orElse(null))
+
+        /**
+         * A limit on the number of objects to be returned. Limit can range between 1 and 100, and
+         * the default is 20.
+         */
+        fun limit(limit: Long?) = apply { this.limit = limit }
+
+        /**
+         * A limit on the number of objects to be returned. Limit can range between 1 and 100, and
+         * the default is 20.
+         */
+        fun limit(limit: Long) = limit(limit as Long?)
 
         /**
          * A limit on the number of objects to be returned. Limit can range between 1 and 100, and
          * the default is 20.
          */
-        fun limit(limit: Long) = apply { this.limit = limit }
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun limit(limit: Optional<Long>) = limit(limit.orElse(null) as Long?)
 
         fun additionalHeaders(additionalHeaders: Headers) = apply {
             this.additionalHeaders.clear()

diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantCreateParams.kt
index 77aa53a11..f6a38c4d5 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantCreateParams.kt
@@ -272,23 +272,43 @@ constructor(
         fun model(value: String) = apply { model = ChatModel.of(value) }
 
         /** The description of the assistant. The maximum length is 512 characters. */
-        fun description(description: String) = apply { this.description = description }
+        fun description(description: String?) = apply { this.description = description }
+
+        /** The description of the assistant. The maximum length is 512 characters. */
+        fun description(description: Optional<String>) = description(description.orElse(null))
+
+        /**
+         * The system instructions that the assistant uses. The maximum length is 256,000
+         * characters.
+         */
+        fun instructions(instructions: String?)
= apply { this.instructions = instructions } /** * The system instructions that the assistant uses. The maximum length is 256,000 * characters. */ - fun instructions(instructions: String) = apply { this.instructions = instructions } + fun instructions(instructions: Optional) = + instructions(instructions.orElse(null)) + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } /** * Set of 16 key-value pairs that can be attached to an object. This can be useful for * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) + + /** The name of the assistant. The maximum length is 256 characters. */ + fun name(name: String?) = apply { this.name = name } /** The name of the assistant. The maximum length is 256 characters. */ - fun name(name: String) = apply { this.name = name } + fun name(name: Optional) = name(name.orElse(null)) /** * Specifies the format that the model must output. Compatible with @@ -311,10 +331,34 @@ constructor( * may be partially cut off if `finish_reason="length"`, which indicates the generation * exceeded `max_tokens` or the conversation exceeded the max context length. */ - fun responseFormat(responseFormat: AssistantResponseFormatOption) = apply { + fun responseFormat(responseFormat: AssistantResponseFormatOption?) = apply { this.responseFormat = responseFormat } + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 + * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all + * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which ensures the model will match your supplied JSON schema. Learn more in + * the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message + * the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to produce + * JSON yourself via a system or user message. Without this, the model may generate an + * unending stream of whitespace until the generation reaches the token limit, resulting + * in a long-running and seemingly "stuck" request. Also note that the message content + * may be partially cut off if `finish_reason="length"`, which indicates the generation + * exceeded `max_tokens` or the conversation exceeded the max context length. + */ + fun responseFormat(responseFormat: Optional) = + responseFormat(responseFormat.orElse(null)) + /** `auto` is the default value */ fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) = apply { this.responseFormat = AssistantResponseFormatOption.ofBehavior(behavior) @@ -344,22 +388,52 @@ constructor( * the output more random, while lower values like 0.2 will make it more focused and * deterministic. 
*/ - fun temperature(temperature: Double) = apply { this.temperature = temperature } + fun temperature(temperature: Double?) = apply { this.temperature = temperature } + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + fun temperature(temperature: Double) = temperature(temperature as Double?) + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun temperature(temperature: Optional) = + temperature(temperature.orElse(null) as Double?) /** * A set of resources that are used by the assistant's tools. The resources are specific * to the type of tool. For example, the `code_interpreter` tool requires a list of file * IDs, while the `file_search` tool requires a list of vector store IDs. */ - fun toolResources(toolResources: ToolResources) = apply { + fun toolResources(toolResources: ToolResources?) = apply { this.toolResources = toolResources } + /** + * A set of resources that are used by the assistant's tools. The resources are specific + * to the type of tool. For example, the `code_interpreter` tool requires a list of file + * IDs, while the `file_search` tool requires a list of vector store IDs. + */ + fun toolResources(toolResources: Optional) = + toolResources(toolResources.orElse(null)) + + /** + * A list of tool enabled on the assistant. There can be a maximum of 128 tools per + * assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + */ + fun tools(tools: List?) = apply { this.tools = tools?.toMutableList() } + /** * A list of tool enabled on the assistant. There can be a maximum of 128 tools per * assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. */ - fun tools(tools: List) = apply { this.tools = tools.toMutableList() } + fun tools(tools: Optional>) = tools(tools.orElse(null)) /** * A list of tool enabled on the assistant. There can be a maximum of 128 tools per @@ -376,7 +450,26 @@ constructor( * * We generally recommend altering this or temperature but not both. */ - fun topP(topP: Double) = apply { this.topP = topP } + fun topP(topP: Double?) = apply { this.topP = topP } + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only + * the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + fun topP(topP: Double) = topP(topP as Double?) + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only + * the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun topP(topP: Optional) = topP(topP.orElse(null) as Double?) 
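A note on the overload trio just above: for primitive-backed fields such as `topP` and
`temperature`, the non-null overload exists so Java callers get a primitive `double`
signature, and its body must cast the argument to the nullable type so the call binds to
the `Double?` overload instead of recursing into itself; the Optional overload keeps a
cast the compiler flags as useless but that is needed for the same overload-resolution
reason (hence the suppression referencing KT-74228). A reduced, self-contained sketch
(hypothetical `SamplingBuilder`, not a class from this SDK):

    import java.util.Optional

    class SamplingBuilder {
        private var topP: Double? = null

        // Nullable setter: the single source of truth.
        fun topP(topP: Double?) = apply { this.topP = topP }

        // Non-null overload: the cast forces dispatch to the Double? overload above.
        fun topP(topP: Double) = topP(topP as Double?)

        // Optional overload: the cast looks redundant but keeps resolution unambiguous.
        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
        fun topP(topP: Optional<Double>) = topP(topP.orElse(null) as Double?)
    }

    fun main() {
        SamplingBuilder().topP(0.9)              // binds to the primitive overload
        SamplingBuilder().topP(Optional.empty()) // clears the field via the Optional overload
    }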
fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -469,23 +562,42 @@ constructor( fun model(value: String) = apply { body.model(value) } /** The description of the assistant. The maximum length is 512 characters. */ - fun description(description: String) = apply { body.description(description) } + fun description(description: String?) = apply { body.description(description) } + + /** The description of the assistant. The maximum length is 512 characters. */ + fun description(description: Optional) = description(description.orElse(null)) /** * The system instructions that the assistant uses. The maximum length is 256,000 * characters. */ - fun instructions(instructions: String) = apply { body.instructions(instructions) } + fun instructions(instructions: String?) = apply { body.instructions(instructions) } + + /** + * The system instructions that the assistant uses. The maximum length is 256,000 + * characters. + */ + fun instructions(instructions: Optional) = instructions(instructions.orElse(null)) + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: JsonValue?) = apply { body.metadata(metadata) } /** * Set of 16 key-value pairs that can be attached to an object. This can be useful for * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { body.metadata(metadata) } + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) + + /** The name of the assistant. The maximum length is 256 characters. */ + fun name(name: String?) = apply { body.name(name) } /** The name of the assistant. The maximum length is 256 characters. */ - fun name(name: String) = apply { body.name(name) } + fun name(name: Optional) = name(name.orElse(null)) /** * Specifies the format that the model must output. Compatible with @@ -507,10 +619,33 @@ constructor( * partially cut off if `finish_reason="length"`, which indicates the generation exceeded * `max_tokens` or the conversation exceeded the max context length. */ - fun responseFormat(responseFormat: AssistantResponseFormatOption) = apply { + fun responseFormat(responseFormat: AssistantResponseFormatOption?) = apply { body.responseFormat(responseFormat) } + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 + * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 + * Turbo models since `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs + * which ensures the model will match your supplied JSON schema. Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the + * model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + * yourself via a system or user message. 
Without this, the model may generate an unending + * stream of whitespace until the generation reaches the token limit, resulting in a + * long-running and seemingly "stuck" request. Also note that the message content may be + * partially cut off if `finish_reason="length"`, which indicates the generation exceeded + * `max_tokens` or the conversation exceeded the max context length. + */ + fun responseFormat(responseFormat: Optional) = + responseFormat(responseFormat.orElse(null)) + /** `auto` is the default value */ fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) = apply { body.responseFormat(behavior) @@ -533,22 +668,52 @@ constructor( * output more random, while lower values like 0.2 will make it more focused and * deterministic. */ - fun temperature(temperature: Double) = apply { body.temperature(temperature) } + fun temperature(temperature: Double?) = apply { body.temperature(temperature) } + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + fun temperature(temperature: Double) = temperature(temperature as Double?) + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun temperature(temperature: Optional) = + temperature(temperature.orElse(null) as Double?) /** * A set of resources that are used by the assistant's tools. The resources are specific to * the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, * while the `file_search` tool requires a list of vector store IDs. */ - fun toolResources(toolResources: ToolResources) = apply { + fun toolResources(toolResources: ToolResources?) = apply { body.toolResources(toolResources) } + /** + * A set of resources that are used by the assistant's tools. The resources are specific to + * the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, + * while the `file_search` tool requires a list of vector store IDs. + */ + fun toolResources(toolResources: Optional) = + toolResources(toolResources.orElse(null)) + + /** + * A list of tool enabled on the assistant. There can be a maximum of 128 tools per + * assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + */ + fun tools(tools: List?) = apply { body.tools(tools) } + /** * A list of tool enabled on the assistant. There can be a maximum of 128 tools per * assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. */ - fun tools(tools: List) = apply { body.tools(tools) } + fun tools(tools: Optional>) = tools(tools.orElse(null)) /** * A list of tool enabled on the assistant. There can be a maximum of 128 tools per @@ -563,7 +728,26 @@ constructor( * * We generally recommend altering this or temperature but not both. */ - fun topP(topP: Double) = apply { body.topP(topP) } + fun topP(topP: Double?) = apply { body.topP(topP) } + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the + * tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. 
+ */ + fun topP(topP: Double) = topP(topP as Double?) + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the + * tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun topP(topP: Optional) = topP(topP.orElse(null) as Double?) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() @@ -735,11 +919,16 @@ constructor( additionalProperties = toolResources.additionalProperties.toMutableMap() } - fun codeInterpreter(codeInterpreter: CodeInterpreter) = apply { + fun codeInterpreter(codeInterpreter: CodeInterpreter?) = apply { this.codeInterpreter = codeInterpreter } - fun fileSearch(fileSearch: FileSearch) = apply { this.fileSearch = fileSearch } + fun codeInterpreter(codeInterpreter: Optional) = + codeInterpreter(codeInterpreter.orElse(null)) + + fun fileSearch(fileSearch: FileSearch?) = apply { this.fileSearch = fileSearch } + + fun fileSearch(fileSearch: Optional) = fileSearch(fileSearch.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -812,10 +1001,17 @@ constructor( * available to the `code_interpreter` tool. There can be a maximum of 20 files * associated with the tool. */ - fun fileIds(fileIds: List) = apply { - this.fileIds = fileIds.toMutableList() + fun fileIds(fileIds: List?) = apply { + this.fileIds = fileIds?.toMutableList() } + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + fun fileIds(fileIds: Optional>) = fileIds(fileIds.orElse(null)) + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made * available to the `code_interpreter` tool. There can be a maximum of 20 files @@ -927,10 +1123,19 @@ constructor( * attached to this assistant. There can be a maximum of 1 vector store attached to * the assistant. */ - fun vectorStoreIds(vectorStoreIds: List) = apply { - this.vectorStoreIds = vectorStoreIds.toMutableList() + fun vectorStoreIds(vectorStoreIds: List?) = apply { + this.vectorStoreIds = vectorStoreIds?.toMutableList() } + /** + * The + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this assistant. There can be a maximum of 1 vector store attached to + * the assistant. + */ + fun vectorStoreIds(vectorStoreIds: Optional>) = + vectorStoreIds(vectorStoreIds.orElse(null)) + /** * The * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) @@ -948,10 +1153,19 @@ constructor( * with file_ids and attach it to this assistant. There can be a maximum of 1 vector * store attached to the assistant. */ - fun vectorStores(vectorStores: List) = apply { - this.vectorStores = vectorStores.toMutableList() + fun vectorStores(vectorStores: List?) = apply { + this.vectorStores = vectorStores?.toMutableList() } + /** + * A helper to create a + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * with file_ids and attach it to this assistant. There can be a maximum of 1 vector + * store attached to the assistant. 
+ */ + fun vectorStores(vectorStores: Optional>) = + vectorStores(vectorStores.orElse(null)) + /** * A helper to create a * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) @@ -1058,10 +1272,17 @@ constructor( * The chunking strategy used to chunk the file(s). If not set, will use the * `auto` strategy. Only applicable if `file_ids` is non-empty. */ - fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam) = apply { + fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam?) = apply { this.chunkingStrategy = chunkingStrategy } + /** + * The chunking strategy used to chunk the file(s). If not set, will use the + * `auto` strategy. Only applicable if `file_ids` is non-empty. + */ + fun chunkingStrategy(chunkingStrategy: Optional) = + chunkingStrategy(chunkingStrategy.orElse(null)) + /** * The default strategy. This strategy currently uses a `max_chunk_size_tokens` * of `800` and `chunk_overlap_tokens` of `400`. @@ -1089,10 +1310,17 @@ constructor( * add to the vector store. There can be a maximum of 10000 files in a vector * store. */ - fun fileIds(fileIds: List) = apply { - this.fileIds = fileIds.toMutableList() + fun fileIds(fileIds: List?) = apply { + this.fileIds = fileIds?.toMutableList() } + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + * add to the vector store. There can be a maximum of 10000 files in a vector + * store. + */ + fun fileIds(fileIds: Optional>) = fileIds(fileIds.orElse(null)) + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to * add to the vector store. There can be a maximum of 10000 files in a vector @@ -1108,7 +1336,15 @@ constructor( * structured format. Keys can be a maximum of 64 characters long and values can * be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to a vector store. This can be + * useful for storing additional information about the vector store in a + * structured format. Keys can be a maximum of 64 characters long and values can + * be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantListParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantListParams.kt index 21912e347..ae2896fd9 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantListParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantListParams.kt @@ -98,7 +98,15 @@ constructor( * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page * of the list. */ - fun after(after: String) = apply { this.after = after } + fun after(after: String?) = apply { this.after = after } + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, ending with + * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page + * of the list. + */ + fun after(after: Optional) = after(after.orElse(null)) /** * A cursor for use in pagination. 
`before` is an object ID that defines your place in the @@ -106,19 +114,46 @@ constructor( * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous * page of the list. */ - fun before(before: String) = apply { this.before = before } + fun before(before: String?) = apply { this.before = before } + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, starting with + * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous + * page of the list. + */ + fun before(before: Optional) = before(before.orElse(null)) /** * A limit on the number of objects to be returned. Limit can range between 1 and 100, and * the default is 20. */ - fun limit(limit: Long) = apply { this.limit = limit } + fun limit(limit: Long?) = apply { this.limit = limit } + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + fun limit(limit: Long) = limit(limit as Long?) + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun limit(limit: Optional) = limit(limit.orElse(null) as Long?) + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and + * `desc` for descending order. + */ + fun order(order: Order?) = apply { this.order = order } /** * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and * `desc` for descending order. */ - fun order(order: Order) = apply { this.order = order } + fun order(order: Optional) = order(order.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantUpdateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantUpdateParams.kt index 9c782ac99..ff93a3b9d 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantUpdateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaAssistantUpdateParams.kt @@ -266,20 +266,37 @@ constructor( } /** The description of the assistant. The maximum length is 512 characters. */ - fun description(description: String) = apply { this.description = description } + fun description(description: String?) = apply { this.description = description } + + /** The description of the assistant. The maximum length is 512 characters. */ + fun description(description: Optional) = description(description.orElse(null)) + + /** + * The system instructions that the assistant uses. The maximum length is 256,000 + * characters. + */ + fun instructions(instructions: String?) = apply { this.instructions = instructions } /** * The system instructions that the assistant uses. The maximum length is 256,000 * characters. */ - fun instructions(instructions: String) = apply { this.instructions = instructions } + fun instructions(instructions: Optional) = + instructions(instructions.orElse(null)) /** * Set of 16 key-value pairs that can be attached to an object. This can be useful for * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. 
*/ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) /** * ID of the model to use. You can use the @@ -287,10 +304,21 @@ constructor( * all of your available models, or see our * [Model overview](https://platform.openai.com/docs/models) for descriptions of them. */ - fun model(model: String) = apply { this.model = model } + fun model(model: String?) = apply { this.model = model } + + /** + * ID of the model to use. You can use the + * [List models](https://platform.openai.com/docs/api-reference/models/list) API to see + * all of your available models, or see our + * [Model overview](https://platform.openai.com/docs/models) for descriptions of them. + */ + fun model(model: Optional) = model(model.orElse(null)) /** The name of the assistant. The maximum length is 256 characters. */ - fun name(name: String) = apply { this.name = name } + fun name(name: String?) = apply { this.name = name } + + /** The name of the assistant. The maximum length is 256 characters. */ + fun name(name: Optional) = name(name.orElse(null)) /** * Specifies the format that the model must output. Compatible with @@ -313,10 +341,34 @@ constructor( * may be partially cut off if `finish_reason="length"`, which indicates the generation * exceeded `max_tokens` or the conversation exceeded the max context length. */ - fun responseFormat(responseFormat: AssistantResponseFormatOption) = apply { + fun responseFormat(responseFormat: AssistantResponseFormatOption?) = apply { this.responseFormat = responseFormat } + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 + * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all + * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which ensures the model will match your supplied JSON schema. Learn more in + * the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message + * the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to produce + * JSON yourself via a system or user message. Without this, the model may generate an + * unending stream of whitespace until the generation reaches the token limit, resulting + * in a long-running and seemingly "stuck" request. Also note that the message content + * may be partially cut off if `finish_reason="length"`, which indicates the generation + * exceeded `max_tokens` or the conversation exceeded the max context length. 
+ */ + fun responseFormat(responseFormat: Optional) = + responseFormat(responseFormat.orElse(null)) + /** `auto` is the default value */ fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) = apply { this.responseFormat = AssistantResponseFormatOption.ofBehavior(behavior) @@ -346,22 +398,52 @@ constructor( * the output more random, while lower values like 0.2 will make it more focused and * deterministic. */ - fun temperature(temperature: Double) = apply { this.temperature = temperature } + fun temperature(temperature: Double?) = apply { this.temperature = temperature } + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + fun temperature(temperature: Double) = temperature(temperature as Double?) + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun temperature(temperature: Optional) = + temperature(temperature.orElse(null) as Double?) /** * A set of resources that are used by the assistant's tools. The resources are specific * to the type of tool. For example, the `code_interpreter` tool requires a list of file * IDs, while the `file_search` tool requires a list of vector store IDs. */ - fun toolResources(toolResources: ToolResources) = apply { + fun toolResources(toolResources: ToolResources?) = apply { this.toolResources = toolResources } + /** + * A set of resources that are used by the assistant's tools. The resources are specific + * to the type of tool. For example, the `code_interpreter` tool requires a list of file + * IDs, while the `file_search` tool requires a list of vector store IDs. + */ + fun toolResources(toolResources: Optional) = + toolResources(toolResources.orElse(null)) + /** * A list of tool enabled on the assistant. There can be a maximum of 128 tools per * assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. */ - fun tools(tools: List) = apply { this.tools = tools.toMutableList() } + fun tools(tools: List?) = apply { this.tools = tools?.toMutableList() } + + /** + * A list of tool enabled on the assistant. There can be a maximum of 128 tools per + * assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + */ + fun tools(tools: Optional>) = tools(tools.orElse(null)) /** * A list of tool enabled on the assistant. There can be a maximum of 128 tools per @@ -378,7 +460,26 @@ constructor( * * We generally recommend altering this or temperature but not both. */ - fun topP(topP: Double) = apply { this.topP = topP } + fun topP(topP: Double?) = apply { this.topP = topP } + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only + * the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + fun topP(topP: Double) = topP(topP as Double?) + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only + * the tokens comprising the top 10% probability mass are considered. 
+ * + * We generally recommend altering this or temperature but not both. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun topP(topP: Optional) = topP(topP.orElse(null) as Double?) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -459,20 +560,44 @@ constructor( fun assistantId(assistantId: String) = apply { this.assistantId = assistantId } /** The description of the assistant. The maximum length is 512 characters. */ - fun description(description: String) = apply { body.description(description) } + fun description(description: String?) = apply { body.description(description) } + + /** The description of the assistant. The maximum length is 512 characters. */ + fun description(description: Optional) = description(description.orElse(null)) /** * The system instructions that the assistant uses. The maximum length is 256,000 * characters. */ - fun instructions(instructions: String) = apply { body.instructions(instructions) } + fun instructions(instructions: String?) = apply { body.instructions(instructions) } + + /** + * The system instructions that the assistant uses. The maximum length is 256,000 + * characters. + */ + fun instructions(instructions: Optional) = instructions(instructions.orElse(null)) /** * Set of 16 key-value pairs that can be attached to an object. This can be useful for * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { body.metadata(metadata) } + fun metadata(metadata: JsonValue?) = apply { body.metadata(metadata) } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) + + /** + * ID of the model to use. You can use the + * [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all + * of your available models, or see our + * [Model overview](https://platform.openai.com/docs/models) for descriptions of them. + */ + fun model(model: String?) = apply { body.model(model) } /** * ID of the model to use. You can use the @@ -480,10 +605,13 @@ constructor( * of your available models, or see our * [Model overview](https://platform.openai.com/docs/models) for descriptions of them. */ - fun model(model: String) = apply { body.model(model) } + fun model(model: Optional) = model(model.orElse(null)) /** The name of the assistant. The maximum length is 256 characters. */ - fun name(name: String) = apply { body.name(name) } + fun name(name: String?) = apply { body.name(name) } + + /** The name of the assistant. The maximum length is 256 characters. */ + fun name(name: Optional) = name(name.orElse(null)) /** * Specifies the format that the model must output. Compatible with @@ -505,10 +633,33 @@ constructor( * partially cut off if `finish_reason="length"`, which indicates the generation exceeded * `max_tokens` or the conversation exceeded the max context length. */ - fun responseFormat(responseFormat: AssistantResponseFormatOption) = apply { + fun responseFormat(responseFormat: AssistantResponseFormatOption?) = apply { body.responseFormat(responseFormat) } + /** + * Specifies the format that the model must output. 
Compatible with + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 + * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 + * Turbo models since `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs + * which ensures the model will match your supplied JSON schema. Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the + * model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + * yourself via a system or user message. Without this, the model may generate an unending + * stream of whitespace until the generation reaches the token limit, resulting in a + * long-running and seemingly "stuck" request. Also note that the message content may be + * partially cut off if `finish_reason="length"`, which indicates the generation exceeded + * `max_tokens` or the conversation exceeded the max context length. + */ + fun responseFormat(responseFormat: Optional) = + responseFormat(responseFormat.orElse(null)) + /** `auto` is the default value */ fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) = apply { body.responseFormat(behavior) @@ -531,22 +682,52 @@ constructor( * output more random, while lower values like 0.2 will make it more focused and * deterministic. */ - fun temperature(temperature: Double) = apply { body.temperature(temperature) } + fun temperature(temperature: Double?) = apply { body.temperature(temperature) } + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + fun temperature(temperature: Double) = temperature(temperature as Double?) + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun temperature(temperature: Optional) = + temperature(temperature.orElse(null) as Double?) /** * A set of resources that are used by the assistant's tools. The resources are specific to * the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, * while the `file_search` tool requires a list of vector store IDs. */ - fun toolResources(toolResources: ToolResources) = apply { + fun toolResources(toolResources: ToolResources?) = apply { body.toolResources(toolResources) } + /** + * A set of resources that are used by the assistant's tools. The resources are specific to + * the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, + * while the `file_search` tool requires a list of vector store IDs. + */ + fun toolResources(toolResources: Optional) = + toolResources(toolResources.orElse(null)) + /** * A list of tool enabled on the assistant. There can be a maximum of 128 tools per * assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. */ - fun tools(tools: List) = apply { body.tools(tools) } + fun tools(tools: List?) = apply { body.tools(tools) } + + /** + * A list of tool enabled on the assistant. There can be a maximum of 128 tools per + * assistant. 
Tools can be of types `code_interpreter`, `file_search`, or `function`. + */ + fun tools(tools: Optional>) = tools(tools.orElse(null)) /** * A list of tool enabled on the assistant. There can be a maximum of 128 tools per @@ -561,7 +742,26 @@ constructor( * * We generally recommend altering this or temperature but not both. */ - fun topP(topP: Double) = apply { body.topP(topP) } + fun topP(topP: Double?) = apply { body.topP(topP) } + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the + * tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + fun topP(topP: Double) = topP(topP as Double?) + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the + * tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun topP(topP: Optional) = topP(topP.orElse(null) as Double?) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() @@ -734,11 +934,16 @@ constructor( additionalProperties = toolResources.additionalProperties.toMutableMap() } - fun codeInterpreter(codeInterpreter: CodeInterpreter) = apply { + fun codeInterpreter(codeInterpreter: CodeInterpreter?) = apply { this.codeInterpreter = codeInterpreter } - fun fileSearch(fileSearch: FileSearch) = apply { this.fileSearch = fileSearch } + fun codeInterpreter(codeInterpreter: Optional) = + codeInterpreter(codeInterpreter.orElse(null)) + + fun fileSearch(fileSearch: FileSearch?) = apply { this.fileSearch = fileSearch } + + fun fileSearch(fileSearch: Optional) = fileSearch(fileSearch.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -812,10 +1017,18 @@ constructor( * to the `code_interpreter` tool. There can be a maximum of 20 files associated * with the tool. */ - fun fileIds(fileIds: List) = apply { - this.fileIds = fileIds.toMutableList() + fun fileIds(fileIds: List?) = apply { + this.fileIds = fileIds?.toMutableList() } + /** + * Overrides the list of + * [file](https://platform.openai.com/docs/api-reference/files) IDs made available + * to the `code_interpreter` tool. There can be a maximum of 20 files associated + * with the tool. + */ + fun fileIds(fileIds: Optional>) = fileIds(fileIds.orElse(null)) + /** * Overrides the list of * [file](https://platform.openai.com/docs/api-reference/files) IDs made available @@ -916,10 +1129,19 @@ constructor( * attached to this assistant. There can be a maximum of 1 vector store attached to * the assistant. */ - fun vectorStoreIds(vectorStoreIds: List) = apply { - this.vectorStoreIds = vectorStoreIds.toMutableList() + fun vectorStoreIds(vectorStoreIds: List?) = apply { + this.vectorStoreIds = vectorStoreIds?.toMutableList() } + /** + * Overrides the + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this assistant. There can be a maximum of 1 vector store attached to + * the assistant. 
+ */ + fun vectorStoreIds(vectorStoreIds: Optional>) = + vectorStoreIds(vectorStoreIds.orElse(null)) + /** * Overrides the * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateAndRunParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateAndRunParams.kt index 1d1a3afc5..e5a8398e9 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateAndRunParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateAndRunParams.kt @@ -382,7 +382,14 @@ constructor( * Override the default system message of the assistant. This is useful for modifying * the behavior on a per-run basis. */ - fun instructions(instructions: String) = apply { this.instructions = instructions } + fun instructions(instructions: String?) = apply { this.instructions = instructions } + + /** + * Override the default system message of the assistant. This is useful for modifying + * the behavior on a per-run basis. + */ + fun instructions(instructions: Optional) = + instructions(instructions.orElse(null)) /** * The maximum number of completion tokens that may be used over the course of the run. @@ -391,26 +398,72 @@ constructor( * completion tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ - fun maxCompletionTokens(maxCompletionTokens: Long) = apply { + fun maxCompletionTokens(maxCompletionTokens: Long?) = apply { this.maxCompletionTokens = maxCompletionTokens } + /** + * The maximum number of completion tokens that may be used over the course of the run. + * The run will make a best effort to use only the number of completion tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `incomplete`. See + * `incomplete_details` for more info. + */ + fun maxCompletionTokens(maxCompletionTokens: Long) = + maxCompletionTokens(maxCompletionTokens as Long?) + + /** + * The maximum number of completion tokens that may be used over the course of the run. + * The run will make a best effort to use only the number of completion tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `incomplete`. See + * `incomplete_details` for more info. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun maxCompletionTokens(maxCompletionTokens: Optional) = + maxCompletionTokens(maxCompletionTokens.orElse(null) as Long?) + /** * The maximum number of prompt tokens that may be used over the course of the run. The * run will make a best effort to use only the number of prompt tokens specified, across * multiple turns of the run. If the run exceeds the number of prompt tokens specified, * the run will end with status `incomplete`. See `incomplete_details` for more info. */ - fun maxPromptTokens(maxPromptTokens: Long) = apply { + fun maxPromptTokens(maxPromptTokens: Long?) = apply { this.maxPromptTokens = maxPromptTokens } + /** + * The maximum number of prompt tokens that may be used over the course of the run. The + * run will make a best effort to use only the number of prompt tokens specified, across + * multiple turns of the run. If the run exceeds the number of prompt tokens specified, + * the run will end with status `incomplete`. See `incomplete_details` for more info. 
+ */ + fun maxPromptTokens(maxPromptTokens: Long) = maxPromptTokens(maxPromptTokens as Long?) + + /** + * The maximum number of prompt tokens that may be used over the course of the run. The + * run will make a best effort to use only the number of prompt tokens specified, across + * multiple turns of the run. If the run exceeds the number of prompt tokens specified, + * the run will end with status `incomplete`. See `incomplete_details` for more info. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun maxPromptTokens(maxPromptTokens: Optional) = + maxPromptTokens(maxPromptTokens.orElse(null) as Long?) + /** * Set of 16 key-value pairs that can be attached to an object. This can be useful for * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) /** * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be @@ -418,7 +471,15 @@ constructor( * associated with the assistant. If not, the model associated with the assistant will * be used. */ - fun model(model: ChatModel) = apply { this.model = model } + fun model(model: ChatModel?) = apply { this.model = model } + + /** + * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be + * used to execute this run. If a value is provided here, it will override the model + * associated with the assistant. If not, the model associated with the assistant will + * be used. + */ + fun model(model: Optional) = model(model.orElse(null)) /** * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be @@ -433,10 +494,27 @@ constructor( * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ - fun parallelToolCalls(parallelToolCalls: Boolean) = apply { + fun parallelToolCalls(parallelToolCalls: Boolean?) = apply { this.parallelToolCalls = parallelToolCalls } + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. + */ + fun parallelToolCalls(parallelToolCalls: Boolean) = + parallelToolCalls(parallelToolCalls as Boolean?) + + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun parallelToolCalls(parallelToolCalls: Optional) = + parallelToolCalls(parallelToolCalls.orElse(null) as Boolean?) + /** * Specifies the format that the model must output. Compatible with * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 @@ -458,10 +536,34 @@ constructor( * may be partially cut off if `finish_reason="length"`, which indicates the generation * exceeded `max_tokens` or the conversation exceeded the max context length. 
*/ - fun responseFormat(responseFormat: AssistantResponseFormatOption) = apply { + fun responseFormat(responseFormat: AssistantResponseFormatOption?) = apply { this.responseFormat = responseFormat } + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 + * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all + * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which ensures the model will match your supplied JSON schema. Learn more in + * the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message + * the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to produce + * JSON yourself via a system or user message. Without this, the model may generate an + * unending stream of whitespace until the generation reaches the token limit, resulting + * in a long-running and seemingly "stuck" request. Also note that the message content + * may be partially cut off if `finish_reason="length"`, which indicates the generation + * exceeded `max_tokens` or the conversation exceeded the max context length. + */ + fun responseFormat(responseFormat: Optional) = + responseFormat(responseFormat.orElse(null)) + /** `auto` is the default value */ fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) = apply { this.responseFormat = AssistantResponseFormatOption.ofBehavior(behavior) @@ -491,10 +593,29 @@ constructor( * the output more random, while lower values like 0.2 will make it more focused and * deterministic. */ - fun temperature(temperature: Double) = apply { this.temperature = temperature } + fun temperature(temperature: Double?) = apply { this.temperature = temperature } + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + fun temperature(temperature: Double) = temperature(temperature as Double?) + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun temperature(temperature: Optional) = + temperature(temperature.orElse(null) as Double?) + + /** If no thread is provided, an empty thread will be created. */ + fun thread(thread: Thread?) = apply { this.thread = thread } /** If no thread is provided, an empty thread will be created. */ - fun thread(thread: Thread) = apply { this.thread = thread } + fun thread(thread: Optional) = thread(thread.orElse(null)) /** * Controls which (if any) tool is called by the model. `none` means the model will not @@ -504,10 +625,21 @@ constructor( * Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", * "function": {"name": "my_function"}}` forces the model to call that tool. */ - fun toolChoice(toolChoice: AssistantToolChoiceOption) = apply { + fun toolChoice(toolChoice: AssistantToolChoiceOption?) = apply { this.toolChoice = toolChoice } + /** + * Controls which (if any) tool is called by the model. 
`none` means the model will not
+             * call any tools and instead generates a message. `auto` is the default value and means
+             * the model can pick between generating a message or calling one or more tools.
+             * `required` means the model must call one or more tools before responding to the user.
+             * Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function",
+             * "function": {"name": "my_function"}}` forces the model to call that tool.
+             */
+            fun toolChoice(toolChoice: Optional<AssistantToolChoiceOption>) =
+                toolChoice(toolChoice.orElse(null))
+
            /**
             * `none` means the model will not call any tools and instead generates a message.
             * `auto` means the model can pick between generating a message or calling one or more
@@ -532,15 +664,29 @@ constructor(
             * to the type of tool. For example, the `code_interpreter` tool requires a list of file
             * IDs, while the `file_search` tool requires a list of vector store IDs.
             */
-            fun toolResources(toolResources: ToolResources) = apply {
+            fun toolResources(toolResources: ToolResources?) = apply {
                this.toolResources = toolResources
            }

+            /**
+             * A set of resources that are used by the assistant's tools. The resources are specific
+             * to the type of tool. For example, the `code_interpreter` tool requires a list of file
+             * IDs, while the `file_search` tool requires a list of vector store IDs.
+             */
+            fun toolResources(toolResources: Optional<ToolResources>) =
+                toolResources(toolResources.orElse(null))
+
+            /**
+             * Override the tools the assistant can use for this run. This is useful for modifying
+             * the behavior on a per-run basis.
+             */
+            fun tools(tools: List<AssistantTool>?) = apply { this.tools = tools?.toMutableList() }
+
            /**
             * Override the tools the assistant can use for this run. This is useful for modifying
             * the behavior on a per-run basis.
             */
-            fun tools(tools: List<AssistantTool>) = apply { this.tools = tools.toMutableList() }
+            fun tools(tools: Optional<List<AssistantTool>>) = tools(tools.orElse(null))

            /**
             * Override the tools the assistant can use for this run. This is useful for modifying
@@ -557,16 +703,42 @@ constructor(
             *
             * We generally recommend altering this or temperature but not both.
             */
-            fun topP(topP: Double) = apply { this.topP = topP }
+            fun topP(topP: Double?) = apply { this.topP = topP }
+
+            /**
+             * An alternative to sampling with temperature, called nucleus sampling, where the model
+             * considers the results of the tokens with top_p probability mass. So 0.1 means only
+             * the tokens comprising the top 10% probability mass are considered.
+             *
+             * We generally recommend altering this or temperature but not both.
+             */
+            fun topP(topP: Double) = topP(topP as Double?)
+
+            /**
+             * An alternative to sampling with temperature, called nucleus sampling, where the model
+             * considers the results of the tokens with top_p probability mass. So 0.1 means only
+             * the tokens comprising the top 10% probability mass are considered.
+             *
+             * We generally recommend altering this or temperature but not both.
+             */
+            @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+            fun topP(topP: Optional<Double>) = topP(topP.orElse(null) as Double?)

            /**
             * Controls for how a thread will be truncated prior to the run. Use this to control the
             * initial context window of the run.
             */
-            fun truncationStrategy(truncationStrategy: TruncationStrategy) = apply {
+            fun truncationStrategy(truncationStrategy: TruncationStrategy?) = apply {
                this.truncationStrategy = truncationStrategy
            }

+            /**
+             * Controls for how a thread will be truncated prior to the run. Use this to control the
+             * initial context window of the run.
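+             *
+             * For example (editor's note): `truncationStrategy(null)` and
+             * `truncationStrategy(Optional.empty())` are interchangeable here; both clear the
+             * builder value so the request omits `truncation_strategy` and the API applies its
+             * default truncation behavior.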
+ */ + fun truncationStrategy(truncationStrategy: Optional) = + truncationStrategy(truncationStrategy.orElse(null)) + fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() putAllAdditionalProperties(additionalProperties) @@ -656,7 +828,13 @@ constructor( * Override the default system message of the assistant. This is useful for modifying the * behavior on a per-run basis. */ - fun instructions(instructions: String) = apply { body.instructions(instructions) } + fun instructions(instructions: String?) = apply { body.instructions(instructions) } + + /** + * Override the default system message of the assistant. This is useful for modifying the + * behavior on a per-run basis. + */ + fun instructions(instructions: Optional) = instructions(instructions.orElse(null)) /** * The maximum number of completion tokens that may be used over the course of the run. The @@ -664,31 +842,84 @@ constructor( * multiple turns of the run. If the run exceeds the number of completion tokens specified, * the run will end with status `incomplete`. See `incomplete_details` for more info. */ - fun maxCompletionTokens(maxCompletionTokens: Long) = apply { + fun maxCompletionTokens(maxCompletionTokens: Long?) = apply { body.maxCompletionTokens(maxCompletionTokens) } + /** + * The maximum number of completion tokens that may be used over the course of the run. The + * run will make a best effort to use only the number of completion tokens specified, across + * multiple turns of the run. If the run exceeds the number of completion tokens specified, + * the run will end with status `incomplete`. See `incomplete_details` for more info. + */ + fun maxCompletionTokens(maxCompletionTokens: Long) = + maxCompletionTokens(maxCompletionTokens as Long?) + + /** + * The maximum number of completion tokens that may be used over the course of the run. The + * run will make a best effort to use only the number of completion tokens specified, across + * multiple turns of the run. If the run exceeds the number of completion tokens specified, + * the run will end with status `incomplete`. See `incomplete_details` for more info. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun maxCompletionTokens(maxCompletionTokens: Optional) = + maxCompletionTokens(maxCompletionTokens.orElse(null) as Long?) + + /** + * The maximum number of prompt tokens that may be used over the course of the run. The run + * will make a best effort to use only the number of prompt tokens specified, across + * multiple turns of the run. If the run exceeds the number of prompt tokens specified, the + * run will end with status `incomplete`. See `incomplete_details` for more info. + */ + fun maxPromptTokens(maxPromptTokens: Long?) = apply { + body.maxPromptTokens(maxPromptTokens) + } + + /** + * The maximum number of prompt tokens that may be used over the course of the run. The run + * will make a best effort to use only the number of prompt tokens specified, across + * multiple turns of the run. If the run exceeds the number of prompt tokens specified, the + * run will end with status `incomplete`. See `incomplete_details` for more info. + */ + fun maxPromptTokens(maxPromptTokens: Long) = maxPromptTokens(maxPromptTokens as Long?) + /** * The maximum number of prompt tokens that may be used over the course of the run. The run * will make a best effort to use only the number of prompt tokens specified, across * multiple turns of the run. 
If the run exceeds the number of prompt tokens specified, the * run will end with status `incomplete`. See `incomplete_details` for more info. */ - fun maxPromptTokens(maxPromptTokens: Long) = apply { body.maxPromptTokens(maxPromptTokens) } + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun maxPromptTokens(maxPromptTokens: Optional) = + maxPromptTokens(maxPromptTokens.orElse(null) as Long?) /** * Set of 16 key-value pairs that can be attached to an object. This can be useful for * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { body.metadata(metadata) } + fun metadata(metadata: JsonValue?) = apply { body.metadata(metadata) } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) /** * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used * to execute this run. If a value is provided here, it will override the model associated * with the assistant. If not, the model associated with the assistant will be used. */ - fun model(model: ChatModel) = apply { body.model(model) } + fun model(model: ChatModel?) = apply { body.model(model) } + + /** + * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used + * to execute this run. If a value is provided here, it will override the model associated + * with the assistant. If not, the model associated with the assistant will be used. + */ + fun model(model: Optional) = model(model.orElse(null)) /** * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used @@ -702,10 +933,27 @@ constructor( * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ - fun parallelToolCalls(parallelToolCalls: Boolean) = apply { + fun parallelToolCalls(parallelToolCalls: Boolean?) = apply { body.parallelToolCalls(parallelToolCalls) } + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. + */ + fun parallelToolCalls(parallelToolCalls: Boolean) = + parallelToolCalls(parallelToolCalls as Boolean?) + + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun parallelToolCalls(parallelToolCalls: Optional) = + parallelToolCalls(parallelToolCalls.orElse(null) as Boolean?) + /** * Specifies the format that the model must output. Compatible with * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 @@ -726,10 +974,33 @@ constructor( * partially cut off if `finish_reason="length"`, which indicates the generation exceeded * `max_tokens` or the conversation exceeded the max context length. */ - fun responseFormat(responseFormat: AssistantResponseFormatOption) = apply { + fun responseFormat(responseFormat: AssistantResponseFormatOption?) 
= apply { body.responseFormat(responseFormat) } + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 + * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 + * Turbo models since `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs + * which ensures the model will match your supplied JSON schema. Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the + * model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + * yourself via a system or user message. Without this, the model may generate an unending + * stream of whitespace until the generation reaches the token limit, resulting in a + * long-running and seemingly "stuck" request. Also note that the message content may be + * partially cut off if `finish_reason="length"`, which indicates the generation exceeded + * `max_tokens` or the conversation exceeded the max context length. + */ + fun responseFormat(responseFormat: Optional) = + responseFormat(responseFormat.orElse(null)) + /** `auto` is the default value */ fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) = apply { body.responseFormat(behavior) @@ -752,10 +1023,29 @@ constructor( * output more random, while lower values like 0.2 will make it more focused and * deterministic. */ - fun temperature(temperature: Double) = apply { body.temperature(temperature) } + fun temperature(temperature: Double?) = apply { body.temperature(temperature) } + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + fun temperature(temperature: Double) = temperature(temperature as Double?) + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun temperature(temperature: Optional) = + temperature(temperature.orElse(null) as Double?) + + /** If no thread is provided, an empty thread will be created. */ + fun thread(thread: Thread?) = apply { body.thread(thread) } /** If no thread is provided, an empty thread will be created. */ - fun thread(thread: Thread) = apply { body.thread(thread) } + fun thread(thread: Optional) = thread(thread.orElse(null)) /** * Controls which (if any) tool is called by the model. `none` means the model will not call @@ -765,10 +1055,21 @@ constructor( * particular tool like `{"type": "file_search"}` or `{"type": "function", "function": * {"name": "my_function"}}` forces the model to call that tool. */ - fun toolChoice(toolChoice: AssistantToolChoiceOption) = apply { + fun toolChoice(toolChoice: AssistantToolChoiceOption?) = apply { body.toolChoice(toolChoice) } + /** + * Controls which (if any) tool is called by the model. `none` means the model will not call + * any tools and instead generates a message. `auto` is the default value and means the + * model can pick between generating a message or calling one or more tools. 
`required`
+         * means the model must call one or more tools before responding to the user. Specifying a
+         * particular tool like `{"type": "file_search"}` or `{"type": "function", "function":
+         * {"name": "my_function"}}` forces the model to call that tool.
+         */
+        fun toolChoice(toolChoice: Optional<AssistantToolChoiceOption>) =
+            toolChoice(toolChoice.orElse(null))
+
        /**
         * `none` means the model will not call any tools and instead generates a message. `auto`
         * means the model can pick between generating a message or calling one or more tools.
@@ -790,15 +1091,29 @@ constructor(
         * the type of tool. For example, the `code_interpreter` tool requires a list of file IDs,
         * while the `file_search` tool requires a list of vector store IDs.
         */
-        fun toolResources(toolResources: ToolResources) = apply {
+        fun toolResources(toolResources: ToolResources?) = apply {
            body.toolResources(toolResources)
        }

+        /**
+         * A set of resources that are used by the assistant's tools. The resources are specific to
+         * the type of tool. For example, the `code_interpreter` tool requires a list of file IDs,
+         * while the `file_search` tool requires a list of vector store IDs.
+         */
+        fun toolResources(toolResources: Optional<ToolResources>) =
+            toolResources(toolResources.orElse(null))
+
+        /**
+         * Override the tools the assistant can use for this run. This is useful for modifying the
+         * behavior on a per-run basis.
+         */
+        fun tools(tools: List<AssistantTool>?) = apply { body.tools(tools) }
+
        /**
         * Override the tools the assistant can use for this run. This is useful for modifying the
         * behavior on a per-run basis.
         */
-        fun tools(tools: List<AssistantTool>) = apply { body.tools(tools) }
+        fun tools(tools: Optional<List<AssistantTool>>) = tools(tools.orElse(null))

        /**
         * Override the tools the assistant can use for this run. This is useful for modifying the
@@ -813,16 +1128,42 @@ constructor(
         *
         * We generally recommend altering this or temperature but not both.
         */
-        fun topP(topP: Double) = apply { body.topP(topP) }
+        fun topP(topP: Double?) = apply { body.topP(topP) }
+
+        /**
+         * An alternative to sampling with temperature, called nucleus sampling, where the model
+         * considers the results of the tokens with top_p probability mass. So 0.1 means only the
+         * tokens comprising the top 10% probability mass are considered.
+         *
+         * We generally recommend altering this or temperature but not both.
+         */
+        fun topP(topP: Double) = topP(topP as Double?)
+
+        /**
+         * An alternative to sampling with temperature, called nucleus sampling, where the model
+         * considers the results of the tokens with top_p probability mass. So 0.1 means only the
+         * tokens comprising the top 10% probability mass are considered.
+         *
+         * We generally recommend altering this or temperature but not both.
+         */
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun topP(topP: Optional<Double>) = topP(topP.orElse(null) as Double?)

        /**
         * Controls for how a thread will be truncated prior to the run. Use this to control the
         * initial context window of the run.
         */
-        fun truncationStrategy(truncationStrategy: TruncationStrategy) = apply {
+        fun truncationStrategy(truncationStrategy: TruncationStrategy?) = apply {
            body.truncationStrategy(truncationStrategy)
        }

+        /**
+         * Controls for how a thread will be truncated prior to the run. Use this to control the
+         * initial context window of the run.
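+         *
+         * A chained example (editor's sketch; `"asst_123"` is a placeholder ID and the usual
+         * `builder()` factory is assumed): nullable and `Optional` setters compose in one chain,
+         * e.g. `BetaThreadCreateAndRunParams.builder().assistantId("asst_123").instructions(null).truncationStrategy(Optional.empty()).build()`.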
+ */ + fun truncationStrategy(truncationStrategy: Optional) = + truncationStrategy(truncationStrategy.orElse(null)) + fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() putAllAdditionalHeaders(additionalHeaders) @@ -1014,10 +1355,16 @@ constructor( * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to * start the thread with. */ - fun messages(messages: List) = apply { - this.messages = messages.toMutableList() + fun messages(messages: List?) = apply { + this.messages = messages?.toMutableList() } + /** + * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + * start the thread with. + */ + fun messages(messages: Optional>) = messages(messages.orElse(null)) + /** * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to * start the thread with. @@ -1031,7 +1378,14 @@ constructor( * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) /** * A set of resources that are made available to the assistant's tools in this thread. @@ -1039,10 +1393,19 @@ constructor( * tool requires a list of file IDs, while the `file_search` tool requires a list of * vector store IDs. */ - fun toolResources(toolResources: ToolResources) = apply { + fun toolResources(toolResources: ToolResources?) = apply { this.toolResources = toolResources } + /** + * A set of resources that are made available to the assistant's tools in this thread. + * The resources are specific to the type of tool. For example, the `code_interpreter` + * tool requires a list of file IDs, while the `file_search` tool requires a list of + * vector store IDs. + */ + fun toolResources(toolResources: Optional) = + toolResources(toolResources.orElse(null)) + fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() putAllAdditionalProperties(additionalProperties) @@ -1164,10 +1527,16 @@ constructor( /** * A list of files attached to the message, and the tools they should be added to. */ - fun attachments(attachments: List) = apply { - this.attachments = attachments.toMutableList() + fun attachments(attachments: List?) = apply { + this.attachments = attachments?.toMutableList() } + /** + * A list of files attached to the message, and the tools they should be added to. + */ + fun attachments(attachments: Optional>) = + attachments(attachments.orElse(null)) + /** * A list of files attached to the message, and the tools they should be added to. */ @@ -1181,7 +1550,15 @@ constructor( * can be a maximum of 64 characters long and values can be a maximum of 512 * characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format. 
Keys + * can be a maximum of 64 characters long and values can be a maximum of 512 + * characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -1443,10 +1820,16 @@ constructor( } /** The ID of the file to attach to the message. */ - fun fileId(fileId: String) = apply { this.fileId = fileId } + fun fileId(fileId: String?) = apply { this.fileId = fileId } + + /** The ID of the file to attach to the message. */ + fun fileId(fileId: Optional) = fileId(fileId.orElse(null)) + + /** The tools to add this file to. */ + fun tools(tools: List?) = apply { this.tools = tools?.toMutableList() } /** The tools to add this file to. */ - fun tools(tools: List) = apply { this.tools = tools.toMutableList() } + fun tools(tools: Optional>) = tools(tools.orElse(null)) /** The tools to add this file to. */ fun addTool(tool: Tool) = apply { @@ -1821,11 +2204,17 @@ constructor( additionalProperties = toolResources.additionalProperties.toMutableMap() } - fun codeInterpreter(codeInterpreter: CodeInterpreter) = apply { + fun codeInterpreter(codeInterpreter: CodeInterpreter?) = apply { this.codeInterpreter = codeInterpreter } - fun fileSearch(fileSearch: FileSearch) = apply { this.fileSearch = fileSearch } + fun codeInterpreter(codeInterpreter: Optional) = + codeInterpreter(codeInterpreter.orElse(null)) + + fun fileSearch(fileSearch: FileSearch?) = apply { this.fileSearch = fileSearch } + + fun fileSearch(fileSearch: Optional) = + fileSearch(fileSearch.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -1901,10 +2290,17 @@ constructor( * made available to the `code_interpreter` tool. There can be a maximum of 20 * files associated with the tool. */ - fun fileIds(fileIds: List) = apply { - this.fileIds = fileIds.toMutableList() + fun fileIds(fileIds: List?) = apply { + this.fileIds = fileIds?.toMutableList() } + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs + * made available to the `code_interpreter` tool. There can be a maximum of 20 + * files associated with the tool. + */ + fun fileIds(fileIds: Optional>) = fileIds(fileIds.orElse(null)) + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs * made available to the `code_interpreter` tool. There can be a maximum of 20 @@ -2016,10 +2412,19 @@ constructor( * attached to this thread. There can be a maximum of 1 vector store attached to * the thread. */ - fun vectorStoreIds(vectorStoreIds: List) = apply { - this.vectorStoreIds = vectorStoreIds.toMutableList() + fun vectorStoreIds(vectorStoreIds: List?) = apply { + this.vectorStoreIds = vectorStoreIds?.toMutableList() } + /** + * The + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this thread. There can be a maximum of 1 vector store attached to + * the thread. + */ + fun vectorStoreIds(vectorStoreIds: Optional>) = + vectorStoreIds(vectorStoreIds.orElse(null)) + /** * The * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) @@ -2037,10 +2442,19 @@ constructor( * with file_ids and attach it to this thread. There can be a maximum of 1 * vector store attached to the thread. */ - fun vectorStores(vectorStores: List) = apply { - this.vectorStores = vectorStores.toMutableList() + fun vectorStores(vectorStores: List?) 
= apply { + this.vectorStores = vectorStores?.toMutableList() } + /** + * A helper to create a + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * with file_ids and attach it to this thread. There can be a maximum of 1 + * vector store attached to the thread. + */ + fun vectorStores(vectorStores: Optional>) = + vectorStores(vectorStores.orElse(null)) + /** * A helper to create a * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) @@ -2149,10 +2563,18 @@ constructor( * The chunking strategy used to chunk the file(s). If not set, will use the * `auto` strategy. Only applicable if `file_ids` is non-empty. */ - fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam) = apply { + fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam?) = apply { this.chunkingStrategy = chunkingStrategy } + /** + * The chunking strategy used to chunk the file(s). If not set, will use the + * `auto` strategy. Only applicable if `file_ids` is non-empty. + */ + fun chunkingStrategy( + chunkingStrategy: Optional + ) = chunkingStrategy(chunkingStrategy.orElse(null)) + /** * The default strategy. This strategy currently uses a * `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`. @@ -2180,10 +2602,17 @@ constructor( * IDs to add to the vector store. There can be a maximum of 10000 files in * a vector store. */ - fun fileIds(fileIds: List) = apply { - this.fileIds = fileIds.toMutableList() + fun fileIds(fileIds: List?) = apply { + this.fileIds = fileIds?.toMutableList() } + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) + * IDs to add to the vector store. There can be a maximum of 10000 files in + * a vector store. + */ + fun fileIds(fileIds: Optional>) = fileIds(fileIds.orElse(null)) + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) * IDs to add to the vector store. There can be a maximum of 10000 files in @@ -2199,7 +2628,16 @@ constructor( * in a structured format. Keys can be a maximum of 64 characters long and * values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to a vector store. This + * can be useful for storing additional information about the vector store + * in a structured format. Keys can be a maximum of 64 characters long and + * values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = + metadata(metadata.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { @@ -2349,11 +2787,16 @@ constructor( additionalProperties = toolResources.additionalProperties.toMutableMap() } - fun codeInterpreter(codeInterpreter: CodeInterpreter) = apply { + fun codeInterpreter(codeInterpreter: CodeInterpreter?) = apply { this.codeInterpreter = codeInterpreter } - fun fileSearch(fileSearch: FileSearch) = apply { this.fileSearch = fileSearch } + fun codeInterpreter(codeInterpreter: Optional) = + codeInterpreter(codeInterpreter.orElse(null)) + + fun fileSearch(fileSearch: FileSearch?) = apply { this.fileSearch = fileSearch } + + fun fileSearch(fileSearch: Optional) = fileSearch(fileSearch.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -2426,10 +2869,17 @@ constructor( * available to the `code_interpreter` tool. 
There can be a maximum of 20 files * associated with the tool. */ - fun fileIds(fileIds: List) = apply { - this.fileIds = fileIds.toMutableList() + fun fileIds(fileIds: List?) = apply { + this.fileIds = fileIds?.toMutableList() } + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + fun fileIds(fileIds: Optional>) = fileIds(fileIds.orElse(null)) + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made * available to the `code_interpreter` tool. There can be a maximum of 20 files @@ -2529,10 +2979,19 @@ constructor( * attached to this assistant. There can be a maximum of 1 vector store attached to * the assistant. */ - fun vectorStoreIds(vectorStoreIds: List) = apply { - this.vectorStoreIds = vectorStoreIds.toMutableList() + fun vectorStoreIds(vectorStoreIds: List?) = apply { + this.vectorStoreIds = vectorStoreIds?.toMutableList() } + /** + * The ID of the + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this assistant. There can be a maximum of 1 vector store attached to + * the assistant. + */ + fun vectorStoreIds(vectorStoreIds: Optional>) = + vectorStoreIds(vectorStoreIds.orElse(null)) + /** * The ID of the * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) @@ -2796,7 +3255,21 @@ constructor( * The number of most recent messages from the thread when constructing the context for * the run. */ - fun lastMessages(lastMessages: Long) = apply { this.lastMessages = lastMessages } + fun lastMessages(lastMessages: Long?) = apply { this.lastMessages = lastMessages } + + /** + * The number of most recent messages from the thread when constructing the context for + * the run. + */ + fun lastMessages(lastMessages: Long) = lastMessages(lastMessages as Long?) + + /** + * The number of most recent messages from the thread when constructing the context for + * the run. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun lastMessages(lastMessages: Optional) = + lastMessages(lastMessages.orElse(null) as Long?) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateParams.kt index 4104f8c36..7f2fad0c7 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadCreateParams.kt @@ -134,10 +134,16 @@ constructor( * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to * start the thread with. */ - fun messages(messages: List) = apply { - this.messages = messages.toMutableList() + fun messages(messages: List?) = apply { + this.messages = messages?.toMutableList() } + /** + * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to + * start the thread with. + */ + fun messages(messages: Optional>) = messages(messages.orElse(null)) + /** * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to * start the thread with. @@ -151,7 +157,14 @@ constructor( * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. 
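+             *
+             * Illustration (editor's note; the key and value are hypothetical): a caller might
+             * supply `metadata(JsonValue.from(mapOf("user_ref" to "abc-123")))`, assuming the
+             * `JsonValue.from` conversion helper, or clear the field with `metadata(null)` /
+             * `metadata(Optional.empty())`.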
*/ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) /** * A set of resources that are made available to the assistant's tools in this thread. @@ -159,10 +172,19 @@ constructor( * tool requires a list of file IDs, while the `file_search` tool requires a list of * vector store IDs. */ - fun toolResources(toolResources: ToolResources) = apply { + fun toolResources(toolResources: ToolResources?) = apply { this.toolResources = toolResources } + /** + * A set of resources that are made available to the assistant's tools in this thread. + * The resources are specific to the type of tool. For example, the `code_interpreter` + * tool requires a list of file IDs, while the `file_search` tool requires a list of + * vector store IDs. + */ + fun toolResources(toolResources: Optional) = + toolResources(toolResources.orElse(null)) + fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() putAllAdditionalProperties(additionalProperties) @@ -234,7 +256,13 @@ constructor( * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start * the thread with. */ - fun messages(messages: List) = apply { body.messages(messages) } + fun messages(messages: List?) = apply { body.messages(messages) } + + /** + * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start + * the thread with. + */ + fun messages(messages: Optional>) = messages(messages.orElse(null)) /** * A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start @@ -247,7 +275,14 @@ constructor( * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { body.metadata(metadata) } + fun metadata(metadata: JsonValue?) = apply { body.metadata(metadata) } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) /** * A set of resources that are made available to the assistant's tools in this thread. The @@ -255,10 +290,19 @@ constructor( * requires a list of file IDs, while the `file_search` tool requires a list of vector store * IDs. */ - fun toolResources(toolResources: ToolResources) = apply { + fun toolResources(toolResources: ToolResources?) = apply { body.toolResources(toolResources) } + /** + * A set of resources that are made available to the assistant's tools in this thread. The + * resources are specific to the type of tool. For example, the `code_interpreter` tool + * requires a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. 
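+         *
+         * For example (editor's note): a configured `ToolResources` attaches file IDs or vector
+         * store IDs to the new thread, while `toolResources(null)` or
+         * `toolResources(Optional.empty())` simply omits `tool_resources` from the request.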
+ */ + fun toolResources(toolResources: Optional) = + toolResources(toolResources.orElse(null)) + fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() putAllAdditionalHeaders(additionalHeaders) @@ -476,10 +520,14 @@ constructor( fun role(role: Role) = apply { this.role = role } /** A list of files attached to the message, and the tools they should be added to. */ - fun attachments(attachments: List) = apply { - this.attachments = attachments.toMutableList() + fun attachments(attachments: List?) = apply { + this.attachments = attachments?.toMutableList() } + /** A list of files attached to the message, and the tools they should be added to. */ + fun attachments(attachments: Optional>) = + attachments(attachments.orElse(null)) + /** A list of files attached to the message, and the tools they should be added to. */ fun addAttachment(attachment: Attachment) = apply { attachments = (attachments ?: mutableListOf()).apply { add(attachment) } @@ -490,7 +538,14 @@ constructor( * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -744,10 +799,16 @@ constructor( } /** The ID of the file to attach to the message. */ - fun fileId(fileId: String) = apply { this.fileId = fileId } + fun fileId(fileId: String?) = apply { this.fileId = fileId } + + /** The ID of the file to attach to the message. */ + fun fileId(fileId: Optional) = fileId(fileId.orElse(null)) + + /** The tools to add this file to. */ + fun tools(tools: List?) = apply { this.tools = tools?.toMutableList() } /** The tools to add this file to. */ - fun tools(tools: List) = apply { this.tools = tools.toMutableList() } + fun tools(tools: Optional>) = tools(tools.orElse(null)) /** The tools to add this file to. */ fun addTool(tool: Tool) = apply { @@ -1117,11 +1178,16 @@ constructor( additionalProperties = toolResources.additionalProperties.toMutableMap() } - fun codeInterpreter(codeInterpreter: CodeInterpreter) = apply { + fun codeInterpreter(codeInterpreter: CodeInterpreter?) = apply { this.codeInterpreter = codeInterpreter } - fun fileSearch(fileSearch: FileSearch) = apply { this.fileSearch = fileSearch } + fun codeInterpreter(codeInterpreter: Optional) = + codeInterpreter(codeInterpreter.orElse(null)) + + fun fileSearch(fileSearch: FileSearch?) = apply { this.fileSearch = fileSearch } + + fun fileSearch(fileSearch: Optional) = fileSearch(fileSearch.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -1194,10 +1260,17 @@ constructor( * available to the `code_interpreter` tool. There can be a maximum of 20 files * associated with the tool. */ - fun fileIds(fileIds: List) = apply { - this.fileIds = fileIds.toMutableList() + fun fileIds(fileIds: List?) 
= apply { + this.fileIds = fileIds?.toMutableList() } + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + fun fileIds(fileIds: Optional>) = fileIds(fileIds.orElse(null)) + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made * available to the `code_interpreter` tool. There can be a maximum of 20 files @@ -1309,10 +1382,19 @@ constructor( * attached to this thread. There can be a maximum of 1 vector store attached to the * thread. */ - fun vectorStoreIds(vectorStoreIds: List) = apply { - this.vectorStoreIds = vectorStoreIds.toMutableList() + fun vectorStoreIds(vectorStoreIds: List?) = apply { + this.vectorStoreIds = vectorStoreIds?.toMutableList() } + /** + * The + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this thread. There can be a maximum of 1 vector store attached to the + * thread. + */ + fun vectorStoreIds(vectorStoreIds: Optional>) = + vectorStoreIds(vectorStoreIds.orElse(null)) + /** * The * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) @@ -1330,10 +1412,19 @@ constructor( * with file_ids and attach it to this thread. There can be a maximum of 1 vector * store attached to the thread. */ - fun vectorStores(vectorStores: List) = apply { - this.vectorStores = vectorStores.toMutableList() + fun vectorStores(vectorStores: List?) = apply { + this.vectorStores = vectorStores?.toMutableList() } + /** + * A helper to create a + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * with file_ids and attach it to this thread. There can be a maximum of 1 vector + * store attached to the thread. + */ + fun vectorStores(vectorStores: Optional>) = + vectorStores(vectorStores.orElse(null)) + /** * A helper to create a * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) @@ -1440,10 +1531,17 @@ constructor( * The chunking strategy used to chunk the file(s). If not set, will use the * `auto` strategy. Only applicable if `file_ids` is non-empty. */ - fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam) = apply { + fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam?) = apply { this.chunkingStrategy = chunkingStrategy } + /** + * The chunking strategy used to chunk the file(s). If not set, will use the + * `auto` strategy. Only applicable if `file_ids` is non-empty. + */ + fun chunkingStrategy(chunkingStrategy: Optional) = + chunkingStrategy(chunkingStrategy.orElse(null)) + /** * The default strategy. This strategy currently uses a `max_chunk_size_tokens` * of `800` and `chunk_overlap_tokens` of `400`. @@ -1471,10 +1569,17 @@ constructor( * add to the vector store. There can be a maximum of 10000 files in a vector * store. */ - fun fileIds(fileIds: List) = apply { - this.fileIds = fileIds.toMutableList() + fun fileIds(fileIds: List?) = apply { + this.fileIds = fileIds?.toMutableList() } + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to + * add to the vector store. There can be a maximum of 10000 files in a vector + * store. + */ + fun fileIds(fileIds: Optional>) = fileIds(fileIds.orElse(null)) + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to * add to the vector store. 
There can be a maximum of 10000 files in a vector @@ -1490,7 +1595,15 @@ constructor( * structured format. Keys can be a maximum of 64 characters long and values can * be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to a vector store. This can be + * useful for storing additional information about the vector store in a + * structured format. Keys can be a maximum of 64 characters long and values can + * be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageCreateParams.kt index 358eeb2a8..a87b825dc 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageCreateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageCreateParams.kt @@ -174,10 +174,14 @@ constructor( fun role(role: Role) = apply { this.role = role } /** A list of files attached to the message, and the tools they should be added to. */ - fun attachments(attachments: List) = apply { - this.attachments = attachments.toMutableList() + fun attachments(attachments: List?) = apply { + this.attachments = attachments?.toMutableList() } + /** A list of files attached to the message, and the tools they should be added to. */ + fun attachments(attachments: Optional>) = + attachments(attachments.orElse(null)) + /** A list of files attached to the message, and the tools they should be added to. */ fun addAttachment(attachment: Attachment) = apply { attachments = (attachments ?: mutableListOf()).apply { add(attachment) } @@ -188,7 +192,14 @@ constructor( * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -289,7 +300,11 @@ constructor( fun role(role: Role) = apply { body.role(role) } /** A list of files attached to the message, and the tools they should be added to. */ - fun attachments(attachments: List) = apply { body.attachments(attachments) } + fun attachments(attachments: List?) = apply { body.attachments(attachments) } + + /** A list of files attached to the message, and the tools they should be added to. */ + fun attachments(attachments: Optional>) = + attachments(attachments.orElse(null)) /** A list of files attached to the message, and the tools they should be added to. */ fun addAttachment(attachment: Attachment) = apply { body.addAttachment(attachment) } @@ -299,7 +314,14 @@ constructor( * storing additional information about the object in a structured format. 
Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { body.metadata(metadata) } + fun metadata(metadata: JsonValue?) = apply { body.metadata(metadata) } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() @@ -647,10 +669,16 @@ constructor( } /** The ID of the file to attach to the message. */ - fun fileId(fileId: String) = apply { this.fileId = fileId } + fun fileId(fileId: String?) = apply { this.fileId = fileId } + + /** The ID of the file to attach to the message. */ + fun fileId(fileId: Optional) = fileId(fileId.orElse(null)) + + /** The tools to add this file to. */ + fun tools(tools: List?) = apply { this.tools = tools?.toMutableList() } /** The tools to add this file to. */ - fun tools(tools: List) = apply { this.tools = tools.toMutableList() } + fun tools(tools: Optional>) = tools(tools.orElse(null)) /** The tools to add this file to. */ fun addTool(tool: Tool) = apply { diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageListParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageListParams.kt index 941b88e4a..10f770633 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageListParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageListParams.kt @@ -119,7 +119,23 @@ constructor( * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page * of the list. */ - fun after(after: String) = apply { this.after = after } + fun after(after: String?) = apply { this.after = after } + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, ending with + * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page + * of the list. + */ + fun after(after: Optional) = after(after.orElse(null)) + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, starting with + * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous + * page of the list. + */ + fun before(before: String?) = apply { this.before = before } /** * A cursor for use in pagination. `before` is an object ID that defines your place in the @@ -127,22 +143,44 @@ constructor( * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous * page of the list. */ - fun before(before: String) = apply { this.before = before } + fun before(before: Optional) = before(before.orElse(null)) /** * A limit on the number of objects to be returned. Limit can range between 1 and 100, and * the default is 20. */ - fun limit(limit: Long) = apply { this.limit = limit } + fun limit(limit: Long?) = apply { this.limit = limit } + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + fun limit(limit: Long) = limit(limit as Long?) 
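+        // Editor's illustrative sketch (placeholder thread ID; the builder() factory and
+        // threadId setter are assumed from the surrounding generated API):
+        //   BetaThreadMessageListParams.builder().threadId("thread_123").limit(20L).build()
+        // requests an explicit page size, while limit(null) or limit(Optional.empty())
+        // leaves the parameter off the query string so the documented default of 20 applies.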
+ + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun limit(limit: Optional) = limit(limit.orElse(null) as Long?) /** * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and * `desc` for descending order. */ - fun order(order: Order) = apply { this.order = order } + fun order(order: Order?) = apply { this.order = order } + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and + * `desc` for descending order. + */ + fun order(order: Optional) = order(order.orElse(null)) + + /** Filter messages by the run ID that generated them. */ + fun runId(runId: String?) = apply { this.runId = runId } /** Filter messages by the run ID that generated them. */ - fun runId(runId: String) = apply { this.runId = runId } + fun runId(runId: Optional) = runId(runId.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageUpdateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageUpdateParams.kt index d14346d17..df9c2c0fa 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageUpdateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadMessageUpdateParams.kt @@ -101,7 +101,14 @@ constructor( * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -179,7 +186,14 @@ constructor( * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { body.metadata(metadata) } + fun metadata(metadata: JsonValue?) = apply { body.metadata(metadata) } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. 
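+         *
+         * Concretely (editor's note): an update can drop metadata staged on the builder with
+         * `metadata(Optional.empty())`, which routes through the nullable overload above so the
+         * serialized request omits the `metadata` field rather than sending an empty object.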
+ */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunCreateParams.kt index 90742d1dc..8a7c48a04 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunCreateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunCreateParams.kt @@ -415,15 +415,27 @@ constructor( * useful for modifying the behavior on a per-run basis without overriding other * instructions. */ - fun additionalInstructions(additionalInstructions: String) = apply { + fun additionalInstructions(additionalInstructions: String?) = apply { this.additionalInstructions = additionalInstructions } + /** + * Appends additional instructions at the end of the instructions for the run. This is + * useful for modifying the behavior on a per-run basis without overriding other + * instructions. + */ + fun additionalInstructions(additionalInstructions: Optional) = + additionalInstructions(additionalInstructions.orElse(null)) + /** Adds additional messages to the thread before creating the run. */ - fun additionalMessages(additionalMessages: List) = apply { - this.additionalMessages = additionalMessages.toMutableList() + fun additionalMessages(additionalMessages: List?) = apply { + this.additionalMessages = additionalMessages?.toMutableList() } + /** Adds additional messages to the thread before creating the run. */ + fun additionalMessages(additionalMessages: Optional>) = + additionalMessages(additionalMessages.orElse(null)) + /** Adds additional messages to the thread before creating the run. */ fun addAdditionalMessage(additionalMessage: AdditionalMessage) = apply { additionalMessages = @@ -435,7 +447,15 @@ constructor( * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) * of the assistant. This is useful for modifying the behavior on a per-run basis. */ - fun instructions(instructions: String) = apply { this.instructions = instructions } + fun instructions(instructions: String?) = apply { this.instructions = instructions } + + /** + * Overrides the + * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + * of the assistant. This is useful for modifying the behavior on a per-run basis. + */ + fun instructions(instructions: Optional) = + instructions(instructions.orElse(null)) /** * The maximum number of completion tokens that may be used over the course of the run. @@ -444,26 +464,72 @@ constructor( * completion tokens specified, the run will end with status `incomplete`. See * `incomplete_details` for more info. */ - fun maxCompletionTokens(maxCompletionTokens: Long) = apply { + fun maxCompletionTokens(maxCompletionTokens: Long?) = apply { this.maxCompletionTokens = maxCompletionTokens } + /** + * The maximum number of completion tokens that may be used over the course of the run. + * The run will make a best effort to use only the number of completion tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `incomplete`. See + * `incomplete_details` for more info. + */ + fun maxCompletionTokens(maxCompletionTokens: Long) = + maxCompletionTokens(maxCompletionTokens as Long?) 
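+            // Editor's note on the pattern above: the `as Long?` cast boxes the value so the
+            // call dispatches to the nullable overload rather than recursing into this one; the
+            // @Suppress("USELESS_CAST") on the Optional variants is needed because the compiler
+            // wrongly reports that cast as redundant (see KT-74228) even though it is what
+            // selects the `Long?` overload over `Long`.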
+ + /** + * The maximum number of completion tokens that may be used over the course of the run. + * The run will make a best effort to use only the number of completion tokens + * specified, across multiple turns of the run. If the run exceeds the number of + * completion tokens specified, the run will end with status `incomplete`. See + * `incomplete_details` for more info. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun maxCompletionTokens(maxCompletionTokens: Optional) = + maxCompletionTokens(maxCompletionTokens.orElse(null) as Long?) + /** * The maximum number of prompt tokens that may be used over the course of the run. The * run will make a best effort to use only the number of prompt tokens specified, across * multiple turns of the run. If the run exceeds the number of prompt tokens specified, * the run will end with status `incomplete`. See `incomplete_details` for more info. */ - fun maxPromptTokens(maxPromptTokens: Long) = apply { + fun maxPromptTokens(maxPromptTokens: Long?) = apply { this.maxPromptTokens = maxPromptTokens } + /** + * The maximum number of prompt tokens that may be used over the course of the run. The + * run will make a best effort to use only the number of prompt tokens specified, across + * multiple turns of the run. If the run exceeds the number of prompt tokens specified, + * the run will end with status `incomplete`. See `incomplete_details` for more info. + */ + fun maxPromptTokens(maxPromptTokens: Long) = maxPromptTokens(maxPromptTokens as Long?) + + /** + * The maximum number of prompt tokens that may be used over the course of the run. The + * run will make a best effort to use only the number of prompt tokens specified, across + * multiple turns of the run. If the run exceeds the number of prompt tokens specified, + * the run will end with status `incomplete`. See `incomplete_details` for more info. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun maxPromptTokens(maxPromptTokens: Optional) = + maxPromptTokens(maxPromptTokens.orElse(null) as Long?) + /** * Set of 16 key-value pairs that can be attached to an object. This can be useful for * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) /** * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be @@ -471,7 +537,15 @@ constructor( * associated with the assistant. If not, the model associated with the assistant will * be used. */ - fun model(model: ChatModel) = apply { this.model = model } + fun model(model: ChatModel?) = apply { this.model = model } + + /** + * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be + * used to execute this run. If a value is provided here, it will override the model + * associated with the assistant. If not, the model associated with the assistant will + * be used. 
+ */ + fun model(model: Optional) = model(model.orElse(null)) /** * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be @@ -486,10 +560,27 @@ constructor( * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ - fun parallelToolCalls(parallelToolCalls: Boolean) = apply { + fun parallelToolCalls(parallelToolCalls: Boolean?) = apply { this.parallelToolCalls = parallelToolCalls } + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. + */ + fun parallelToolCalls(parallelToolCalls: Boolean) = + parallelToolCalls(parallelToolCalls as Boolean?) + + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun parallelToolCalls(parallelToolCalls: Optional) = + parallelToolCalls(parallelToolCalls.orElse(null) as Boolean?) + /** * Specifies the format that the model must output. Compatible with * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 @@ -511,10 +602,34 @@ constructor( * may be partially cut off if `finish_reason="length"`, which indicates the generation * exceeded `max_tokens` or the conversation exceeded the max context length. */ - fun responseFormat(responseFormat: AssistantResponseFormatOption) = apply { + fun responseFormat(responseFormat: AssistantResponseFormatOption?) = apply { this.responseFormat = responseFormat } + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 + * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all + * GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which ensures the model will match your supplied JSON schema. Learn more in + * the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message + * the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to produce + * JSON yourself via a system or user message. Without this, the model may generate an + * unending stream of whitespace until the generation reaches the token limit, resulting + * in a long-running and seemingly "stuck" request. Also note that the message content + * may be partially cut off if `finish_reason="length"`, which indicates the generation + * exceeded `max_tokens` or the conversation exceeded the max context length. + */ + fun responseFormat(responseFormat: Optional) = + responseFormat(responseFormat.orElse(null)) + /** `auto` is the default value */ fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) = apply { this.responseFormat = AssistantResponseFormatOption.ofBehavior(behavior) @@ -544,7 +659,23 @@ constructor( * the output more random, while lower values like 0.2 will make it more focused and * deterministic. */ - fun temperature(temperature: Double) = apply { this.temperature = temperature } + fun temperature(temperature: Double?) 
= apply { this.temperature = temperature } + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + fun temperature(temperature: Double) = temperature(temperature as Double?) + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun temperature(temperature: Optional) = + temperature(temperature.orElse(null) as Double?) /** * Controls which (if any) tool is called by the model. `none` means the model will not @@ -554,10 +685,21 @@ constructor( * Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", * "function": {"name": "my_function"}}` forces the model to call that tool. */ - fun toolChoice(toolChoice: AssistantToolChoiceOption) = apply { + fun toolChoice(toolChoice: AssistantToolChoiceOption?) = apply { this.toolChoice = toolChoice } + /** + * Controls which (if any) tool is called by the model. `none` means the model will not + * call any tools and instead generates a message. `auto` is the default value and means + * the model can pick between generating a message or calling one or more tools. + * `required` means the model must call one or more tools before responding to the user. + * Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", + * "function": {"name": "my_function"}}` forces the model to call that tool. + */ + fun toolChoice(toolChoice: Optional) = + toolChoice(toolChoice.orElse(null)) + /** * `none` means the model will not call any tools and instead generates a message. * `auto` means the model can pick between generating a message or calling one or more @@ -581,7 +723,13 @@ constructor( * Override the tools the assistant can use for this run. This is useful for modifying * the behavior on a per-run basis. */ - fun tools(tools: List) = apply { this.tools = tools.toMutableList() } + fun tools(tools: List?) = apply { this.tools = tools?.toMutableList() } + + /** + * Override the tools the assistant can use for this run. This is useful for modifying + * the behavior on a per-run basis. + */ + fun tools(tools: Optional>) = tools(tools.orElse(null)) /** * Override the tools the assistant can use for this run. This is useful for modifying @@ -598,16 +746,42 @@ constructor( * * We generally recommend altering this or temperature but not both. */ - fun topP(topP: Double) = apply { this.topP = topP } + fun topP(topP: Double?) = apply { this.topP = topP } + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only + * the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. + */ + fun topP(topP: Double) = topP(topP as Double?) + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only + * the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or temperature but not both. 
+             */
+            @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+            fun topP(topP: Optional<Double>) = topP(topP.orElse(null) as Double?)

            /**
             * Controls for how a thread will be truncated prior to the run. Use this to control the
             * initial context window of the run.
             */
-            fun truncationStrategy(truncationStrategy: TruncationStrategy) = apply {
+            fun truncationStrategy(truncationStrategy: TruncationStrategy?) = apply {
                this.truncationStrategy = truncationStrategy
            }

+            /**
+             * Controls for how a thread will be truncated prior to the run. Use this to control the
+             * initial context window of the run.
+             */
+            fun truncationStrategy(truncationStrategy: Optional<TruncationStrategy>) =
+                truncationStrategy(truncationStrategy.orElse(null))
+
            fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
                this.additionalProperties.clear()
                putAllAdditionalProperties(additionalProperties)
@@ -702,10 +876,21 @@ constructor(
         * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
         * for more information.
         */
-        fun include(include: List<RunStepInclude>) = apply {
-            this.include = include.toMutableList()
+        fun include(include: List<RunStepInclude>?) = apply {
+            this.include = include?.toMutableList()
        }

+        /**
+         * A list of additional fields to include in the response. Currently the only supported
+         * value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
+         * search result content.
+         *
+         * See the
+         * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
+         * for more information.
+         */
+        fun include(include: Optional<List<RunStepInclude>>) = include(include.orElse(null))
+
        /**
         * A list of additional fields to include in the response. Currently the only supported
         * value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file
         * search result content.
         *
         * See the
         * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
         * for more information.
         */
        fun addInclude(include: RunStepInclude) = apply {
            this.include = (this.include ?: mutableListOf()).apply { add(include) }
        }
@@ -730,15 +915,27 @@ constructor(
         * useful for modifying the behavior on a per-run basis without overriding other
         * instructions.
         */
-        fun additionalInstructions(additionalInstructions: String) = apply {
+        fun additionalInstructions(additionalInstructions: String?) = apply {
            body.additionalInstructions(additionalInstructions)
        }

+        /**
+         * Appends additional instructions at the end of the instructions for the run. This is
+         * useful for modifying the behavior on a per-run basis without overriding other
+         * instructions.
+         */
+        fun additionalInstructions(additionalInstructions: Optional<String>) =
+            additionalInstructions(additionalInstructions.orElse(null))
+
        /** Adds additional messages to the thread before creating the run. */
-        fun additionalMessages(additionalMessages: List<AdditionalMessage>) = apply {
+        fun additionalMessages(additionalMessages: List<AdditionalMessage>?) = apply {
            body.additionalMessages(additionalMessages)
        }

+        /** Adds additional messages to the thread before creating the run. */
+        fun additionalMessages(additionalMessages: Optional<List<AdditionalMessage>>) =
+            additionalMessages(additionalMessages.orElse(null))
+
        /** Adds additional messages to the thread before creating the run. */
        fun addAdditionalMessage(additionalMessage: AdditionalMessage) = apply {
            body.addAdditionalMessage(additionalMessage)
        }

        /**
         * Overrides the
         * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
         * of the assistant. This is useful for modifying the behavior on a per-run basis.
         */
-        fun instructions(instructions: String) = apply { body.instructions(instructions) }
+        fun instructions(instructions: String?)
= apply { body.instructions(instructions) } + + /** + * Overrides the + * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) + * of the assistant. This is useful for modifying the behavior on a per-run basis. + */ + fun instructions(instructions: Optional) = instructions(instructions.orElse(null)) /** * The maximum number of completion tokens that may be used over the course of the run. The @@ -757,31 +961,84 @@ constructor( * multiple turns of the run. If the run exceeds the number of completion tokens specified, * the run will end with status `incomplete`. See `incomplete_details` for more info. */ - fun maxCompletionTokens(maxCompletionTokens: Long) = apply { + fun maxCompletionTokens(maxCompletionTokens: Long?) = apply { body.maxCompletionTokens(maxCompletionTokens) } + /** + * The maximum number of completion tokens that may be used over the course of the run. The + * run will make a best effort to use only the number of completion tokens specified, across + * multiple turns of the run. If the run exceeds the number of completion tokens specified, + * the run will end with status `incomplete`. See `incomplete_details` for more info. + */ + fun maxCompletionTokens(maxCompletionTokens: Long) = + maxCompletionTokens(maxCompletionTokens as Long?) + + /** + * The maximum number of completion tokens that may be used over the course of the run. The + * run will make a best effort to use only the number of completion tokens specified, across + * multiple turns of the run. If the run exceeds the number of completion tokens specified, + * the run will end with status `incomplete`. See `incomplete_details` for more info. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun maxCompletionTokens(maxCompletionTokens: Optional) = + maxCompletionTokens(maxCompletionTokens.orElse(null) as Long?) + + /** + * The maximum number of prompt tokens that may be used over the course of the run. The run + * will make a best effort to use only the number of prompt tokens specified, across + * multiple turns of the run. If the run exceeds the number of prompt tokens specified, the + * run will end with status `incomplete`. See `incomplete_details` for more info. + */ + fun maxPromptTokens(maxPromptTokens: Long?) = apply { + body.maxPromptTokens(maxPromptTokens) + } + + /** + * The maximum number of prompt tokens that may be used over the course of the run. The run + * will make a best effort to use only the number of prompt tokens specified, across + * multiple turns of the run. If the run exceeds the number of prompt tokens specified, the + * run will end with status `incomplete`. See `incomplete_details` for more info. + */ + fun maxPromptTokens(maxPromptTokens: Long) = maxPromptTokens(maxPromptTokens as Long?) + /** * The maximum number of prompt tokens that may be used over the course of the run. The run * will make a best effort to use only the number of prompt tokens specified, across * multiple turns of the run. If the run exceeds the number of prompt tokens specified, the * run will end with status `incomplete`. See `incomplete_details` for more info. */ - fun maxPromptTokens(maxPromptTokens: Long) = apply { body.maxPromptTokens(maxPromptTokens) } + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun maxPromptTokens(maxPromptTokens: Optional) = + maxPromptTokens(maxPromptTokens.orElse(null) as Long?) /** * Set of 16 key-value pairs that can be attached to an object. 
This can be useful for * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { body.metadata(metadata) } + fun metadata(metadata: JsonValue?) = apply { body.metadata(metadata) } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) /** * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used * to execute this run. If a value is provided here, it will override the model associated * with the assistant. If not, the model associated with the assistant will be used. */ - fun model(model: ChatModel) = apply { body.model(model) } + fun model(model: ChatModel?) = apply { body.model(model) } + + /** + * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used + * to execute this run. If a value is provided here, it will override the model associated + * with the assistant. If not, the model associated with the assistant will be used. + */ + fun model(model: Optional) = model(model.orElse(null)) /** * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used @@ -795,10 +1052,27 @@ constructor( * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ - fun parallelToolCalls(parallelToolCalls: Boolean) = apply { + fun parallelToolCalls(parallelToolCalls: Boolean?) = apply { body.parallelToolCalls(parallelToolCalls) } + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. + */ + fun parallelToolCalls(parallelToolCalls: Boolean) = + parallelToolCalls(parallelToolCalls as Boolean?) + + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun parallelToolCalls(parallelToolCalls: Optional) = + parallelToolCalls(parallelToolCalls.orElse(null) as Boolean?) + /** * Specifies the format that the model must output. Compatible with * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 @@ -819,10 +1093,33 @@ constructor( * partially cut off if `finish_reason="length"`, which indicates the generation exceeded * `max_tokens` or the conversation exceeded the max context length. */ - fun responseFormat(responseFormat: AssistantResponseFormatOption) = apply { + fun responseFormat(responseFormat: AssistantResponseFormatOption?) = apply { body.responseFormat(responseFormat) } + /** + * Specifies the format that the model must output. Compatible with + * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 + * Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 + * Turbo models since `gpt-3.5-turbo-1106`. + * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs + * which ensures the model will match your supplied JSON schema. 
Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the + * model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + * yourself via a system or user message. Without this, the model may generate an unending + * stream of whitespace until the generation reaches the token limit, resulting in a + * long-running and seemingly "stuck" request. Also note that the message content may be + * partially cut off if `finish_reason="length"`, which indicates the generation exceeded + * `max_tokens` or the conversation exceeded the max context length. + */ + fun responseFormat(responseFormat: Optional) = + responseFormat(responseFormat.orElse(null)) + /** `auto` is the default value */ fun responseFormat(behavior: AssistantResponseFormatOption.Behavior) = apply { body.responseFormat(behavior) @@ -845,7 +1142,23 @@ constructor( * output more random, while lower values like 0.2 will make it more focused and * deterministic. */ - fun temperature(temperature: Double) = apply { body.temperature(temperature) } + fun temperature(temperature: Double?) = apply { body.temperature(temperature) } + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + fun temperature(temperature: Double) = temperature(temperature as Double?) + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused and + * deterministic. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun temperature(temperature: Optional) = + temperature(temperature.orElse(null) as Double?) /** * Controls which (if any) tool is called by the model. `none` means the model will not call @@ -855,10 +1168,21 @@ constructor( * particular tool like `{"type": "file_search"}` or `{"type": "function", "function": * {"name": "my_function"}}` forces the model to call that tool. */ - fun toolChoice(toolChoice: AssistantToolChoiceOption) = apply { + fun toolChoice(toolChoice: AssistantToolChoiceOption?) = apply { body.toolChoice(toolChoice) } + /** + * Controls which (if any) tool is called by the model. `none` means the model will not call + * any tools and instead generates a message. `auto` is the default value and means the + * model can pick between generating a message or calling one or more tools. `required` + * means the model must call one or more tools before responding to the user. Specifying a + * particular tool like `{"type": "file_search"}` or `{"type": "function", "function": + * {"name": "my_function"}}` forces the model to call that tool. + */ + fun toolChoice(toolChoice: Optional) = + toolChoice(toolChoice.orElse(null)) + /** * `none` means the model will not call any tools and instead generates a message. `auto` * means the model can pick between generating a message or calling one or more tools. @@ -879,7 +1203,13 @@ constructor( * Override the tools the assistant can use for this run. This is useful for modifying the * behavior on a per-run basis. */ - fun tools(tools: List) = apply { body.tools(tools) } + fun tools(tools: List?) = apply { body.tools(tools) } + + /** + * Override the tools the assistant can use for this run. 
This is useful for modifying the
+         * behavior on a per-run basis.
+         */
+        fun tools(tools: Optional<List<AssistantTool>>) = tools(tools.orElse(null))

        /**
         * Override the tools the assistant can use for this run. This is useful for modifying the
         * behavior on a per-run basis.
         */
        fun addTool(tool: AssistantTool) = apply { body.addTool(tool) }

        /**
         * An alternative to sampling with temperature, called nucleus sampling, where the model
         * considers the results of the tokens with top_p probability mass. So 0.1 means only the
         * tokens comprising the top 10% probability mass are considered.
         *
         * We generally recommend altering this or temperature but not both.
         */
-        fun topP(topP: Double) = apply { body.topP(topP) }
+        fun topP(topP: Double?) = apply { body.topP(topP) }
+
+        /**
+         * An alternative to sampling with temperature, called nucleus sampling, where the model
+         * considers the results of the tokens with top_p probability mass. So 0.1 means only the
+         * tokens comprising the top 10% probability mass are considered.
+         *
+         * We generally recommend altering this or temperature but not both.
+         */
+        fun topP(topP: Double) = topP(topP as Double?)
+
+        /**
+         * An alternative to sampling with temperature, called nucleus sampling, where the model
+         * considers the results of the tokens with top_p probability mass. So 0.1 means only the
+         * tokens comprising the top 10% probability mass are considered.
+         *
+         * We generally recommend altering this or temperature but not both.
+         */
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun topP(topP: Optional<Double>) = topP(topP.orElse(null) as Double?)

        /**
         * Controls for how a thread will be truncated prior to the run. Use this to control the
         * initial context window of the run.
         */
-        fun truncationStrategy(truncationStrategy: TruncationStrategy) = apply {
+        fun truncationStrategy(truncationStrategy: TruncationStrategy?) = apply {
            body.truncationStrategy(truncationStrategy)
        }

+        /**
+         * Controls for how a thread will be truncated prior to the run. Use this to control the
+         * initial context window of the run.
+         */
+        fun truncationStrategy(truncationStrategy: Optional<TruncationStrategy>) =
+            truncationStrategy(truncationStrategy.orElse(null))
+
        fun additionalHeaders(additionalHeaders: Headers) = apply {
            this.additionalHeaders.clear()
            putAllAdditionalHeaders(additionalHeaders)
@@ -1123,10 +1479,14 @@ constructor(
            fun role(role: Role) = apply { this.role = role }

            /** A list of files attached to the message, and the tools they should be added to. */
-            fun attachments(attachments: List<Attachment>) = apply {
-                this.attachments = attachments.toMutableList()
+            fun attachments(attachments: List<Attachment>?) = apply {
+                this.attachments = attachments?.toMutableList()
            }

+            /** A list of files attached to the message, and the tools they should be added to. */
+            fun attachments(attachments: Optional<List<Attachment>>) =
+                attachments(attachments.orElse(null))
+
            /** A list of files attached to the message, and the tools they should be added to. */
            fun addAttachment(attachment: Attachment) = apply {
                attachments = (attachments ?: mutableListOf()).apply { add(attachment) }
            }

            /**
             * Set of 16 key-value pairs that can be attached to an object. This can be useful for
             * storing additional information about the object in a structured format. Keys can be a
             * maximum of 64 characters long and values can be a maximum of 512 characters long.
             */
-            fun metadata(metadata: JsonValue) = apply { this.metadata = metadata }
+            fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata }
+
+            /**
+             * Set of 16 key-value pairs that can be attached to an object. This can be useful for
+             * storing additional information about the object in a structured format. Keys can be a
+             * maximum of 64 characters long and values can be a maximum of 512 characters long.
+ */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -1391,10 +1758,16 @@ constructor( } /** The ID of the file to attach to the message. */ - fun fileId(fileId: String) = apply { this.fileId = fileId } + fun fileId(fileId: String?) = apply { this.fileId = fileId } + + /** The ID of the file to attach to the message. */ + fun fileId(fileId: Optional) = fileId(fileId.orElse(null)) /** The tools to add this file to. */ - fun tools(tools: List) = apply { this.tools = tools.toMutableList() } + fun tools(tools: List?) = apply { this.tools = tools?.toMutableList() } + + /** The tools to add this file to. */ + fun tools(tools: Optional>) = tools(tools.orElse(null)) /** The tools to add this file to. */ fun addTool(tool: Tool) = apply { @@ -1784,7 +2157,21 @@ constructor( * The number of most recent messages from the thread when constructing the context for * the run. */ - fun lastMessages(lastMessages: Long) = apply { this.lastMessages = lastMessages } + fun lastMessages(lastMessages: Long?) = apply { this.lastMessages = lastMessages } + + /** + * The number of most recent messages from the thread when constructing the context for + * the run. + */ + fun lastMessages(lastMessages: Long) = lastMessages(lastMessages as Long?) + + /** + * The number of most recent messages from the thread when constructing the context for + * the run. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun lastMessages(lastMessages: Optional) = + lastMessages(lastMessages.orElse(null) as Long?) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunListParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunListParams.kt index c63196696..86cd72154 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunListParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunListParams.kt @@ -112,7 +112,15 @@ constructor( * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page * of the list. */ - fun after(after: String) = apply { this.after = after } + fun after(after: String?) = apply { this.after = after } + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, ending with + * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page + * of the list. + */ + fun after(after: Optional) = after(after.orElse(null)) /** * A cursor for use in pagination. `before` is an object ID that defines your place in the @@ -120,19 +128,46 @@ constructor( * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous * page of the list. */ - fun before(before: String) = apply { this.before = before } + fun before(before: String?) = apply { this.before = before } + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, starting with + * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous + * page of the list. + */ + fun before(before: Optional) = before(before.orElse(null)) /** * A limit on the number of objects to be returned. 
Limit can range between 1 and 100, and * the default is 20. */ - fun limit(limit: Long) = apply { this.limit = limit } + fun limit(limit: Long?) = apply { this.limit = limit } + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + fun limit(limit: Long) = limit(limit as Long?) + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun limit(limit: Optional) = limit(limit.orElse(null) as Long?) + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and + * `desc` for descending order. + */ + fun order(order: Order?) = apply { this.order = order } /** * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and * `desc` for descending order. */ - fun order(order: Order) = apply { this.order = order } + fun order(order: Optional) = order(order.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunStepListParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunStepListParams.kt index 104b3682e..08887c349 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunStepListParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunStepListParams.kt @@ -136,7 +136,23 @@ constructor( * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page * of the list. */ - fun after(after: String) = apply { this.after = after } + fun after(after: String?) = apply { this.after = after } + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, ending with + * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page + * of the list. + */ + fun after(after: Optional) = after(after.orElse(null)) + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, starting with + * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous + * page of the list. + */ + fun before(before: String?) = apply { this.before = before } /** * A cursor for use in pagination. `before` is an object ID that defines your place in the @@ -144,7 +160,7 @@ constructor( * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous * page of the list. */ - fun before(before: String) = apply { this.before = before } + fun before(before: Optional) = before(before.orElse(null)) /** * A list of additional fields to include in the response. Currently the only supported @@ -155,10 +171,21 @@ constructor( * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ - fun include(include: List) = apply { - this.include = include.toMutableList() + fun include(include: List?) = apply { + this.include = include?.toMutableList() } + /** + * A list of additional fields to include in the response. 
Currently the only supported + * value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + * search result content. + * + * See the + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + * for more information. + */ + fun include(include: Optional>) = include(include.orElse(null)) + /** * A list of additional fields to include in the response. Currently the only supported * value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file @@ -176,13 +203,32 @@ constructor( * A limit on the number of objects to be returned. Limit can range between 1 and 100, and * the default is 20. */ - fun limit(limit: Long) = apply { this.limit = limit } + fun limit(limit: Long?) = apply { this.limit = limit } + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + fun limit(limit: Long) = limit(limit as Long?) + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun limit(limit: Optional) = limit(limit.orElse(null) as Long?) + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and + * `desc` for descending order. + */ + fun order(order: Order?) = apply { this.order = order } /** * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and * `desc` for descending order. */ - fun order(order: Order) = apply { this.order = order } + fun order(order: Optional) = order(order.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunStepRetrieveParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunStepRetrieveParams.kt index c9854adac..376ccef75 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunStepRetrieveParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunStepRetrieveParams.kt @@ -103,10 +103,21 @@ constructor( * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) * for more information. */ - fun include(include: List) = apply { - this.include = include.toMutableList() + fun include(include: List?) = apply { + this.include = include?.toMutableList() } + /** + * A list of additional fields to include in the response. Currently the only supported + * value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file + * search result content. + * + * See the + * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) + * for more information. + */ + fun include(include: Optional>) = include(include.orElse(null)) + /** * A list of additional fields to include in the response. 
Currently the only supported * value is `step_details.tool_calls[*].file_search.results[*].content` to fetch the file diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunSubmitToolOutputsParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunSubmitToolOutputsParams.kt index 769bd42cd..1b883f363 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunSubmitToolOutputsParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunSubmitToolOutputsParams.kt @@ -355,13 +355,22 @@ constructor( } /** The output of the tool call to be submitted to continue the run. */ - fun output(output: String) = apply { this.output = output } + fun output(output: String?) = apply { this.output = output } + + /** The output of the tool call to be submitted to continue the run. */ + fun output(output: Optional) = output(output.orElse(null)) + + /** + * The ID of the tool call in the `required_action` object within the run object the + * output is being submitted for. + */ + fun toolCallId(toolCallId: String?) = apply { this.toolCallId = toolCallId } /** * The ID of the tool call in the `required_action` object within the run object the * output is being submitted for. */ - fun toolCallId(toolCallId: String) = apply { this.toolCallId = toolCallId } + fun toolCallId(toolCallId: Optional) = toolCallId(toolCallId.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunUpdateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunUpdateParams.kt index 24f5d23b2..27c7001da 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunUpdateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadRunUpdateParams.kt @@ -100,7 +100,14 @@ constructor( * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -177,7 +184,14 @@ constructor( * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { body.metadata(metadata) } + fun metadata(metadata: JsonValue?) = apply { body.metadata(metadata) } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. 
+ */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadUpdateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadUpdateParams.kt index aa030853c..6f61c3c39 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadUpdateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaThreadUpdateParams.kt @@ -115,7 +115,14 @@ constructor( * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) /** * A set of resources that are made available to the assistant's tools in this thread. @@ -123,10 +130,19 @@ constructor( * tool requires a list of file IDs, while the `file_search` tool requires a list of * vector store IDs. */ - fun toolResources(toolResources: ToolResources) = apply { + fun toolResources(toolResources: ToolResources?) = apply { this.toolResources = toolResources } + /** + * A set of resources that are made available to the assistant's tools in this thread. + * The resources are specific to the type of tool. For example, the `code_interpreter` + * tool requires a list of file IDs, while the `file_search` tool requires a list of + * vector store IDs. + */ + fun toolResources(toolResources: Optional) = + toolResources(toolResources.orElse(null)) + fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() putAllAdditionalProperties(additionalProperties) @@ -202,7 +218,14 @@ constructor( * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { body.metadata(metadata) } + fun metadata(metadata: JsonValue?) = apply { body.metadata(metadata) } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) /** * A set of resources that are made available to the assistant's tools in this thread. The @@ -210,10 +233,19 @@ constructor( * requires a list of file IDs, while the `file_search` tool requires a list of vector store * IDs. */ - fun toolResources(toolResources: ToolResources) = apply { + fun toolResources(toolResources: ToolResources?) = apply { body.toolResources(toolResources) } + /** + * A set of resources that are made available to the assistant's tools in this thread. The + * resources are specific to the type of tool. For example, the `code_interpreter` tool + * requires a list of file IDs, while the `file_search` tool requires a list of vector store + * IDs. 
+ */ + fun toolResources(toolResources: Optional) = + toolResources(toolResources.orElse(null)) + fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() putAllAdditionalHeaders(additionalHeaders) @@ -385,11 +417,16 @@ constructor( additionalProperties = toolResources.additionalProperties.toMutableMap() } - fun codeInterpreter(codeInterpreter: CodeInterpreter) = apply { + fun codeInterpreter(codeInterpreter: CodeInterpreter?) = apply { this.codeInterpreter = codeInterpreter } - fun fileSearch(fileSearch: FileSearch) = apply { this.fileSearch = fileSearch } + fun codeInterpreter(codeInterpreter: Optional) = + codeInterpreter(codeInterpreter.orElse(null)) + + fun fileSearch(fileSearch: FileSearch?) = apply { this.fileSearch = fileSearch } + + fun fileSearch(fileSearch: Optional) = fileSearch(fileSearch.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -462,10 +499,17 @@ constructor( * available to the `code_interpreter` tool. There can be a maximum of 20 files * associated with the tool. */ - fun fileIds(fileIds: List) = apply { - this.fileIds = fileIds.toMutableList() + fun fileIds(fileIds: List?) = apply { + this.fileIds = fileIds?.toMutableList() } + /** + * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made + * available to the `code_interpreter` tool. There can be a maximum of 20 files + * associated with the tool. + */ + fun fileIds(fileIds: Optional>) = fileIds(fileIds.orElse(null)) + /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made * available to the `code_interpreter` tool. There can be a maximum of 20 files @@ -565,10 +609,19 @@ constructor( * attached to this thread. There can be a maximum of 1 vector store attached to the * thread. */ - fun vectorStoreIds(vectorStoreIds: List) = apply { - this.vectorStoreIds = vectorStoreIds.toMutableList() + fun vectorStoreIds(vectorStoreIds: List?) = apply { + this.vectorStoreIds = vectorStoreIds?.toMutableList() } + /** + * The + * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) + * attached to this thread. There can be a maximum of 1 vector store attached to the + * thread. + */ + fun vectorStoreIds(vectorStoreIds: Optional>) = + vectorStoreIds(vectorStoreIds.orElse(null)) + /** * The * [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreCreateParams.kt index 4cebe5251..18aae6289 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreCreateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreCreateParams.kt @@ -140,10 +140,17 @@ constructor( * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. */ - fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam) = apply { + fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam?) = apply { this.chunkingStrategy = chunkingStrategy } + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. Only applicable if `file_ids` is non-empty. + */ + fun chunkingStrategy(chunkingStrategy: Optional) = + chunkingStrategy(chunkingStrategy.orElse(null)) + /** * The default strategy. 
This strategy currently uses a `max_chunk_size_tokens` of `800` * and `chunk_overlap_tokens` of `400`. @@ -165,15 +172,25 @@ constructor( } /** The expiration policy for a vector store. */ - fun expiresAfter(expiresAfter: ExpiresAfter) = apply { + fun expiresAfter(expiresAfter: ExpiresAfter?) = apply { this.expiresAfter = expiresAfter } + /** The expiration policy for a vector store. */ + fun expiresAfter(expiresAfter: Optional) = + expiresAfter(expiresAfter.orElse(null)) + + /** + * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the + * vector store should use. Useful for tools like `file_search` that can access files. + */ + fun fileIds(fileIds: List?) = apply { this.fileIds = fileIds?.toMutableList() } + /** * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the * vector store should use. Useful for tools like `file_search` that can access files. */ - fun fileIds(fileIds: List) = apply { this.fileIds = fileIds.toMutableList() } + fun fileIds(fileIds: Optional>) = fileIds(fileIds.orElse(null)) /** * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the @@ -188,10 +205,20 @@ constructor( * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) + + /** The name of the vector store. */ + fun name(name: String?) = apply { this.name = name } /** The name of the vector store. */ - fun name(name: String) = apply { this.name = name } + fun name(name: Optional) = name(name.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -266,10 +293,17 @@ constructor( * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. */ - fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam) = apply { + fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam?) = apply { body.chunkingStrategy(chunkingStrategy) } + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. Only applicable if `file_ids` is non-empty. + */ + fun chunkingStrategy(chunkingStrategy: Optional) = + chunkingStrategy(chunkingStrategy.orElse(null)) + /** * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and * `chunk_overlap_tokens` of `400`. @@ -284,13 +318,23 @@ constructor( } /** The expiration policy for a vector store. */ - fun expiresAfter(expiresAfter: ExpiresAfter) = apply { body.expiresAfter(expiresAfter) } + fun expiresAfter(expiresAfter: ExpiresAfter?) = apply { body.expiresAfter(expiresAfter) } + + /** The expiration policy for a vector store. */ + fun expiresAfter(expiresAfter: Optional) = + expiresAfter(expiresAfter.orElse(null)) + + /** + * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the + * vector store should use. 
Useful for tools like `file_search` that can access files.
+         */
+        fun fileIds(fileIds: List<String>?) = apply { body.fileIds(fileIds) }

        /**
         * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the
         * vector store should use. Useful for tools like `file_search` that can access files.
         */
-        fun fileIds(fileIds: List<String>) = apply { body.fileIds(fileIds) }
+        fun fileIds(fileIds: Optional<List<String>>) = fileIds(fileIds.orElse(null))

        /**
         * A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the
         * vector store should use. Useful for tools like `file_search` that can access files.
         */
        fun addFileId(fileId: String) = apply { body.addFileId(fileId) }

        /**
         * Set of 16 key-value pairs that can be attached to an object. This can be useful for
         * storing additional information about the object in a structured format. Keys can be a
         * maximum of 64 characters long and values can be a maximum of 512 characters long.
         */
-        fun metadata(metadata: JsonValue) = apply { body.metadata(metadata) }
+        fun metadata(metadata: JsonValue?) = apply { body.metadata(metadata) }
+
+        /**
+         * Set of 16 key-value pairs that can be attached to an object. This can be useful for
+         * storing additional information about the object in a structured format. Keys can be a
+         * maximum of 64 characters long and values can be a maximum of 512 characters long.
+         */
+        fun metadata(metadata: Optional<JsonValue>) = metadata(metadata.orElse(null))
+
+        /** The name of the vector store. */
+        fun name(name: String?) = apply { body.name(name) }

        /** The name of the vector store. */
-        fun name(name: String) = apply { body.name(name) }
+        fun name(name: Optional<String>) = name(name.orElse(null))

        fun additionalHeaders(additionalHeaders: Headers) = apply {
            this.additionalHeaders.clear()
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileBatchCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileBatchCreateParams.kt
index 0134b5e9d..5f0c33891 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileBatchCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileBatchCreateParams.kt
@@ -126,10 +126,17 @@ constructor(
         * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
         * strategy. Only applicable if `file_ids` is non-empty.
         */
-        fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam) = apply {
+        fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam?) = apply {
            this.chunkingStrategy = chunkingStrategy
        }

+        /**
+         * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+         * strategy. Only applicable if `file_ids` is non-empty.
+         */
+        fun chunkingStrategy(chunkingStrategy: Optional<FileChunkingStrategyParam>) =
+            chunkingStrategy(chunkingStrategy.orElse(null))
+
        /**
         * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800`
         * and `chunk_overlap_tokens` of `400`.
@@ -240,10 +247,17 @@ constructor(
         * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
         * strategy. Only applicable if `file_ids` is non-empty.
         */
-        fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam) = apply {
+        fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam?) = apply {
            body.chunkingStrategy(chunkingStrategy)
        }

+        /**
+         * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
+         * strategy. Only applicable if `file_ids` is non-empty.
+         */
+        fun chunkingStrategy(chunkingStrategy: Optional<FileChunkingStrategyParam>) =
+            chunkingStrategy(chunkingStrategy.orElse(null))
+
        /**
         * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and
         * `chunk_overlap_tokens` of `400`.
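With both the nullable and `Optional` overloads in place, callers can explicitly clear a field rather than only omitting it. A hedged usage sketch against `BetaVectorStoreCreateParams` (assuming the generated `builder()` factory and `build()` method, which are not shown in this diff):

```kotlin
import com.openai.models.BetaVectorStoreCreateParams
import java.util.Optional

fun main() {
    // All three shapes now compile for a nullable field such as `name`.
    val params = BetaVectorStoreCreateParams.builder()
        .name("support-docs")              // plain non-null value
        .name(Optional.of("support-docs")) // Optional overload, unwrapped via orElse(null)
        .name(null)                        // nullable overload: explicitly clears the field
        .expiresAfter(Optional.empty())    // empty Optional leaves the expiration policy unset
        .build()
    println(params)
}
```

A `null` literal always selects the nullable overload, since the `Optional`-typed parameter itself is non-null; both spellings funnel into the same underlying setter.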
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileBatchListFilesParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileBatchListFilesParams.kt index f7c99e63d..be24c7716 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileBatchListFilesParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileBatchListFilesParams.kt @@ -131,7 +131,23 @@ constructor( * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page * of the list. */ - fun after(after: String) = apply { this.after = after } + fun after(after: String?) = apply { this.after = after } + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, ending with + * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page + * of the list. + */ + fun after(after: Optional) = after(after.orElse(null)) + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, starting with + * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous + * page of the list. + */ + fun before(before: String?) = apply { this.before = before } /** * A cursor for use in pagination. `before` is an object ID that defines your place in the @@ -139,22 +155,44 @@ constructor( * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous * page of the list. */ - fun before(before: String) = apply { this.before = before } + fun before(before: Optional) = before(before.orElse(null)) + + /** Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. */ + fun filter(filter: Filter?) = apply { this.filter = filter } /** Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. */ - fun filter(filter: Filter) = apply { this.filter = filter } + fun filter(filter: Optional) = filter(filter.orElse(null)) /** * A limit on the number of objects to be returned. Limit can range between 1 and 100, and * the default is 20. */ - fun limit(limit: Long) = apply { this.limit = limit } + fun limit(limit: Long?) = apply { this.limit = limit } + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + fun limit(limit: Long) = limit(limit as Long?) + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun limit(limit: Optional) = limit(limit.orElse(null) as Long?) + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and + * `desc` for descending order. + */ + fun order(order: Order?) = apply { this.order = order } /** * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and * `desc` for descending order. 
*/ - fun order(order: Order) = apply { this.order = order } + fun order(order: Optional) = order(order.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileCreateParams.kt index 8fed88c16..e0021882b 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileCreateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileCreateParams.kt @@ -117,10 +117,17 @@ constructor( * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. */ - fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam) = apply { + fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam?) = apply { this.chunkingStrategy = chunkingStrategy } + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. Only applicable if `file_ids` is non-empty. + */ + fun chunkingStrategy(chunkingStrategy: Optional) = + chunkingStrategy(chunkingStrategy.orElse(null)) + /** * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` * and `chunk_overlap_tokens` of `400`. @@ -224,10 +231,17 @@ constructor( * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. */ - fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam) = apply { + fun chunkingStrategy(chunkingStrategy: FileChunkingStrategyParam?) = apply { body.chunkingStrategy(chunkingStrategy) } + /** + * The chunking strategy used to chunk the file(s). If not set, will use the `auto` + * strategy. Only applicable if `file_ids` is non-empty. + */ + fun chunkingStrategy(chunkingStrategy: Optional) = + chunkingStrategy(chunkingStrategy.orElse(null)) + /** * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and * `chunk_overlap_tokens` of `400`. diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileListParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileListParams.kt index 41f81f6c6..77c1daae1 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileListParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreFileListParams.kt @@ -119,7 +119,23 @@ constructor( * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page * of the list. */ - fun after(after: String) = apply { this.after = after } + fun after(after: String?) = apply { this.after = after } + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, ending with + * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page + * of the list. + */ + fun after(after: Optional) = after(after.orElse(null)) + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, starting with + * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous + * page of the list. + */ + fun before(before: String?) 
= apply { this.before = before } /** * A cursor for use in pagination. `before` is an object ID that defines your place in the @@ -127,22 +143,44 @@ constructor( * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous * page of the list. */ - fun before(before: String) = apply { this.before = before } + fun before(before: Optional) = before(before.orElse(null)) + + /** Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. */ + fun filter(filter: Filter?) = apply { this.filter = filter } /** Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`. */ - fun filter(filter: Filter) = apply { this.filter = filter } + fun filter(filter: Optional) = filter(filter.orElse(null)) /** * A limit on the number of objects to be returned. Limit can range between 1 and 100, and * the default is 20. */ - fun limit(limit: Long) = apply { this.limit = limit } + fun limit(limit: Long?) = apply { this.limit = limit } + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + fun limit(limit: Long) = limit(limit as Long?) + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun limit(limit: Optional) = limit(limit.orElse(null) as Long?) + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and + * `desc` for descending order. + */ + fun order(order: Order?) = apply { this.order = order } /** * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and * `desc` for descending order. */ - fun order(order: Order) = apply { this.order = order } + fun order(order: Optional) = order(order.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreListParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreListParams.kt index 1a5c1bd88..5cb67a927 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreListParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreListParams.kt @@ -98,7 +98,15 @@ constructor( * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page * of the list. */ - fun after(after: String) = apply { this.after = after } + fun after(after: String?) = apply { this.after = after } + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, ending with + * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page + * of the list. + */ + fun after(after: Optional) = after(after.orElse(null)) /** * A cursor for use in pagination. `before` is an object ID that defines your place in the @@ -106,19 +114,46 @@ constructor( * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous * page of the list. */ - fun before(before: String) = apply { this.before = before } + fun before(before: String?) = apply { this.before = before } + + /** + * A cursor for use in pagination. `before` is an object ID that defines your place in the + * list. 
For instance, if you make a list request and receive 100 objects, starting with + * obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous + * page of the list. + */ + fun before(before: Optional) = before(before.orElse(null)) /** * A limit on the number of objects to be returned. Limit can range between 1 and 100, and * the default is 20. */ - fun limit(limit: Long) = apply { this.limit = limit } + fun limit(limit: Long?) = apply { this.limit = limit } + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + fun limit(limit: Long) = limit(limit as Long?) + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 100, and + * the default is 20. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun limit(limit: Optional) = limit(limit.orElse(null) as Long?) + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and + * `desc` for descending order. + */ + fun order(order: Order?) = apply { this.order = order } /** * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and * `desc` for descending order. */ - fun order(order: Order) = apply { this.order = order } + fun order(order: Optional) = order(order.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreUpdateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreUpdateParams.kt index 7e5f30e90..2645c2338 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreUpdateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/BetaVectorStoreUpdateParams.kt @@ -114,19 +114,33 @@ constructor( } /** The expiration policy for a vector store. */ - fun expiresAfter(expiresAfter: ExpiresAfter) = apply { + fun expiresAfter(expiresAfter: ExpiresAfter?) = apply { this.expiresAfter = expiresAfter } + /** The expiration policy for a vector store. */ + fun expiresAfter(expiresAfter: Optional) = + expiresAfter(expiresAfter.orElse(null)) + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: JsonValue?) = apply { this.metadata = metadata } + /** * Set of 16 key-value pairs that can be attached to an object. This can be useful for * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { this.metadata = metadata } + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) + + /** The name of the vector store. */ + fun name(name: String?) = apply { this.name = name } /** The name of the vector store. */ - fun name(name: String) = apply { this.name = name } + fun name(name: Optional) = name(name.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -200,17 +214,31 @@ constructor( fun vectorStoreId(vectorStoreId: String) = apply { this.vectorStoreId = vectorStoreId } /** The expiration policy for a vector store. 
*/ - fun expiresAfter(expiresAfter: ExpiresAfter) = apply { body.expiresAfter(expiresAfter) } + fun expiresAfter(expiresAfter: ExpiresAfter?) = apply { body.expiresAfter(expiresAfter) } + + /** The expiration policy for a vector store. */ + fun expiresAfter(expiresAfter: Optional) = + expiresAfter(expiresAfter.orElse(null)) + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful for + * storing additional information about the object in a structured format. Keys can be a + * maximum of 64 characters long and values can be a maximum of 512 characters long. + */ + fun metadata(metadata: JsonValue?) = apply { body.metadata(metadata) } /** * Set of 16 key-value pairs that can be attached to an object. This can be useful for * storing additional information about the object in a structured format. Keys can be a * maximum of 64 characters long and values can be a maximum of 512 characters long. */ - fun metadata(metadata: JsonValue) = apply { body.metadata(metadata) } + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) + + /** The name of the vector store. */ + fun name(name: String?) = apply { body.name(name) } /** The name of the vector store. */ - fun name(name: String) = apply { body.name(name) } + fun name(name: Optional) = name(name.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionCreateParams.kt index e0038a19f..3fb6fe050 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionCreateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/ChatCompletionCreateParams.kt @@ -723,17 +723,40 @@ constructor( * Parameters for audio output. Required when audio output is requested with * `modalities: ["audio"]`. [Learn more](https://platform.openai.com/docs/guides/audio). */ - fun audio(audio: ChatCompletionAudioParam) = apply { this.audio = audio } + fun audio(audio: ChatCompletionAudioParam?) = apply { this.audio = audio } + + /** + * Parameters for audio output. Required when audio output is requested with + * `modalities: ["audio"]`. [Learn more](https://platform.openai.com/docs/guides/audio). + */ + fun audio(audio: Optional) = audio(audio.orElse(null)) /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on their * existing frequency in the text so far, decreasing the model's likelihood to repeat * the same line verbatim. */ - fun frequencyPenalty(frequencyPenalty: Double) = apply { + fun frequencyPenalty(frequencyPenalty: Double?) = apply { this.frequencyPenalty = frequencyPenalty } + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their + * existing frequency in the text so far, decreasing the model's likelihood to repeat + * the same line verbatim. + */ + fun frequencyPenalty(frequencyPenalty: Double) = + frequencyPenalty(frequencyPenalty as Double?) + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their + * existing frequency in the text so far, decreasing the model's likelihood to repeat + * the same line verbatim. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun frequencyPenalty(frequencyPenalty: Optional) = + frequencyPenalty(frequencyPenalty.orElse(null) as Double?) + /** * Deprecated in favor of `tool_choice`. 
* @@ -749,10 +772,28 @@ constructor( * `none` is the default when no functions are present. `auto` is the default if * functions are present. */ - fun functionCall(functionCall: FunctionCall) = apply { + fun functionCall(functionCall: FunctionCall?) = apply { this.functionCall = functionCall } + /** + * Deprecated in favor of `tool_choice`. + * + * Controls which (if any) function is called by the model. + * + * `none` means the model will not call a function and instead generates a message. + * + * `auto` means the model can pick between generating a message or calling a function. + * + * Specifying a particular function via `{"name": "my_function"}` forces the model to + * call that function. + * + * `none` is the default when no functions are present. `auto` is the default if + * functions are present. + */ + fun functionCall(functionCall: Optional) = + functionCall(functionCall.orElse(null)) + /** * `none` means the model will not call a function and instead generates a message. * `auto` means the model can pick between generating a message or calling a function. @@ -774,10 +815,17 @@ constructor( * * A list of functions the model may generate JSON inputs for. */ - fun functions(functions: List) = apply { - this.functions = functions.toMutableList() + fun functions(functions: List?) = apply { + this.functions = functions?.toMutableList() } + /** + * Deprecated in favor of `tools`. + * + * A list of functions the model may generate JSON inputs for. + */ + fun functions(functions: Optional>) = functions(functions.orElse(null)) + /** * Deprecated in favor of `tools`. * @@ -797,23 +845,76 @@ constructor( * selection; values like -100 or 100 should result in a ban or exclusive selection of * the relevant token. */ - fun logitBias(logitBias: LogitBias) = apply { this.logitBias = logitBias } + fun logitBias(logitBias: LogitBias?) = apply { this.logitBias = logitBias } + + /** + * Modify the likelihood of specified tokens appearing in the completion. + * + * Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) + * to an associated bias value from -100 to 100. Mathematically, the bias is added to + * the logits generated by the model prior to sampling. The exact effect will vary per + * model, but values between -1 and 1 should decrease or increase likelihood of + * selection; values like -100 or 100 should result in a ban or exclusive selection of + * the relevant token. + */ + fun logitBias(logitBias: Optional) = logitBias(logitBias.orElse(null)) /** * Whether to return log probabilities of the output tokens or not. If true, returns the * log probabilities of each output token returned in the `content` of `message`. */ - fun logprobs(logprobs: Boolean) = apply { this.logprobs = logprobs } + fun logprobs(logprobs: Boolean?) = apply { this.logprobs = logprobs } + + /** + * Whether to return log probabilities of the output tokens or not. If true, returns the + * log probabilities of each output token returned in the `content` of `message`. + */ + fun logprobs(logprobs: Boolean) = logprobs(logprobs as Boolean?) + + /** + * Whether to return log probabilities of the output tokens or not. If true, returns the + * log probabilities of each output token returned in the `content` of `message`. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun logprobs(logprobs: Optional) = logprobs(logprobs.orElse(null) as Boolean?) 
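The three-overload shape that keeps recurring here (`T?`, then unboxed `T`, then `Optional<T>`) deserves a note, since the casts look redundant. In the unboxed overload, the cast is what prevents the call from resolving back to itself and recursing; in the `Optional` overload, `orElse(null)` returns a platform type, so the cast pins resolution to the nullable overload, and the compiler's `USELESS_CAST` warning is a false positive (hence the KT-74228 reference in the suppression). A self-contained sketch of the pattern, with a hypothetical `ExampleBuilder` standing in for the generated builders:

    import java.util.Optional

    class ExampleBuilder {
        private var logprobs: Boolean? = null

        // Canonical setter: passing null clears the field.
        fun logprobs(logprobs: Boolean?) = apply { this.logprobs = logprobs }

        // Non-null convenience overload. Without the cast, overload resolution
        // would pick this exact-match function again and recurse forever.
        fun logprobs(logprobs: Boolean) = logprobs(logprobs as Boolean?)

        // Optional-accepting overload for Java-style call sites. orElse(null)
        // yields a platform type, so the cast keeps resolution on Boolean?.
        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
        fun logprobs(logprobs: Optional<Boolean>) =
            logprobs(logprobs.orElse(null) as Boolean?)
    }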
/** * An upper bound for the number of tokens that can be generated for a completion, * including visible output tokens and * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). */ - fun maxCompletionTokens(maxCompletionTokens: Long) = apply { + fun maxCompletionTokens(maxCompletionTokens: Long?) = apply { this.maxCompletionTokens = maxCompletionTokens } + /** + * An upper bound for the number of tokens that can be generated for a completion, + * including visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + fun maxCompletionTokens(maxCompletionTokens: Long) = + maxCompletionTokens(maxCompletionTokens as Long?) + + /** + * An upper bound for the number of tokens that can be generated for a completion, + * including visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun maxCompletionTokens(maxCompletionTokens: Optional) = + maxCompletionTokens(maxCompletionTokens.orElse(null) as Long?) + + /** + * The maximum number of [tokens](/tokenizer) that can be generated in the chat + * completion. This value can be used to control + * [costs](https://openai.com/api/pricing/) for text generated via API. + * + * This value is now deprecated in favor of `max_completion_tokens`, and is not + * compatible with + * [o1 series models](https://platform.openai.com/docs/guides/reasoning). + */ + fun maxTokens(maxTokens: Long?) = apply { this.maxTokens = maxTokens } + /** * The maximum number of [tokens](/tokenizer) that can be generated in the chat * completion. This value can be used to control @@ -823,13 +924,31 @@ constructor( * compatible with * [o1 series models](https://platform.openai.com/docs/guides/reasoning). */ - fun maxTokens(maxTokens: Long) = apply { this.maxTokens = maxTokens } + fun maxTokens(maxTokens: Long) = maxTokens(maxTokens as Long?) + + /** + * The maximum number of [tokens](/tokenizer) that can be generated in the chat + * completion. This value can be used to control + * [costs](https://openai.com/api/pricing/) for text generated via API. + * + * This value is now deprecated in favor of `max_completion_tokens`, and is not + * compatible with + * [o1 series models](https://platform.openai.com/docs/guides/reasoning). + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun maxTokens(maxTokens: Optional) = maxTokens(maxTokens.orElse(null) as Long?) /** * Developer-defined tags and values used for filtering completions in the * [dashboard](https://platform.openai.com/chat-completions). */ - fun metadata(metadata: Metadata) = apply { this.metadata = metadata } + fun metadata(metadata: Metadata?) = apply { this.metadata = metadata } + + /** + * Developer-defined tags and values used for filtering completions in the + * [dashboard](https://platform.openai.com/chat-completions). + */ + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) /** * Output types that you would like the model to generate for this request. Most models @@ -843,10 +962,25 @@ constructor( * * `["text", "audio"]` */ - fun modalities(modalities: List) = apply { - this.modalities = modalities.toMutableList() + fun modalities(modalities: List?) = apply { + this.modalities = modalities?.toMutableList() } + /** + * Output types that you would like the model to generate for this request. 
Most models + * are capable of generating text, which is the default: + * + * `["text"]` + * + * The `gpt-4o-audio-preview` model can also be used to + * [generate audio](https://platform.openai.com/docs/guides/audio). To request that this + * model generate both text and audio responses, you can use: + * + * `["text", "audio"]` + */ + fun modalities(modalities: Optional>) = + modalities(modalities.orElse(null)) + /** * Output types that you would like the model to generate for this request. Most models * are capable of generating text, which is the default: @@ -868,34 +1002,90 @@ constructor( * will be charged based on the number of generated tokens across all of the choices. * Keep `n` as `1` to minimize costs. */ - fun n(n: Long) = apply { this.n = n } + fun n(n: Long?) = apply { this.n = n } + + /** + * How many chat completion choices to generate for each input message. Note that you + * will be charged based on the number of generated tokens across all of the choices. + * Keep `n` as `1` to minimize costs. + */ + fun n(n: Long) = n(n as Long?) + + /** + * How many chat completion choices to generate for each input message. Note that you + * will be charged based on the number of generated tokens across all of the choices. + * Keep `n` as `1` to minimize costs. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun n(n: Optional) = n(n.orElse(null) as Long?) /** * Whether to enable * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ - fun parallelToolCalls(parallelToolCalls: Boolean) = apply { + fun parallelToolCalls(parallelToolCalls: Boolean?) = apply { this.parallelToolCalls = parallelToolCalls } + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. + */ + fun parallelToolCalls(parallelToolCalls: Boolean) = + parallelToolCalls(parallelToolCalls as Boolean?) + + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun parallelToolCalls(parallelToolCalls: Optional) = + parallelToolCalls(parallelToolCalls.orElse(null) as Boolean?) + /** * Static predicted output content, such as the content of a text file that is being * regenerated. */ - fun prediction(prediction: ChatCompletionPredictionContent) = apply { + fun prediction(prediction: ChatCompletionPredictionContent?) = apply { this.prediction = prediction } + /** + * Static predicted output content, such as the content of a text file that is being + * regenerated. + */ + fun prediction(prediction: Optional) = + prediction(prediction.orElse(null)) + /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether * they appear in the text so far, increasing the model's likelihood to talk about new * topics. */ - fun presencePenalty(presencePenalty: Double) = apply { + fun presencePenalty(presencePenalty: Double?) = apply { this.presencePenalty = presencePenalty } + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether + * they appear in the text so far, increasing the model's likelihood to talk about new + * topics. + */ + fun presencePenalty(presencePenalty: Double) = + presencePenalty(presencePenalty as Double?) 
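For callers, the practical effect of these overloads is that optional chat-completion fields can be set, cleared, or forwarded from Optional-returning configuration code without manual unwrapping. An illustrative sketch (the function and its arguments are hypothetical; only the setter names come from this diff, with the dropped `Optional<Double>` type argument restored):

    import java.util.Optional
    import com.openai.models.ChatCompletionCreateParams

    fun tunePenalty(builder: ChatCompletionCreateParams.Builder, fromConfig: Optional<Double>) {
        builder.presencePenalty(0.5)         // unboxed overload, as before
        builder.presencePenalty(null)        // explicit null clears the field
        builder.presencePenalty(fromConfig)  // an Optional forwards straight through
    }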
+ + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether + * they appear in the text so far, increasing the model's likelihood to talk about new + * topics. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun presencePenalty(presencePenalty: Optional) = + presencePenalty(presencePenalty.orElse(null) as Double?) + /** * **o1 models only** * @@ -904,10 +1094,21 @@ constructor( * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - fun reasoningEffort(reasoningEffort: ChatCompletionReasoningEffort) = apply { + fun reasoningEffort(reasoningEffort: ChatCompletionReasoningEffort?) = apply { this.reasoningEffort = reasoningEffort } + /** + * **o1 models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + fun reasoningEffort(reasoningEffort: Optional) = + reasoningEffort(reasoningEffort.orElse(null)) + /** * An object specifying the format that the model must output. * @@ -926,10 +1127,31 @@ constructor( * may be partially cut off if `finish_reason="length"`, which indicates the generation * exceeded `max_tokens` or the conversation exceeded the max context length. */ - fun responseFormat(responseFormat: ResponseFormat) = apply { + fun responseFormat(responseFormat: ResponseFormat?) = apply { this.responseFormat = responseFormat } + /** + * An object specifying the format that the model must output. + * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + * Outputs which ensures the model will match your supplied JSON schema. Learn more in + * the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message + * the model generates is valid JSON. + * + * **Important:** when using JSON mode, you **must** also instruct the model to produce + * JSON yourself via a system or user message. Without this, the model may generate an + * unending stream of whitespace until the generation reaches the token limit, resulting + * in a long-running and seemingly "stuck" request. Also note that the message content + * may be partially cut off if `finish_reason="length"`, which indicates the generation + * exceeded `max_tokens` or the conversation exceeded the max context length. + */ + fun responseFormat(responseFormat: Optional) = + responseFormat(responseFormat.orElse(null)) + fun responseFormat(responseFormatText: ResponseFormatText) = apply { this.responseFormat = ResponseFormat.ofResponseFormatText(responseFormatText) } @@ -950,7 +1172,41 @@ constructor( * should return the same result. Determinism is not guaranteed, and you should refer to * the `system_fingerprint` response parameter to monitor changes in the backend. */ - fun seed(seed: Long) = apply { this.seed = seed } + fun seed(seed: Long?) = apply { this.seed = seed } + + /** + * This feature is in Beta. If specified, our system will make a best effort to sample + * deterministically, such that repeated requests with the same `seed` and parameters + * should return the same result. 
Determinism is not guaranteed, and you should refer to + the `system_fingerprint` response parameter to monitor changes in the backend. + */ + fun seed(seed: Long) = seed(seed as Long?) + + /** + * This feature is in Beta. If specified, our system will make a best effort to sample + * deterministically, such that repeated requests with the same `seed` and parameters + * should return the same result. Determinism is not guaranteed, and you should refer to + * the `system_fingerprint` response parameter to monitor changes in the backend. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun seed(seed: Optional<Long>) = seed(seed.orElse(null) as Long?) + + /** + * Specifies the latency tier to use for processing the request. This parameter is + * relevant for customers subscribed to the scale tier service: + * - If set to 'auto', and the Project is Scale tier enabled, the system will utilize + * scale tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will be + * processed using the default service tier with a lower uptime SLA and no latency + * guarantee. + * - If set to 'default', the request will be processed using the default service tier + * with a lower uptime SLA and no latency guarantee. + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. + */ + fun serviceTier(serviceTier: ServiceTier?) = apply { this.serviceTier = serviceTier } /** * Specifies the latency tier to use for processing the request. This parameter is * relevant for customers subscribed to the scale tier service: * - If set to 'auto', and the Project is Scale tier enabled, the system will utilize * scale tier credits until they are exhausted. * - If set to 'auto', and the Project is not Scale tier enabled, the request will be * processed using the default service tier with a lower uptime SLA and no latency * guarantee. * - If set to 'default', the request will be processed using the default service tier * with a lower uptime SLA and no latency guarantee. * - When not set, the default behavior is 'auto'. * * When this parameter is set, the response body will include the `service_tier` * utilized. */ - fun serviceTier(serviceTier: ServiceTier) = apply { this.serviceTier = serviceTier } + fun serviceTier(serviceTier: Optional<ServiceTier>) = + serviceTier(serviceTier.orElse(null)) + + /** Up to 4 sequences where the API will stop generating further tokens. */ + fun stop(stop: Stop?) = apply { this.stop = stop } /** Up to 4 sequences where the API will stop generating further tokens. */ - fun stop(stop: Stop) = apply { this.stop = stop } + fun stop(stop: Optional<Stop>) = stop(stop.orElse(null)) fun stop(string: String) = apply { this.stop = Stop.ofString(string) } @@ -981,19 +1241,54 @@ constructor( * [model distillation](https://platform.openai.com/docs/guides/distillation) or * [evals](https://platform.openai.com/docs/guides/evals) products. */ - fun store(store: Boolean) = apply { this.store = store } + fun store(store: Boolean?) = apply { this.store = store } + + /** + * Whether or not to store the output of this chat completion request for use in our + * [model distillation](https://platform.openai.com/docs/guides/distillation) or + * [evals](https://platform.openai.com/docs/guides/evals) products. + */ + fun store(store: Boolean) = store(store as Boolean?) + + /** + * Whether or not to store the output of this chat completion request for use in our + * [model distillation](https://platform.openai.com/docs/guides/distillation) or + * [evals](https://platform.openai.com/docs/guides/evals) products. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun store(store: Optional<Boolean>) = store(store.orElse(null) as Boolean?) /** Options for streaming response. Only set this when you set `stream: true`.
*/ - fun streamOptions(streamOptions: ChatCompletionStreamOptions) = apply { + fun streamOptions(streamOptions: ChatCompletionStreamOptions?) = apply { this.streamOptions = streamOptions } + /** Options for streaming response. Only set this when you set `stream: true`. */ + fun streamOptions(streamOptions: Optional) = + streamOptions(streamOptions.orElse(null)) + /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make * the output more random, while lower values like 0.2 will make it more focused and * deterministic. We generally recommend altering this or `top_p` but not both. */ - fun temperature(temperature: Double) = apply { this.temperature = temperature } + fun temperature(temperature: Double?) = apply { this.temperature = temperature } + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. We generally recommend altering this or `top_p` but not both. + */ + fun temperature(temperature: Double) = temperature(temperature as Double?) + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. We generally recommend altering this or `top_p` but not both. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun temperature(temperature: Optional) = + temperature(temperature.orElse(null) as Double?) /** * Controls which (if any) tool is called by the model. `none` means the model will not @@ -1005,10 +1300,23 @@ constructor( * `none` is the default when no tools are present. `auto` is the default if tools are * present. */ - fun toolChoice(toolChoice: ChatCompletionToolChoiceOption) = apply { + fun toolChoice(toolChoice: ChatCompletionToolChoiceOption?) = apply { this.toolChoice = toolChoice } + /** + * Controls which (if any) tool is called by the model. `none` means the model will not + * call any tool and instead generates a message. `auto` means the model can pick + * between generating a message or calling one or more tools. `required` means the model + * must call one or more tools. Specifying a particular tool via `{"type": "function", + * "function": {"name": "my_function"}}` forces the model to call that tool. + * + * `none` is the default when no tools are present. `auto` is the default if tools are + * present. + */ + fun toolChoice(toolChoice: Optional) = + toolChoice(toolChoice.orElse(null)) + /** * `none` means the model will not call any tool and instead generates a message. `auto` * means the model can pick between generating a message or calling one or more tools. @@ -1034,10 +1342,17 @@ constructor( * tool. Use this to provide a list of functions the model may generate JSON inputs for. * A max of 128 functions are supported. */ - fun tools(tools: List) = apply { - this.tools = tools.toMutableList() + fun tools(tools: List?) = apply { + this.tools = tools?.toMutableList() } + /** + * A list of tools the model may call. Currently, only functions are supported as a + * tool. Use this to provide a list of functions the model may generate JSON inputs for. + * A max of 128 functions are supported. + */ + fun tools(tools: Optional>) = tools(tools.orElse(null)) + /** * A list of tools the model may call. Currently, only functions are supported as a * tool. 
Use this to provide a list of functions the model may generate JSON inputs for. @@ -1052,7 +1367,23 @@ constructor( * each token position, each with an associated log probability. `logprobs` must be set * to `true` if this parameter is used. */ - fun topLogprobs(topLogprobs: Long) = apply { this.topLogprobs = topLogprobs } + fun topLogprobs(topLogprobs: Long?) = apply { this.topLogprobs = topLogprobs } + + /** + * An integer between 0 and 20 specifying the number of most likely tokens to return at + * each token position, each with an associated log probability. `logprobs` must be set + * to `true` if this parameter is used. + */ + fun topLogprobs(topLogprobs: Long) = topLogprobs(topLogprobs as Long?) + + /** + * An integer between 0 and 20 specifying the number of most likely tokens to return at + * each token position, each with an associated log probability. `logprobs` must be set + * to `true` if this parameter is used. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun topLogprobs(topLogprobs: Optional) = + topLogprobs(topLogprobs.orElse(null) as Long?) /** * An alternative to sampling with temperature, called nucleus sampling, where the model @@ -1061,14 +1392,40 @@ constructor( * * We generally recommend altering this or `temperature` but not both. */ - fun topP(topP: Double) = apply { this.topP = topP } + fun topP(topP: Double?) = apply { this.topP = topP } + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only + * the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + fun topP(topP: Double) = topP(topP as Double?) + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only + * the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun topP(topP: Optional) = topP(topP.orElse(null) as Double?) /** * A unique identifier representing your end-user, which can help OpenAI to monitor and * detect abuse. * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ - fun user(user: String) = apply { this.user = user } + fun user(user: String?) = apply { this.user = user } + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and + * detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + fun user(user: Optional) = user(user.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -1202,17 +1559,57 @@ constructor( * Parameters for audio output. Required when audio output is requested with `modalities: * ["audio"]`. [Learn more](https://platform.openai.com/docs/guides/audio). */ - fun audio(audio: ChatCompletionAudioParam) = apply { body.audio(audio) } + fun audio(audio: ChatCompletionAudioParam?) = apply { body.audio(audio) } + + /** + * Parameters for audio output. Required when audio output is requested with `modalities: + * ["audio"]`. [Learn more](https://platform.openai.com/docs/guides/audio). 
+ */ + fun audio(audio: Optional) = audio(audio.orElse(null)) /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing * frequency in the text so far, decreasing the model's likelihood to repeat the same line * verbatim. */ - fun frequencyPenalty(frequencyPenalty: Double) = apply { + fun frequencyPenalty(frequencyPenalty: Double?) = apply { body.frequencyPenalty(frequencyPenalty) } + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + * frequency in the text so far, decreasing the model's likelihood to repeat the same line + * verbatim. + */ + fun frequencyPenalty(frequencyPenalty: Double) = + frequencyPenalty(frequencyPenalty as Double?) + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + * frequency in the text so far, decreasing the model's likelihood to repeat the same line + * verbatim. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun frequencyPenalty(frequencyPenalty: Optional) = + frequencyPenalty(frequencyPenalty.orElse(null) as Double?) + + /** + * Deprecated in favor of `tool_choice`. + * + * Controls which (if any) function is called by the model. + * + * `none` means the model will not call a function and instead generates a message. + * + * `auto` means the model can pick between generating a message or calling a function. + * + * Specifying a particular function via `{"name": "my_function"}` forces the model to call + * that function. + * + * `none` is the default when no functions are present. `auto` is the default if functions + * are present. + */ + fun functionCall(functionCall: FunctionCall?) = apply { body.functionCall(functionCall) } + /** * Deprecated in favor of `tool_choice`. * @@ -1228,7 +1625,8 @@ constructor( * `none` is the default when no functions are present. `auto` is the default if functions * are present. */ - fun functionCall(functionCall: FunctionCall) = apply { body.functionCall(functionCall) } + fun functionCall(functionCall: Optional) = + functionCall(functionCall.orElse(null)) /** * `none` means the model will not call a function and instead generates a message. `auto` @@ -1249,7 +1647,14 @@ constructor( * * A list of functions the model may generate JSON inputs for. */ - fun functions(functions: List) = apply { body.functions(functions) } + fun functions(functions: List?) = apply { body.functions(functions) } + + /** + * Deprecated in favor of `tools`. + * + * A list of functions the model may generate JSON inputs for. + */ + fun functions(functions: Optional>) = functions(functions.orElse(null)) /** * Deprecated in favor of `tools`. @@ -1267,23 +1672,64 @@ constructor( * but values between -1 and 1 should decrease or increase likelihood of selection; values * like -100 or 100 should result in a ban or exclusive selection of the relevant token. */ - fun logitBias(logitBias: LogitBias) = apply { body.logitBias(logitBias) } + fun logitBias(logitBias: LogitBias?) = apply { body.logitBias(logitBias) } + + /** + * Modify the likelihood of specified tokens appearing in the completion. + * + * Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to + * an associated bias value from -100 to 100. Mathematically, the bias is added to the + * logits generated by the model prior to sampling. 
The exact effect will vary per model, + * but values between -1 and 1 should decrease or increase likelihood of selection; values + * like -100 or 100 should result in a ban or exclusive selection of the relevant token. + */ + fun logitBias(logitBias: Optional) = logitBias(logitBias.orElse(null)) + + /** + * Whether to return log probabilities of the output tokens or not. If true, returns the log + * probabilities of each output token returned in the `content` of `message`. + */ + fun logprobs(logprobs: Boolean?) = apply { body.logprobs(logprobs) } + + /** + * Whether to return log probabilities of the output tokens or not. If true, returns the log + * probabilities of each output token returned in the `content` of `message`. + */ + fun logprobs(logprobs: Boolean) = logprobs(logprobs as Boolean?) /** * Whether to return log probabilities of the output tokens or not. If true, returns the log * probabilities of each output token returned in the `content` of `message`. */ - fun logprobs(logprobs: Boolean) = apply { body.logprobs(logprobs) } + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun logprobs(logprobs: Optional) = logprobs(logprobs.orElse(null) as Boolean?) /** * An upper bound for the number of tokens that can be generated for a completion, including * visible output tokens and * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). */ - fun maxCompletionTokens(maxCompletionTokens: Long) = apply { + fun maxCompletionTokens(maxCompletionTokens: Long?) = apply { body.maxCompletionTokens(maxCompletionTokens) } + /** + * An upper bound for the number of tokens that can be generated for a completion, including + * visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + fun maxCompletionTokens(maxCompletionTokens: Long) = + maxCompletionTokens(maxCompletionTokens as Long?) + + /** + * An upper bound for the number of tokens that can be generated for a completion, including + * visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun maxCompletionTokens(maxCompletionTokens: Optional) = + maxCompletionTokens(maxCompletionTokens.orElse(null) as Long?) + /** * The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. * This value can be used to control [costs](https://openai.com/api/pricing/) for text @@ -1292,13 +1738,40 @@ constructor( * This value is now deprecated in favor of `max_completion_tokens`, and is not compatible * with [o1 series models](https://platform.openai.com/docs/guides/reasoning). */ - fun maxTokens(maxTokens: Long) = apply { body.maxTokens(maxTokens) } + fun maxTokens(maxTokens: Long?) = apply { body.maxTokens(maxTokens) } + + /** + * The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + * This value can be used to control [costs](https://openai.com/api/pricing/) for text + * generated via API. + * + * This value is now deprecated in favor of `max_completion_tokens`, and is not compatible + * with [o1 series models](https://platform.openai.com/docs/guides/reasoning). + */ + fun maxTokens(maxTokens: Long) = maxTokens(maxTokens as Long?) + + /** + * The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + * This value can be used to control [costs](https://openai.com/api/pricing/) for text + * generated via API. 
+ * + * This value is now deprecated in favor of `max_completion_tokens`, and is not compatible + * with [o1 series models](https://platform.openai.com/docs/guides/reasoning). + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun maxTokens(maxTokens: Optional) = maxTokens(maxTokens.orElse(null) as Long?) + + /** + * Developer-defined tags and values used for filtering completions in the + * [dashboard](https://platform.openai.com/chat-completions). + */ + fun metadata(metadata: Metadata?) = apply { body.metadata(metadata) } /** * Developer-defined tags and values used for filtering completions in the * [dashboard](https://platform.openai.com/chat-completions). */ - fun metadata(metadata: Metadata) = apply { body.metadata(metadata) } + fun metadata(metadata: Optional) = metadata(metadata.orElse(null)) /** * Output types that you would like the model to generate for this request. Most models are @@ -1312,10 +1785,25 @@ constructor( * * `["text", "audio"]` */ - fun modalities(modalities: List) = apply { + fun modalities(modalities: List?) = apply { body.modalities(modalities) } + /** + * Output types that you would like the model to generate for this request. Most models are + * capable of generating text, which is the default: + * + * `["text"]` + * + * The `gpt-4o-audio-preview` model can also be used to + * [generate audio](https://platform.openai.com/docs/guides/audio). To request that this + * model generate both text and audio responses, you can use: + * + * `["text", "audio"]` + */ + fun modalities(modalities: Optional>) = + modalities(modalities.orElse(null)) + /** * Output types that you would like the model to generate for this request. Most models are * capable of generating text, which is the default: @@ -1335,33 +1823,86 @@ constructor( * be charged based on the number of generated tokens across all of the choices. Keep `n` as * `1` to minimize costs. */ - fun n(n: Long) = apply { body.n(n) } + fun n(n: Long?) = apply { body.n(n) } + + /** + * How many chat completion choices to generate for each input message. Note that you will + * be charged based on the number of generated tokens across all of the choices. Keep `n` as + * `1` to minimize costs. + */ + fun n(n: Long) = n(n as Long?) + + /** + * How many chat completion choices to generate for each input message. Note that you will + * be charged based on the number of generated tokens across all of the choices. Keep `n` as + * `1` to minimize costs. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun n(n: Optional) = n(n.orElse(null) as Long?) /** * Whether to enable * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) * during tool use. */ - fun parallelToolCalls(parallelToolCalls: Boolean) = apply { + fun parallelToolCalls(parallelToolCalls: Boolean?) = apply { body.parallelToolCalls(parallelToolCalls) } + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. + */ + fun parallelToolCalls(parallelToolCalls: Boolean) = + parallelToolCalls(parallelToolCalls as Boolean?) + + /** + * Whether to enable + * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + * during tool use. 
+ */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun parallelToolCalls(parallelToolCalls: Optional) = + parallelToolCalls(parallelToolCalls.orElse(null) as Boolean?) + /** * Static predicted output content, such as the content of a text file that is being * regenerated. */ - fun prediction(prediction: ChatCompletionPredictionContent) = apply { + fun prediction(prediction: ChatCompletionPredictionContent?) = apply { body.prediction(prediction) } + /** + * Static predicted output content, such as the content of a text file that is being + * regenerated. + */ + fun prediction(prediction: Optional) = + prediction(prediction.orElse(null)) + /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they * appear in the text so far, increasing the model's likelihood to talk about new topics. */ - fun presencePenalty(presencePenalty: Double) = apply { + fun presencePenalty(presencePenalty: Double?) = apply { body.presencePenalty(presencePenalty) } + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they + * appear in the text so far, increasing the model's likelihood to talk about new topics. + */ + fun presencePenalty(presencePenalty: Double) = presencePenalty(presencePenalty as Double?) + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they + * appear in the text so far, increasing the model's likelihood to talk about new topics. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun presencePenalty(presencePenalty: Optional) = + presencePenalty(presencePenalty.orElse(null) as Double?) + /** * **o1 models only** * @@ -1370,10 +1911,21 @@ constructor( * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can result in * faster responses and fewer tokens used on reasoning in a response. */ - fun reasoningEffort(reasoningEffort: ChatCompletionReasoningEffort) = apply { + fun reasoningEffort(reasoningEffort: ChatCompletionReasoningEffort?) = apply { body.reasoningEffort(reasoningEffort) } + /** + * **o1 models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can result in + * faster responses and fewer tokens used on reasoning in a response. + */ + fun reasoningEffort(reasoningEffort: Optional) = + reasoningEffort(reasoningEffort.orElse(null)) + /** * An object specifying the format that the model must output. * @@ -1391,10 +1943,30 @@ constructor( * partially cut off if `finish_reason="length"`, which indicates the generation exceeded * `max_tokens` or the conversation exceeded the max context length. */ - fun responseFormat(responseFormat: ResponseFormat) = apply { + fun responseFormat(responseFormat: ResponseFormat?) = apply { body.responseFormat(responseFormat) } + /** + * An object specifying the format that the model must output. + * + * Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs + * which ensures the model will match your supplied JSON schema. Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the message the + * model generates is valid JSON. 
+ * + * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON + * yourself via a system or user message. Without this, the model may generate an unending + * stream of whitespace until the generation reaches the token limit, resulting in a + * long-running and seemingly "stuck" request. Also note that the message content may be + * partially cut off if `finish_reason="length"`, which indicates the generation exceeded + * `max_tokens` or the conversation exceeded the max context length. + */ + fun responseFormat(responseFormat: Optional<ResponseFormat>) = + responseFormat(responseFormat.orElse(null)) + fun responseFormat(responseFormatText: ResponseFormatText) = apply { body.responseFormat(responseFormatText) } @@ -1413,7 +1985,40 @@ constructor( * return the same result. Determinism is not guaranteed, and you should refer to the * `system_fingerprint` response parameter to monitor changes in the backend. */ - fun seed(seed: Long) = apply { body.seed(seed) } + fun seed(seed: Long?) = apply { body.seed(seed) } + + /** + * This feature is in Beta. If specified, our system will make a best effort to sample + * deterministically, such that repeated requests with the same `seed` and parameters should + * return the same result. Determinism is not guaranteed, and you should refer to the + * `system_fingerprint` response parameter to monitor changes in the backend. + */ + fun seed(seed: Long) = seed(seed as Long?) + + /** + * This feature is in Beta. If specified, our system will make a best effort to sample + * deterministically, such that repeated requests with the same `seed` and parameters should + * return the same result. Determinism is not guaranteed, and you should refer to the + * `system_fingerprint` response parameter to monitor changes in the backend. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun seed(seed: Optional<Long>) = seed(seed.orElse(null) as Long?) + + /** + * Specifies the latency tier to use for processing the request. This parameter is relevant + * for customers subscribed to the scale tier service: + * - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale + * tier credits until they are exhausted. + * - If set to 'auto', and the Project is not Scale tier enabled, the request will be + * processed using the default service tier with a lower uptime SLA and no latency + * guarantee. + * - If set to 'default', the request will be processed using the default service tier with + * a lower uptime SLA and no latency guarantee. + * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` utilized. + */ + fun serviceTier(serviceTier: ServiceTier?) = apply { body.serviceTier(serviceTier) } /** * Specifies the latency tier to use for processing the request. This parameter is relevant * for customers subscribed to the scale tier service: * - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale * tier credits until they are exhausted. * - If set to 'auto', and the Project is not Scale tier enabled, the request will be * processed using the default service tier with a lower uptime SLA and no latency * guarantee. * - If set to 'default', the request will be processed using the default service tier with * a lower uptime SLA and no latency guarantee. * - When not set, the default behavior is 'auto'. * * When this parameter is set, the response body will include the `service_tier` utilized. */ - fun serviceTier(serviceTier: ServiceTier) = apply { body.serviceTier(serviceTier) } + fun serviceTier(serviceTier: Optional<ServiceTier>) = serviceTier(serviceTier.orElse(null)) + + /** Up to 4 sequences where the API will stop generating further tokens. */ + fun stop(stop: Stop?) = apply { body.stop(stop) } /** Up to 4 sequences where the API will stop generating further tokens.
*/ - fun stop(stop: Stop) = apply { body.stop(stop) } + fun stop(stop: Optional) = stop(stop.orElse(null)) fun stop(string: String) = apply { body.stop(string) } @@ -1443,19 +2051,54 @@ constructor( * [model distillation](https://platform.openai.com/docs/guides/distillation) or * [evals](https://platform.openai.com/docs/guides/evals) products. */ - fun store(store: Boolean) = apply { body.store(store) } + fun store(store: Boolean?) = apply { body.store(store) } + + /** + * Whether or not to store the output of this chat completion request for use in our + * [model distillation](https://platform.openai.com/docs/guides/distillation) or + * [evals](https://platform.openai.com/docs/guides/evals) products. + */ + fun store(store: Boolean) = store(store as Boolean?) + + /** + * Whether or not to store the output of this chat completion request for use in our + * [model distillation](https://platform.openai.com/docs/guides/distillation) or + * [evals](https://platform.openai.com/docs/guides/evals) products. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun store(store: Optional) = store(store.orElse(null) as Boolean?) /** Options for streaming response. Only set this when you set `stream: true`. */ - fun streamOptions(streamOptions: ChatCompletionStreamOptions) = apply { + fun streamOptions(streamOptions: ChatCompletionStreamOptions?) = apply { body.streamOptions(streamOptions) } + /** Options for streaming response. Only set this when you set `stream: true`. */ + fun streamOptions(streamOptions: Optional) = + streamOptions(streamOptions.orElse(null)) + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused and + * deterministic. We generally recommend altering this or `top_p` but not both. + */ + fun temperature(temperature: Double?) = apply { body.temperature(temperature) } + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused and + * deterministic. We generally recommend altering this or `top_p` but not both. + */ + fun temperature(temperature: Double) = temperature(temperature as Double?) + /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the * output more random, while lower values like 0.2 will make it more focused and * deterministic. We generally recommend altering this or `top_p` but not both. */ - fun temperature(temperature: Double) = apply { body.temperature(temperature) } + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun temperature(temperature: Optional) = + temperature(temperature.orElse(null) as Double?) /** * Controls which (if any) tool is called by the model. `none` means the model will not call @@ -1467,10 +2110,23 @@ constructor( * `none` is the default when no tools are present. `auto` is the default if tools are * present. */ - fun toolChoice(toolChoice: ChatCompletionToolChoiceOption) = apply { + fun toolChoice(toolChoice: ChatCompletionToolChoiceOption?) = apply { body.toolChoice(toolChoice) } + /** + * Controls which (if any) tool is called by the model. `none` means the model will not call + * any tool and instead generates a message. `auto` means the model can pick between + * generating a message or calling one or more tools. `required` means the model must call + * one or more tools. 
Specifying a particular tool via `{"type": "function", "function":
+         * {"name": "my_function"}}` forces the model to call that tool.
+         *
+         * `none` is the default when no tools are present. `auto` is the default if tools are
+         * present.
+         */
+        fun toolChoice(toolChoice: Optional<ChatCompletionToolChoiceOption>) =
+            toolChoice(toolChoice.orElse(null))
+
         /**
          * `none` means the model will not call any tool and instead generates a message. `auto`
          * means the model can pick between generating a message or calling one or more tools.
@@ -1493,7 +2149,14 @@
          * Use this to provide a list of functions the model may generate JSON inputs for. A max of
          * 128 functions are supported.
          */
-        fun tools(tools: List<ChatCompletionTool>) = apply { body.tools(tools) }
+        fun tools(tools: List<ChatCompletionTool>?) = apply { body.tools(tools) }
+
+        /**
+         * A list of tools the model may call. Currently, only functions are supported as a tool.
+         * Use this to provide a list of functions the model may generate JSON inputs for. A max of
+         * 128 functions are supported.
+         */
+        fun tools(tools: Optional<List<ChatCompletionTool>>) = tools(tools.orElse(null))

         /**
          * A list of tools the model may call. Currently, only functions are supported as a tool.
          * Use this to provide a list of functions the model may generate JSON inputs for. A max of
@@ -1507,7 +2170,41 @@
          * token position, each with an associated log probability. `logprobs` must be set to `true`
          * if this parameter is used.
          */
-        fun topLogprobs(topLogprobs: Long) = apply { body.topLogprobs(topLogprobs) }
+        fun topLogprobs(topLogprobs: Long?) = apply { body.topLogprobs(topLogprobs) }
+
+        /**
+         * An integer between 0 and 20 specifying the number of most likely tokens to return at each
+         * token position, each with an associated log probability. `logprobs` must be set to `true`
+         * if this parameter is used.
+         */
+        fun topLogprobs(topLogprobs: Long) = topLogprobs(topLogprobs as Long?)
+
+        /**
+         * An integer between 0 and 20 specifying the number of most likely tokens to return at each
+         * token position, each with an associated log probability. `logprobs` must be set to `true`
+         * if this parameter is used.
+         */
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun topLogprobs(topLogprobs: Optional<Long>) =
+            topLogprobs(topLogprobs.orElse(null) as Long?)
+
+        /**
+         * An alternative to sampling with temperature, called nucleus sampling, where the model
+         * considers the results of the tokens with top_p probability mass. So 0.1 means only the
+         * tokens comprising the top 10% probability mass are considered.
+         *
+         * We generally recommend altering this or `temperature` but not both.
+         */
+        fun topP(topP: Double?) = apply { body.topP(topP) }
+
+        /**
+         * An alternative to sampling with temperature, called nucleus sampling, where the model
+         * considers the results of the tokens with top_p probability mass. So 0.1 means only the
+         * tokens comprising the top 10% probability mass are considered.
+         *
+         * We generally recommend altering this or `temperature` but not both.
+         */
+        fun topP(topP: Double) = topP(topP as Double?)

         /**
          * An alternative to sampling with temperature, called nucleus sampling, where the model
          * considers the results of the tokens with top_p probability mass. So 0.1 means only the
          * tokens comprising the top 10% probability mass are considered.
          *
          * We generally recommend altering this or `temperature` but not both.
          */
-        fun topP(topP: Double) = apply { body.topP(topP) }
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun topP(topP: Optional<Double>) = topP(topP.orElse(null) as Double?)
+
+        /**
+         * A unique identifier representing your end-user, which can help OpenAI to monitor and
+         * detect abuse. 
+         * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
+         */
+        fun user(user: String?) = apply { body.user(user) }

         /**
          * A unique identifier representing your end-user, which can help OpenAI to monitor and
          * detect abuse.
          * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
          */
-        fun user(user: String) = apply { body.user(user) }
+        fun user(user: Optional<String>) = user(user.orElse(null))

         fun additionalHeaders(additionalHeaders: Headers) = apply {
             this.additionalHeaders.clear()
@@ -1920,7 +2625,23 @@
             * A description of what the function does, used by the model to choose when and how to
             * call the function.
             */
-            fun description(description: String) = apply { this.description = description }
+            fun description(description: String?) = apply { this.description = description }
+
+            /**
+             * A description of what the function does, used by the model to choose when and how to
+             * call the function.
+             */
+            fun description(description: Optional<String>) = description(description.orElse(null))
+
+            /**
+             * The parameters the function accepts, described as a JSON Schema object. See the
+             * [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and
+             * the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
+             * documentation about the format.
+             *
+             * Omitting `parameters` defines a function with an empty parameter list.
+             */
+            fun parameters(parameters: FunctionParameters?) = apply { this.parameters = parameters }

             /**
              * The parameters the functions accepts, described as a JSON Schema object. See the
              * [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and
              * the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
              * documentation about the format.
              *
              * Omitting `parameters` defines a function with an empty parameter list.
              */
-            fun parameters(parameters: FunctionParameters) = apply { this.parameters = parameters }
+            fun parameters(parameters: Optional<FunctionParameters>) =
+                parameters(parameters.orElse(null))

             fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
                 this.additionalProperties.clear()
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/CompletionCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/CompletionCreateParams.kt
index ac4788e87..7f48a1a00 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/CompletionCreateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/CompletionCreateParams.kt
@@ -449,7 +449,17 @@ constructor(
          * training, so if a prompt is not specified the model will generate as if from the
          * beginning of a new document.
          */
-        fun prompt(prompt: Prompt) = apply { this.prompt = prompt }
+        fun prompt(prompt: Prompt?) = apply { this.prompt = prompt }
+
+        /**
+         * The prompt(s) to generate completions for, encoded as a string, array of strings,
+         * array of tokens, or array of token arrays.
+         *
+         * Note that <|endoftext|> is the document separator that the model sees during
+         * training, so if a prompt is not specified the model will generate as if from the
+         * beginning of a new document.
+         */
+        fun prompt(prompt: Optional<Prompt>) = prompt(prompt.orElse(null))

         fun prompt(string: String) = apply { this.prompt = Prompt.ofString(string) }
@@ -476,10 +486,44 @@
          * your token quota. Use carefully and ensure that you have reasonable settings for
          * `max_tokens` and `stop`.
          */
-        fun bestOf(bestOf: Long) = apply { this.bestOf = bestOf }
+        fun bestOf(bestOf: Long?) = apply { this.bestOf = bestOf }
+
+        /**
+         * Generates `best_of` completions server-side and returns the "best" (the one with the
+         * highest log probability per token). 
Results cannot be streamed. + * + * When used with `n`, `best_of` controls the number of candidate completions and `n` + * specifies how many to return – `best_of` must be greater than `n`. + * + * **Note:** Because this parameter generates many completions, it can quickly consume + * your token quota. Use carefully and ensure that you have reasonable settings for + * `max_tokens` and `stop`. + */ + fun bestOf(bestOf: Long) = bestOf(bestOf as Long?) + + /** + * Generates `best_of` completions server-side and returns the "best" (the one with the + * highest log probability per token). Results cannot be streamed. + * + * When used with `n`, `best_of` controls the number of candidate completions and `n` + * specifies how many to return – `best_of` must be greater than `n`. + * + * **Note:** Because this parameter generates many completions, it can quickly consume + * your token quota. Use carefully and ensure that you have reasonable settings for + * `max_tokens` and `stop`. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun bestOf(bestOf: Optional) = bestOf(bestOf.orElse(null) as Long?) /** Echo back the prompt in addition to the completion */ - fun echo(echo: Boolean) = apply { this.echo = echo } + fun echo(echo: Boolean?) = apply { this.echo = echo } + + /** Echo back the prompt in addition to the completion */ + fun echo(echo: Boolean) = echo(echo as Boolean?) + + /** Echo back the prompt in addition to the completion */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun echo(echo: Optional) = echo(echo.orElse(null) as Boolean?) /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on their @@ -489,10 +533,49 @@ constructor( * [See more information about frequency and presence * penalties.](https://platform.openai.com/docs/guides/text-generation) */ - fun frequencyPenalty(frequencyPenalty: Double) = apply { + fun frequencyPenalty(frequencyPenalty: Double?) = apply { this.frequencyPenalty = frequencyPenalty } + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their + * existing frequency in the text so far, decreasing the model's likelihood to repeat + * the same line verbatim. + * + * [See more information about frequency and presence + * penalties.](https://platform.openai.com/docs/guides/text-generation) + */ + fun frequencyPenalty(frequencyPenalty: Double) = + frequencyPenalty(frequencyPenalty as Double?) + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their + * existing frequency in the text so far, decreasing the model's likelihood to repeat + * the same line verbatim. + * + * [See more information about frequency and presence + * penalties.](https://platform.openai.com/docs/guides/text-generation) + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun frequencyPenalty(frequencyPenalty: Optional) = + frequencyPenalty(frequencyPenalty.orElse(null) as Double?) + + /** + * Modify the likelihood of specified tokens appearing in the completion. + * + * Accepts a JSON object that maps tokens (specified by their token ID in the GPT + * tokenizer) to an associated bias value from -100 to 100. You can use this + * [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, + * the bias is added to the logits generated by the model prior to sampling. 
The exact
+             * effect will vary per model, but values between -1 and 1 should decrease or increase
+             * likelihood of selection; values like -100 or 100 should result in a ban or exclusive
+             * selection of the relevant token.
+             *
+             * As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from
+             * being generated.
+             */
+            fun logitBias(logitBias: LogitBias?) = apply { this.logitBias = logitBias }
+
             /**
              * Modify the likelihood of specified tokens appearing in the completion.
              *
              * Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              * tokenizer) to an associated bias value from -100 to 100. You can use this
              * [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically,
              * the bias is added to the logits generated by the model prior to sampling. The exact
              * effect will vary per model, but values between -1 and 1 should decrease or increase
              * likelihood of selection; values like -100 or 100 should result in a ban or exclusive
              * selection of the relevant token.
              *
              * As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from
              * being generated.
              */
-            fun logitBias(logitBias: LogitBias) = apply { this.logitBias = logitBias }
+            fun logitBias(logitBias: Optional<LogitBias>) = logitBias(logitBias.orElse(null))
+
+            /**
+             * Include the log probabilities on the `logprobs` most likely output tokens, as well as
+             * the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the
+             * 5 most likely tokens. The API will always return the `logprob` of the sampled token,
+             * so there may be up to `logprobs+1` elements in the response.
+             *
+             * The maximum value for `logprobs` is 5.
+             */
+            fun logprobs(logprobs: Long?) = apply { this.logprobs = logprobs }

             /**
              * Include the log probabilities on the `logprobs` most likely output tokens, as well
              * the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the
              * 5 most likely tokens. The API will always return the `logprob` of the sampled token,
              * so there may be up to `logprobs+1` elements in the response.
              *
              * The maximum value for `logprobs` is 5.
              */
-            fun logprobs(logprobs: Long) = apply { this.logprobs = logprobs }
+            fun logprobs(logprobs: Long) = logprobs(logprobs as Long?)
+
+            /**
+             * Include the log probabilities on the `logprobs` most likely output tokens, as well as
+             * the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the
+             * 5 most likely tokens. The API will always return the `logprob` of the sampled token,
+             * so there may be up to `logprobs+1` elements in the response.
+             *
+             * The maximum value for `logprobs` is 5.
+             */
+            @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+            fun logprobs(logprobs: Optional<Long>) = logprobs(logprobs.orElse(null) as Long?)
+
+            /**
+             * The maximum number of [tokens](/tokenizer) that can be generated in the completion.
+             *
+             * The token count of your prompt plus `max_tokens` cannot exceed the model's context
+             * length.
+             * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+             * for counting tokens.
+             */
+            fun maxTokens(maxTokens: Long?) = apply { this.maxTokens = maxTokens }

             /**
              * The maximum number of [tokens](/tokenizer) that can be generated in the completion.
              *
              * The token count of your prompt plus `max_tokens` cannot exceed the model's context
              * length.
              * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              * for counting tokens.
              */
-            fun maxTokens(maxTokens: Long) = apply { this.maxTokens = maxTokens }
+            fun maxTokens(maxTokens: Long) = maxTokens(maxTokens as Long?)
+
+            /**
+             * The maximum number of [tokens](/tokenizer) that can be generated in the completion.
+             *
+             * The token count of your prompt plus `max_tokens` cannot exceed the model's context
+             * length.
+             * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+             * for counting tokens.
+             */
+            @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+            fun maxTokens(maxTokens: Optional<Long>) = maxTokens(maxTokens.orElse(null) as Long?)
+
+            /**
+             * How many completions to generate for each prompt.
+             *
+             * **Note:** Because this parameter generates many completions, it can quickly consume
+             * your token quota. 
Use carefully and ensure that you have reasonable settings for + * `max_tokens` and `stop`. + */ + fun n(n: Long?) = apply { this.n = n } + + /** + * How many completions to generate for each prompt. + * + * **Note:** Because this parameter generates many completions, it can quickly consume + * your token quota. Use carefully and ensure that you have reasonable settings for + * `max_tokens` and `stop`. + */ + fun n(n: Long) = n(n as Long?) /** * How many completions to generate for each prompt. @@ -536,7 +679,8 @@ constructor( * your token quota. Use carefully and ensure that you have reasonable settings for * `max_tokens` and `stop`. */ - fun n(n: Long) = apply { this.n = n } + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun n(n: Optional) = n(n.orElse(null) as Long?) /** * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether @@ -546,10 +690,43 @@ constructor( * [See more information about frequency and presence * penalties.](https://platform.openai.com/docs/guides/text-generation) */ - fun presencePenalty(presencePenalty: Double) = apply { + fun presencePenalty(presencePenalty: Double?) = apply { this.presencePenalty = presencePenalty } + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether + * they appear in the text so far, increasing the model's likelihood to talk about new + * topics. + * + * [See more information about frequency and presence + * penalties.](https://platform.openai.com/docs/guides/text-generation) + */ + fun presencePenalty(presencePenalty: Double) = + presencePenalty(presencePenalty as Double?) + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether + * they appear in the text so far, increasing the model's likelihood to talk about new + * topics. + * + * [See more information about frequency and presence + * penalties.](https://platform.openai.com/docs/guides/text-generation) + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun presencePenalty(presencePenalty: Optional) = + presencePenalty(presencePenalty.orElse(null) as Double?) + + /** + * If specified, our system will make a best effort to sample deterministically, such + * that repeated requests with the same `seed` and parameters should return the same + * result. + * + * Determinism is not guaranteed, and you should refer to the `system_fingerprint` + * response parameter to monitor changes in the backend. + */ + fun seed(seed: Long?) = apply { this.seed = seed } + /** * If specified, our system will make a best effort to sample deterministically, such * that repeated requests with the same `seed` and parameters should return the same @@ -558,29 +735,57 @@ constructor( * Determinism is not guaranteed, and you should refer to the `system_fingerprint` * response parameter to monitor changes in the backend. */ - fun seed(seed: Long) = apply { this.seed = seed } + fun seed(seed: Long) = seed(seed as Long?) + + /** + * If specified, our system will make a best effort to sample deterministically, such + * that repeated requests with the same `seed` and parameters should return the same + * result. + * + * Determinism is not guaranteed, and you should refer to the `system_fingerprint` + * response parameter to monitor changes in the backend. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun seed(seed: Optional) = seed(seed.orElse(null) as Long?) 
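// A minimal caller-side sketch (illustrative, not part of this patch) of how the three
// generated `seed` overloads resolve from Kotlin; the helper name and literal values are
// assumptions for demonstration only.
import java.util.Optional

fun configureSeed(builder: CompletionCreateParams.Builder) {
    builder.seed(42L) // a plain value resolves to seed(Long)
    builder.seed(null as Long?) // an explicit null unsets the field via seed(Long?)
    builder.seed(Optional.empty()) // an empty Optional unsets it via seed(Optional<Long>)
}
// A bare `builder.seed(null)` would be ambiguous between seed(Long?) and seed(Optional<Long>);
// the same resolution subtlety is why the delegating overloads above carry the USELESS_CAST
// suppression (https://youtrack.jetbrains.com/issue/KT-74228).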
+ + /** + * Up to 4 sequences where the API will stop generating further tokens. The returned + * text will not contain the stop sequence. + */ + fun stop(stop: Stop?) = apply { this.stop = stop } /** * Up to 4 sequences where the API will stop generating further tokens. The returned * text will not contain the stop sequence. */ - fun stop(stop: Stop) = apply { this.stop = stop } + fun stop(stop: Optional) = stop(stop.orElse(null)) fun stop(string: String) = apply { this.stop = Stop.ofString(string) } fun stopOfStrings(strings: List) = apply { this.stop = Stop.ofStrings(strings) } /** Options for streaming response. Only set this when you set `stream: true`. */ - fun streamOptions(streamOptions: ChatCompletionStreamOptions) = apply { + fun streamOptions(streamOptions: ChatCompletionStreamOptions?) = apply { this.streamOptions = streamOptions } + /** Options for streaming response. Only set this when you set `stream: true`. */ + fun streamOptions(streamOptions: Optional) = + streamOptions(streamOptions.orElse(null)) + + /** + * The suffix that comes after a completion of inserted text. + * + * This parameter is only supported for `gpt-3.5-turbo-instruct`. + */ + fun suffix(suffix: String?) = apply { this.suffix = suffix } + /** * The suffix that comes after a completion of inserted text. * * This parameter is only supported for `gpt-3.5-turbo-instruct`. */ - fun suffix(suffix: String) = apply { this.suffix = suffix } + fun suffix(suffix: Optional) = suffix(suffix.orElse(null)) /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make @@ -589,7 +794,36 @@ constructor( * * We generally recommend altering this or `top_p` but not both. */ - fun temperature(temperature: Double) = apply { this.temperature = temperature } + fun temperature(temperature: Double?) = apply { this.temperature = temperature } + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + fun temperature(temperature: Double) = temperature(temperature as Double?) + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make + * the output more random, while lower values like 0.2 will make it more focused and + * deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun temperature(temperature: Optional) = + temperature(temperature.orElse(null) as Double?) + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only + * the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + fun topP(topP: Double?) = apply { this.topP = topP } /** * An alternative to sampling with temperature, called nucleus sampling, where the model @@ -598,14 +832,31 @@ constructor( * * We generally recommend altering this or `temperature` but not both. */ - fun topP(topP: Double) = apply { this.topP = topP } + fun topP(topP: Double) = topP(topP as Double?) + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. 
So 0.1 means only + * the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun topP(topP: Optional) = topP(topP.orElse(null) as Double?) + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and + * detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + fun user(user: String?) = apply { this.user = user } /** * A unique identifier representing your end-user, which can help OpenAI to monitor and * detect abuse. * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ - fun user(user: String) = apply { this.user = user } + fun user(user: Optional) = user(user.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -712,7 +963,17 @@ constructor( * if a prompt is not specified the model will generate as if from the beginning of a new * document. */ - fun prompt(prompt: Prompt) = apply { body.prompt(prompt) } + fun prompt(prompt: Prompt?) = apply { body.prompt(prompt) } + + /** + * The prompt(s) to generate completions for, encoded as a string, array of strings, array + * of tokens, or array of token arrays. + * + * Note that <|endoftext|> is the document separator that the model sees during training, so + * if a prompt is not specified the model will generate as if from the beginning of a new + * document. + */ + fun prompt(prompt: Optional) = prompt(prompt.orElse(null)) fun prompt(string: String) = apply { body.prompt(string) } @@ -739,10 +1000,44 @@ constructor( * token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` * and `stop`. */ - fun bestOf(bestOf: Long) = apply { body.bestOf(bestOf) } + fun bestOf(bestOf: Long?) = apply { body.bestOf(bestOf) } + + /** + * Generates `best_of` completions server-side and returns the "best" (the one with the + * highest log probability per token). Results cannot be streamed. + * + * When used with `n`, `best_of` controls the number of candidate completions and `n` + * specifies how many to return – `best_of` must be greater than `n`. + * + * **Note:** Because this parameter generates many completions, it can quickly consume your + * token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` + * and `stop`. + */ + fun bestOf(bestOf: Long) = bestOf(bestOf as Long?) + + /** + * Generates `best_of` completions server-side and returns the "best" (the one with the + * highest log probability per token). Results cannot be streamed. + * + * When used with `n`, `best_of` controls the number of candidate completions and `n` + * specifies how many to return – `best_of` must be greater than `n`. + * + * **Note:** Because this parameter generates many completions, it can quickly consume your + * token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` + * and `stop`. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun bestOf(bestOf: Optional) = bestOf(bestOf.orElse(null) as Long?) + + /** Echo back the prompt in addition to the completion */ + fun echo(echo: Boolean?) = apply { body.echo(echo) } + + /** Echo back the prompt in addition to the completion */ + fun echo(echo: Boolean) = echo(echo as Boolean?) 
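// A short hypothetical sketch (not part of this patch) of the main payoff of the
// Optional-accepting overloads: values sourced from Optional-returning APIs can be forwarded
// without unwrapping, and an empty Optional simply leaves the field unset.
import java.util.Optional

fun applyEchoSetting(builder: CompletionCreateParams.Builder, echoSetting: Optional<Boolean>) {
    builder.echo(echoSetting) // resolves to echo(Optional<Boolean>); empty clears the field
}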
         /** Echo back the prompt in addition to the completion */
-        fun echo(echo: Boolean) = apply { body.echo(echo) }
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun echo(echo: Optional<Boolean>) = echo(echo.orElse(null) as Boolean?)

         /**
          * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
          * frequency in the text so far, decreasing the model's likelihood to repeat the same line
          * verbatim.
          *
          * [See more information about frequency and presence
          * penalties.](https://platform.openai.com/docs/guides/text-generation)
          */
-        fun frequencyPenalty(frequencyPenalty: Double) = apply {
+        fun frequencyPenalty(frequencyPenalty: Double?) = apply {
             body.frequencyPenalty(frequencyPenalty)
         }

+        /**
+         * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
+         * frequency in the text so far, decreasing the model's likelihood to repeat the same line
+         * verbatim.
+         *
+         * [See more information about frequency and presence
+         * penalties.](https://platform.openai.com/docs/guides/text-generation)
+         */
+        fun frequencyPenalty(frequencyPenalty: Double) =
+            frequencyPenalty(frequencyPenalty as Double?)
+
+        /**
+         * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
+         * frequency in the text so far, decreasing the model's likelihood to repeat the same line
+         * verbatim.
+         *
+         * [See more information about frequency and presence
+         * penalties.](https://platform.openai.com/docs/guides/text-generation)
+         */
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun frequencyPenalty(frequencyPenalty: Optional<Double>) =
+            frequencyPenalty(frequencyPenalty.orElse(null) as Double?)
+
+        /**
+         * Modify the likelihood of specified tokens appearing in the completion.
+         *
+         * Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer)
+         * to an associated bias value from -100 to 100. You can use this
+         * [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the
+         * bias is added to the logits generated by the model prior to sampling. The exact effect
+         * will vary per model, but values between -1 and 1 should decrease or increase likelihood
+         * of selection; values like -100 or 100 should result in a ban or exclusive selection of
+         * the relevant token.
+         *
+         * As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from
+         * being generated.
+         */
+        fun logitBias(logitBias: LogitBias?) = apply { body.logitBias(logitBias) }
+
         /**
          * Modify the likelihood of specified tokens appearing in the completion.
          *
          * Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer)
          * to an associated bias value from -100 to 100. You can use this
          * [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the
          * bias is added to the logits generated by the model prior to sampling. The exact effect
          * will vary per model, but values between -1 and 1 should decrease or increase likelihood
          * of selection; values like -100 or 100 should result in a ban or exclusive selection of
          * the relevant token.
          *
          * As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from
          * being generated.
          */
-        fun logitBias(logitBias: LogitBias) = apply { body.logitBias(logitBias) }
+        fun logitBias(logitBias: Optional<LogitBias>) = logitBias(logitBias.orElse(null))
+
+        /**
+         * Include the log probabilities on the `logprobs` most likely output tokens, as well as the
+         * chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most
+         * likely tokens. The API will always return the `logprob` of the sampled token, so there
+         * may be up to `logprobs+1` elements in the response.
+         *
+         * The maximum value for `logprobs` is 5.
+         */
+        fun logprobs(logprobs: Long?) = apply { body.logprobs(logprobs) }

         /**
          * Include the log probabilities on the `logprobs` most likely output tokens, as well the
          * chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most
          * likely tokens. The API will always return the `logprob` of the sampled token, so there
          * may be up to `logprobs+1` elements in the response.
          *
          * The maximum value for `logprobs` is 5. 
*/
-        fun logprobs(logprobs: Long) = apply { body.logprobs(logprobs) }
+        fun logprobs(logprobs: Long) = logprobs(logprobs as Long?)
+
+        /**
+         * Include the log probabilities on the `logprobs` most likely output tokens, as well as the
+         * chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most
+         * likely tokens. The API will always return the `logprob` of the sampled token, so there
+         * may be up to `logprobs+1` elements in the response.
+         *
+         * The maximum value for `logprobs` is 5.
+         */
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun logprobs(logprobs: Optional<Long>) = logprobs(logprobs.orElse(null) as Long?)
+
+        /**
+         * The maximum number of [tokens](/tokenizer) that can be generated in the completion.
+         *
+         * The token count of your prompt plus `max_tokens` cannot exceed the model's context
+         * length.
+         * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+         * for counting tokens.
+         */
+        fun maxTokens(maxTokens: Long?) = apply { body.maxTokens(maxTokens) }

         /**
          * The maximum number of [tokens](/tokenizer) that can be generated in the completion.
          *
          * The token count of your prompt plus `max_tokens` cannot exceed the model's context
          * length.
          * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
          * for counting tokens.
          */
-        fun maxTokens(maxTokens: Long) = apply { body.maxTokens(maxTokens) }
+        fun maxTokens(maxTokens: Long) = maxTokens(maxTokens as Long?)
+
+        /**
+         * The maximum number of [tokens](/tokenizer) that can be generated in the completion.
+         *
+         * The token count of your prompt plus `max_tokens` cannot exceed the model's context
+         * length.
+         * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
+         * for counting tokens.
+         */
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun maxTokens(maxTokens: Optional<Long>) = maxTokens(maxTokens.orElse(null) as Long?)
+
+        /**
+         * How many completions to generate for each prompt.
+         *
+         * **Note:** Because this parameter generates many completions, it can quickly consume your
+         * token quota. Use carefully and ensure that you have reasonable settings for `max_tokens`
+         * and `stop`.
+         */
+        fun n(n: Long?) = apply { body.n(n) }
+
+        /**
+         * How many completions to generate for each prompt.
+         *
+         * **Note:** Because this parameter generates many completions, it can quickly consume your
+         * token quota. Use carefully and ensure that you have reasonable settings for `max_tokens`
+         * and `stop`.
+         */
+        fun n(n: Long) = n(n as Long?)

         /**
          * How many completions to generate for each prompt.
          *
          * **Note:** Because this parameter generates many completions, it can quickly consume your
          * token quota. Use carefully and ensure that you have reasonable settings for `max_tokens`
          * and `stop`.
          */
-        fun n(n: Long) = apply { body.n(n) }
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun n(n: Optional<Long>) = n(n.orElse(null) as Long?)

         /**
          * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they
          * appear in the text so far, increasing the model's likelihood to talk about new topics.
          *
          * [See more information about frequency and presence
          * penalties.](https://platform.openai.com/docs/guides/text-generation)
          */
-        fun presencePenalty(presencePenalty: Double) = apply {
+        fun presencePenalty(presencePenalty: Double?) = apply {
             body.presencePenalty(presencePenalty)
         }

+        /**
+         * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they
+         * appear in the text so far, increasing the model's likelihood to talk about new topics. 
+ * + * [See more information about frequency and presence + * penalties.](https://platform.openai.com/docs/guides/text-generation) + */ + fun presencePenalty(presencePenalty: Double) = presencePenalty(presencePenalty as Double?) + + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they + * appear in the text so far, increasing the model's likelihood to talk about new topics. + * + * [See more information about frequency and presence + * penalties.](https://platform.openai.com/docs/guides/text-generation) + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun presencePenalty(presencePenalty: Optional) = + presencePenalty(presencePenalty.orElse(null) as Double?) + + /** + * If specified, our system will make a best effort to sample deterministically, such that + * repeated requests with the same `seed` and parameters should return the same result. + * + * Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + * parameter to monitor changes in the backend. + */ + fun seed(seed: Long?) = apply { body.seed(seed) } + + /** + * If specified, our system will make a best effort to sample deterministically, such that + * repeated requests with the same `seed` and parameters should return the same result. + * + * Determinism is not guaranteed, and you should refer to the `system_fingerprint` response + * parameter to monitor changes in the backend. + */ + fun seed(seed: Long) = seed(seed as Long?) + /** * If specified, our system will make a best effort to sample deterministically, such that * repeated requests with the same `seed` and parameters should return the same result. @@ -819,29 +1252,47 @@ constructor( * Determinism is not guaranteed, and you should refer to the `system_fingerprint` response * parameter to monitor changes in the backend. */ - fun seed(seed: Long) = apply { body.seed(seed) } + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun seed(seed: Optional) = seed(seed.orElse(null) as Long?) /** * Up to 4 sequences where the API will stop generating further tokens. The returned text * will not contain the stop sequence. */ - fun stop(stop: Stop) = apply { body.stop(stop) } + fun stop(stop: Stop?) = apply { body.stop(stop) } + + /** + * Up to 4 sequences where the API will stop generating further tokens. The returned text + * will not contain the stop sequence. + */ + fun stop(stop: Optional) = stop(stop.orElse(null)) fun stop(string: String) = apply { body.stop(string) } fun stopOfStrings(strings: List) = apply { body.stopOfStrings(strings) } /** Options for streaming response. Only set this when you set `stream: true`. */ - fun streamOptions(streamOptions: ChatCompletionStreamOptions) = apply { + fun streamOptions(streamOptions: ChatCompletionStreamOptions?) = apply { body.streamOptions(streamOptions) } + /** Options for streaming response. Only set this when you set `stream: true`. */ + fun streamOptions(streamOptions: Optional) = + streamOptions(streamOptions.orElse(null)) + + /** + * The suffix that comes after a completion of inserted text. + * + * This parameter is only supported for `gpt-3.5-turbo-instruct`. + */ + fun suffix(suffix: String?) = apply { body.suffix(suffix) } + /** * The suffix that comes after a completion of inserted text. * * This parameter is only supported for `gpt-3.5-turbo-instruct`. 
*/ - fun suffix(suffix: String) = apply { body.suffix(suffix) } + fun suffix(suffix: Optional) = suffix(suffix.orElse(null)) /** * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the @@ -850,7 +1301,27 @@ constructor( * * We generally recommend altering this or `top_p` but not both. */ - fun temperature(temperature: Double) = apply { body.temperature(temperature) } + fun temperature(temperature: Double?) = apply { body.temperature(temperature) } + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused and + * deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + fun temperature(temperature: Double) = temperature(temperature as Double?) + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + * output more random, while lower values like 0.2 will make it more focused and + * deterministic. + * + * We generally recommend altering this or `top_p` but not both. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun temperature(temperature: Optional) = + temperature(temperature.orElse(null) as Double?) /** * An alternative to sampling with temperature, called nucleus sampling, where the model @@ -859,14 +1330,40 @@ constructor( * * We generally recommend altering this or `temperature` but not both. */ - fun topP(topP: Double) = apply { body.topP(topP) } + fun topP(topP: Double?) = apply { body.topP(topP) } + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the + * tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + fun topP(topP: Double) = topP(topP as Double?) + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the model + * considers the results of the tokens with top_p probability mass. So 0.1 means only the + * tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun topP(topP: Optional) = topP(topP.orElse(null) as Double?) + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and + * detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + fun user(user: String?) = apply { body.user(user) } /** * A unique identifier representing your end-user, which can help OpenAI to monitor and * detect abuse. * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
*/ - fun user(user: String) = apply { body.user(user) } + fun user(user: Optional) = user(user.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/EmbeddingCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/EmbeddingCreateParams.kt index 28a1d4bd5..2cdd139c8 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/EmbeddingCreateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/EmbeddingCreateParams.kt @@ -216,22 +216,50 @@ constructor( * The number of dimensions the resulting output embeddings should have. Only supported * in `text-embedding-3` and later models. */ - fun dimensions(dimensions: Long) = apply { this.dimensions = dimensions } + fun dimensions(dimensions: Long?) = apply { this.dimensions = dimensions } + + /** + * The number of dimensions the resulting output embeddings should have. Only supported + * in `text-embedding-3` and later models. + */ + fun dimensions(dimensions: Long) = dimensions(dimensions as Long?) + + /** + * The number of dimensions the resulting output embeddings should have. Only supported + * in `text-embedding-3` and later models. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun dimensions(dimensions: Optional) = + dimensions(dimensions.orElse(null) as Long?) /** * The format to return the embeddings in. Can be either `float` or * [`base64`](https://pypi.org/project/pybase64/). */ - fun encodingFormat(encodingFormat: EncodingFormat) = apply { + fun encodingFormat(encodingFormat: EncodingFormat?) = apply { this.encodingFormat = encodingFormat } + /** + * The format to return the embeddings in. Can be either `float` or + * [`base64`](https://pypi.org/project/pybase64/). + */ + fun encodingFormat(encodingFormat: Optional) = + encodingFormat(encodingFormat.orElse(null)) + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and + * detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + fun user(user: String?) = apply { this.user = user } + /** * A unique identifier representing your end-user, which can help OpenAI to monitor and * detect abuse. * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ - fun user(user: String) = apply { this.user = user } + fun user(user: Optional) = user(user.orElse(null)) fun additionalProperties(additionalProperties: Map) = apply { this.additionalProperties.clear() @@ -350,22 +378,49 @@ constructor( * The number of dimensions the resulting output embeddings should have. Only supported in * `text-embedding-3` and later models. */ - fun dimensions(dimensions: Long) = apply { body.dimensions(dimensions) } + fun dimensions(dimensions: Long?) = apply { body.dimensions(dimensions) } + + /** + * The number of dimensions the resulting output embeddings should have. Only supported in + * `text-embedding-3` and later models. + */ + fun dimensions(dimensions: Long) = dimensions(dimensions as Long?) + + /** + * The number of dimensions the resulting output embeddings should have. Only supported in + * `text-embedding-3` and later models. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun dimensions(dimensions: Optional) = dimensions(dimensions.orElse(null) as Long?) /** * The format to return the embeddings in. 
Can be either `float` or * [`base64`](https://pypi.org/project/pybase64/). */ - fun encodingFormat(encodingFormat: EncodingFormat) = apply { + fun encodingFormat(encodingFormat: EncodingFormat?) = apply { body.encodingFormat(encodingFormat) } + /** + * The format to return the embeddings in. Can be either `float` or + * [`base64`](https://pypi.org/project/pybase64/). + */ + fun encodingFormat(encodingFormat: Optional) = + encodingFormat(encodingFormat.orElse(null)) + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor and + * detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + fun user(user: String?) = apply { body.user(user) } + /** * A unique identifier representing your end-user, which can help OpenAI to monitor and * detect abuse. * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ - fun user(user: String) = apply { body.user(user) } + fun user(user: Optional) = user(user.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/FileListParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/FileListParams.kt index c65e94073..6deee87f7 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/FileListParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/FileListParams.kt @@ -94,22 +94,52 @@ constructor( * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page * of the list. */ - fun after(after: String) = apply { this.after = after } + fun after(after: String?) = apply { this.after = after } + + /** + * A cursor for use in pagination. `after` is an object ID that defines your place in the + * list. For instance, if you make a list request and receive 100 objects, ending with + * obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page + * of the list. + */ + fun after(after: Optional) = after(after.orElse(null)) + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 10,000, + * and the default is 10,000. + */ + fun limit(limit: Long?) = apply { this.limit = limit } /** * A limit on the number of objects to be returned. Limit can range between 1 and 10,000, * and the default is 10,000. */ - fun limit(limit: Long) = apply { this.limit = limit } + fun limit(limit: Long) = limit(limit as Long?) + + /** + * A limit on the number of objects to be returned. Limit can range between 1 and 10,000, + * and the default is 10,000. + */ + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun limit(limit: Optional) = limit(limit.orElse(null) as Long?) + + /** + * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and + * `desc` for descending order. + */ + fun order(order: Order?) = apply { this.order = order } /** * Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and * `desc` for descending order. */ - fun order(order: Order) = apply { this.order = order } + fun order(order: Optional) = order(order.orElse(null)) + + /** Only return files with the given purpose. */ + fun purpose(purpose: String?) = apply { this.purpose = purpose } /** Only return files with the given purpose. 
*/ - fun purpose(purpose: String) = apply { this.purpose = purpose } + fun purpose(purpose: Optional) = purpose(purpose.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobCheckpointListParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobCheckpointListParams.kt index 7ae492736..95b555efb 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobCheckpointListParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobCheckpointListParams.kt @@ -79,10 +79,20 @@ constructor( } /** Identifier for the last checkpoint ID from the previous pagination request. */ - fun after(after: String) = apply { this.after = after } + fun after(after: String?) = apply { this.after = after } + + /** Identifier for the last checkpoint ID from the previous pagination request. */ + fun after(after: Optional) = after(after.orElse(null)) + + /** Number of checkpoints to retrieve. */ + fun limit(limit: Long?) = apply { this.limit = limit } + + /** Number of checkpoints to retrieve. */ + fun limit(limit: Long) = limit(limit as Long?) /** Number of checkpoints to retrieve. */ - fun limit(limit: Long) = apply { this.limit = limit } + @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228 + fun limit(limit: Optional) = limit(limit.orElse(null) as Long?) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() diff --git a/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobCreateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobCreateParams.kt index 68ef12b5d..d620542f1 100644 --- a/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobCreateParams.kt +++ b/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobCreateParams.kt @@ -278,29 +278,58 @@ constructor( * The hyperparameters used for the fine-tuning job. This value is now deprecated in * favor of `method`, and should be passed in under the `method` parameter. */ - fun hyperparameters(hyperparameters: Hyperparameters) = apply { + fun hyperparameters(hyperparameters: Hyperparameters?) = apply { this.hyperparameters = hyperparameters } + /** + * The hyperparameters used for the fine-tuning job. This value is now deprecated in + * favor of `method`, and should be passed in under the `method` parameter. + */ + fun hyperparameters(hyperparameters: Optional) = + hyperparameters(hyperparameters.orElse(null)) + /** A list of integrations to enable for your fine-tuning job. */ - fun integrations(integrations: List) = apply { - this.integrations = integrations.toMutableList() + fun integrations(integrations: List?) = apply { + this.integrations = integrations?.toMutableList() } + /** A list of integrations to enable for your fine-tuning job. */ + fun integrations(integrations: Optional>) = + integrations(integrations.orElse(null)) + /** A list of integrations to enable for your fine-tuning job. */ fun addIntegration(integration: Integration) = apply { integrations = (integrations ?: mutableListOf()).apply { add(integration) } } /** The method used for fine-tuning. */ - fun method(method: Method) = apply { this.method = method } + fun method(method: Method?) = apply { this.method = method } + + /** The method used for fine-tuning. */ + fun method(method: Optional) = method(method.orElse(null)) + + /** + * The seed controls the reproducibility of the job. 
Passing in the same seed and job
+             * parameters should produce the same results, but may differ in rare cases. If a seed
+             * is not specified, one will be generated for you.
+             */
+            fun seed(seed: Long?) = apply { this.seed = seed }
+
+            /**
+             * The seed controls the reproducibility of the job. Passing in the same seed and job
+             * parameters should produce the same results, but may differ in rare cases. If a seed
+             * is not specified, one will be generated for you.
+             */
+            fun seed(seed: Long) = seed(seed as Long?)

             /**
              * The seed controls the reproducibility of the job. Passing in the same seed and job
              * parameters should produce the same results, but may differ in rare cases. If a seed
              * is not specified, one will be generated for you.
              */
-            fun seed(seed: Long) = apply { this.seed = seed }
+            @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+            fun seed(seed: Optional<Long>) = seed(seed.orElse(null) as Long?)

             /**
              * A string of up to 64 characters that will be added to your fine-tuned model name.
              *
              * For example, a `suffix` of "custom-model-name" would produce a model name like
              * `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
              */
-            fun suffix(suffix: String) = apply { this.suffix = suffix }
+            fun suffix(suffix: String?) = apply { this.suffix = suffix }
+
+            /**
+             * A string of up to 64 characters that will be added to your fine-tuned model name.
+             *
+             * For example, a `suffix` of "custom-model-name" would produce a model name like
+             * `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
+             */
+            fun suffix(suffix: Optional<String>) = suffix(suffix.orElse(null))

             /**
              * The ID of an uploaded file that contains validation data.
              *
              * If you provide this file, the data is used to generate validation metrics
              * periodically during fine-tuning. These metrics can be viewed in the fine-tuning
              * results file. The same data should not be present in both train and validation files.
              *
              * Your dataset must be formatted as a JSONL file. You must upload your file with the
              * purpose `fine-tune`.
              *
              * See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for
              * more details.
              */
-            fun validationFile(validationFile: String) = apply {
+            fun validationFile(validationFile: String?) = apply {
                 this.validationFile = validationFile
             }

+            /**
+             * The ID of an uploaded file that contains validation data.
+             *
+             * If you provide this file, the data is used to generate validation metrics
+             * periodically during fine-tuning. These metrics can be viewed in the fine-tuning
+             * results file. The same data should not be present in both train and validation files.
+             *
+             * Your dataset must be formatted as a JSONL file. You must upload your file with the
+             * purpose `fine-tune`.
+             *
+             * See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for
+             * more details.
+             */
+            fun validationFile(validationFile: Optional<String>) =
+                validationFile(validationFile.orElse(null))
+
             fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
                 this.additionalProperties.clear()
                 putAllAdditionalProperties(additionalProperties)
@@ -436,27 +489,64 @@ constructor(
          * The hyperparameters used for the fine-tuning job. This value is now deprecated in favor
          * of `method`, and should be passed in under the `method` parameter.
          */
-        fun hyperparameters(hyperparameters: Hyperparameters) = apply {
+        fun hyperparameters(hyperparameters: Hyperparameters?) = apply {
             body.hyperparameters(hyperparameters)
         }

+        /**
+         * The hyperparameters used for the fine-tuning job. This value is now deprecated in favor
+         * of `method`, and should be passed in under the `method` parameter.
+         */
+        fun hyperparameters(hyperparameters: Optional<Hyperparameters>) =
+            hyperparameters(hyperparameters.orElse(null))
+
         /** A list of integrations to enable for your fine-tuning job. 
*/
-        fun integrations(integrations: List<Integration>) = apply {
+        fun integrations(integrations: List<Integration>?) = apply {
             body.integrations(integrations)
         }

+        /** A list of integrations to enable for your fine-tuning job. */
+        fun integrations(integrations: Optional<List<Integration>>) =
+            integrations(integrations.orElse(null))
+
         /** A list of integrations to enable for your fine-tuning job. */
         fun addIntegration(integration: Integration) = apply { body.addIntegration(integration) }

         /** The method used for fine-tuning. */
-        fun method(method: Method) = apply { body.method(method) }
+        fun method(method: Method?) = apply { body.method(method) }
+
+        /** The method used for fine-tuning. */
+        fun method(method: Optional<Method>) = method(method.orElse(null))
+
+        /**
+         * The seed controls the reproducibility of the job. Passing in the same seed and job
+         * parameters should produce the same results, but may differ in rare cases. If a seed is
+         * not specified, one will be generated for you.
+         */
+        fun seed(seed: Long?) = apply { body.seed(seed) }
+
+        /**
+         * The seed controls the reproducibility of the job. Passing in the same seed and job
+         * parameters should produce the same results, but may differ in rare cases. If a seed is
+         * not specified, one will be generated for you.
+         */
+        fun seed(seed: Long) = seed(seed as Long?)

         /**
          * The seed controls the reproducibility of the job. Passing in the same seed and job
          * parameters should produce the same results, but may differ in rare cases. If a seed is
          * not specified, one will be generated for you.
          */
-        fun seed(seed: Long) = apply { body.seed(seed) }
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun seed(seed: Optional<Long>) = seed(seed.orElse(null) as Long?)
+
+        /**
+         * A string of up to 64 characters that will be added to your fine-tuned model name.
+         *
+         * For example, a `suffix` of "custom-model-name" would produce a model name like
+         * `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
+         */
+        fun suffix(suffix: String?) = apply { body.suffix(suffix) }

         /**
          * A string of up to 64 characters that will be added to your fine-tuned model name.
         *
          * For example, a `suffix` of "custom-model-name" would produce a model name like
          * `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
          */
-        fun suffix(suffix: String) = apply { body.suffix(suffix) }
+        fun suffix(suffix: Optional<String>) = suffix(suffix.orElse(null))
+
+        /**
+         * The ID of an uploaded file that contains validation data.
+         *
+         * If you provide this file, the data is used to generate validation metrics periodically
+         * during fine-tuning. These metrics can be viewed in the fine-tuning results file. The same
+         * data should not be present in both train and validation files.
+         *
+         * Your dataset must be formatted as a JSONL file. You must upload your file with the
+         * purpose `fine-tune`.
+         *
+         * See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more
+         * details.
+         */
+        fun validationFile(validationFile: String?) = apply { body.validationFile(validationFile) }

         /**
          * The ID of an uploaded file that contains validation data.
          *
          * If you provide this file, the data is used to generate validation metrics periodically
          * during fine-tuning. These metrics can be viewed in the fine-tuning results file. The same
          * data should not be present in both train and validation files.
          *
          * Your dataset must be formatted as a JSONL file. You must upload your file with the
          * purpose `fine-tune`.
          *
          * See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more
          * details. 
*/ - fun validationFile(validationFile: String) = apply { body.validationFile(validationFile) } + fun validationFile(validationFile: Optional) = + validationFile(validationFile.orElse(null)) fun additionalHeaders(additionalHeaders: Headers) = apply { this.additionalHeaders.clear() @@ -742,7 +848,13 @@ constructor( * Number of examples in each batch. A larger batch size means that model parameters are * updated less frequently, but with lower variance. */ - fun batchSize(batchSize: BatchSize) = apply { this.batchSize = batchSize } + fun batchSize(batchSize: BatchSize?) = apply { this.batchSize = batchSize } + + /** + * Number of examples in each batch. A larger batch size means that model parameters are + * updated less frequently, but with lower variance. + */ + fun batchSize(batchSize: Optional) = batchSize(batchSize.orElse(null)) fun batchSize(behavior: BatchSize.Behavior) = apply { this.batchSize = BatchSize.ofBehavior(behavior) @@ -754,10 +866,17 @@ constructor( * Scaling factor for the learning rate. A smaller learning rate may be useful to avoid * overfitting. */ - fun learningRateMultiplier(learningRateMultiplier: LearningRateMultiplier) = apply { + fun learningRateMultiplier(learningRateMultiplier: LearningRateMultiplier?) = apply { this.learningRateMultiplier = learningRateMultiplier } + /** + * Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + * overfitting. + */ + fun learningRateMultiplier(learningRateMultiplier: Optional) = + learningRateMultiplier(learningRateMultiplier.orElse(null)) + fun learningRateMultiplier(behavior: LearningRateMultiplier.Behavior) = apply { this.learningRateMultiplier = LearningRateMultiplier.ofBehavior(behavior) } @@ -770,7 +889,13 @@ constructor( * The number of epochs to train the model for. An epoch refers to one full cycle * through the training dataset. */ - fun nEpochs(nEpochs: NEpochs) = apply { this.nEpochs = nEpochs } + fun nEpochs(nEpochs: NEpochs?) = apply { this.nEpochs = nEpochs } + + /** + * The number of epochs to train the model for. An epoch refers to one full cycle + * through the training dataset. + */ + fun nEpochs(nEpochs: Optional) = nEpochs(nEpochs.orElse(null)) fun nEpochs(behavior: NEpochs.Behavior) = apply { this.nEpochs = NEpochs.ofBehavior(behavior) @@ -1508,20 +1633,40 @@ constructor( * WandB user that you would like associated with the run. If not set, the default * entity for the registered WandB API key is used. */ - fun entity(entity: String) = apply { this.entity = entity } + fun entity(entity: String?) = apply { this.entity = entity } + + /** + * The entity to use for the run. This allows you to set the team or username of the + * WandB user that you would like associated with the run. If not set, the default + * entity for the registered WandB API key is used. + */ + fun entity(entity: Optional) = entity(entity.orElse(null)) + + /** + * A display name to set for the run. If not set, we will use the Job ID as the + * name. + */ + fun name(name: String?) = apply { this.name = name } /** * A display name to set for the run. If not set, we will use the Job ID as the * name. */ - fun name(name: String) = apply { this.name = name } + fun name(name: Optional) = name(name.orElse(null)) + + /** + * A list of tags to be attached to the newly created run. These tags are passed + * through directly to WandB. Some default tags are generated by OpenAI: + * "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + */ + fun tags(tags: List?) 
 
                 /**
                  * A list of tags to be attached to the newly created run. These tags are passed
                  * through directly to WandB. Some default tags are generated by OpenAI:
                  * "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
                  */
-                fun tags(tags: List<String>) = apply { this.tags = tags.toMutableList() }
+                fun tags(tags: Optional<List<String>>) = tags(tags.orElse(null))
 
                 /**
                  * A list of tags to be attached to the newly created run. These tags are passed
@@ -1649,13 +1794,22 @@ constructor(
             }
 
             /** Configuration for the DPO fine-tuning method. */
-            fun dpo(dpo: Dpo) = apply { this.dpo = dpo }
+            fun dpo(dpo: Dpo?) = apply { this.dpo = dpo }
+
+            /** Configuration for the DPO fine-tuning method. */
+            fun dpo(dpo: Optional<Dpo>) = dpo(dpo.orElse(null))
+
+            /** Configuration for the supervised fine-tuning method. */
+            fun supervised(supervised: Supervised?) = apply { this.supervised = supervised }
 
             /** Configuration for the supervised fine-tuning method. */
-            fun supervised(supervised: Supervised) = apply { this.supervised = supervised }
+            fun supervised(supervised: Optional<Supervised>) = supervised(supervised.orElse(null))
 
             /** The type of method. Is either `supervised` or `dpo`. */
-            fun type(type: Type) = apply { this.type = type }
+            fun type(type: Type?) = apply { this.type = type }
+
+            /** The type of method. Is either `supervised` or `dpo`. */
+            fun type(type: Optional<Type>) = type(type.orElse(null))
 
             fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
                 this.additionalProperties.clear()
@@ -1722,10 +1876,14 @@ constructor(
                 }
 
                 /** The hyperparameters used for the fine-tuning job. */
-                fun hyperparameters(hyperparameters: Hyperparameters) = apply {
+                fun hyperparameters(hyperparameters: Hyperparameters?) = apply {
                     this.hyperparameters = hyperparameters
                 }
 
+                /** The hyperparameters used for the fine-tuning job. */
+                fun hyperparameters(hyperparameters: Optional<Hyperparameters>) =
+                    hyperparameters(hyperparameters.orElse(null))
+
                 fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
                     this.additionalProperties.clear()
                     putAllAdditionalProperties(additionalProperties)
@@ -1825,7 +1983,14 @@ constructor(
                      * Number of examples in each batch. A larger batch size means that model
                      * parameters are updated less frequently, but with lower variance.
                      */
-                    fun batchSize(batchSize: BatchSize) = apply { this.batchSize = batchSize }
+                    fun batchSize(batchSize: BatchSize?) = apply { this.batchSize = batchSize }
+
+                    /**
+                     * Number of examples in each batch. A larger batch size means that model
+                     * parameters are updated less frequently, but with lower variance.
+                     */
+                    fun batchSize(batchSize: Optional<BatchSize>) =
+                        batchSize(batchSize.orElse(null))
 
                     fun batchSize(auto: BatchSize.Auto) = apply {
                         this.batchSize = BatchSize.ofAuto(auto)
@@ -1839,7 +2004,13 @@ constructor(
                      * The beta value for the DPO method. A higher beta value will increase the
                      * weight of the penalty between the policy and reference model.
                      */
-                    fun beta(beta: Beta) = apply { this.beta = beta }
+                    fun beta(beta: Beta?) = apply { this.beta = beta }
+
+                    /**
+                     * The beta value for the DPO method. A higher beta value will increase the
+                     * weight of the penalty between the policy and reference model.
+                     */
+                    fun beta(beta: Optional<Beta>) = beta(beta.orElse(null))
 
                     fun beta(auto: Beta.Auto) = apply { this.beta = Beta.ofAuto(auto) }
 
@@ -1849,11 +2020,19 @@ constructor(
                      * Scaling factor for the learning rate. A smaller learning rate may be useful
                      * to avoid overfitting.
                      */
-                    fun learningRateMultiplier(learningRateMultiplier: LearningRateMultiplier) =
+                    fun learningRateMultiplier(learningRateMultiplier: LearningRateMultiplier?) =
                         apply {
                             this.learningRateMultiplier = learningRateMultiplier
                         }
 
+                    /**
+                     * Scaling factor for the learning rate. A smaller learning rate may be useful
+                     * to avoid overfitting.
+                     */
+                    fun learningRateMultiplier(
+                        learningRateMultiplier: Optional<LearningRateMultiplier>
+                    ) = learningRateMultiplier(learningRateMultiplier.orElse(null))
+
                     fun learningRateMultiplier(auto: LearningRateMultiplier.Auto) = apply {
                         this.learningRateMultiplier = LearningRateMultiplier.ofAuto(auto)
                     }
@@ -1866,7 +2045,13 @@ constructor(
                      * The number of epochs to train the model for. An epoch refers to one full
                      * cycle through the training dataset.
                      */
-                    fun nEpochs(nEpochs: NEpochs) = apply { this.nEpochs = nEpochs }
+                    fun nEpochs(nEpochs: NEpochs?) = apply { this.nEpochs = nEpochs }
+
+                    /**
+                     * The number of epochs to train the model for. An epoch refers to one full
+                     * cycle through the training dataset.
+                     */
+                    fun nEpochs(nEpochs: Optional<NEpochs>) = nEpochs(nEpochs.orElse(null))
 
                     fun nEpochs(auto: NEpochs.Auto) = apply { this.nEpochs = NEpochs.ofAuto(auto) }
@@ -2609,10 +2794,14 @@ constructor(
                 }
 
                 /** The hyperparameters used for the fine-tuning job. */
-                fun hyperparameters(hyperparameters: Hyperparameters) = apply {
+                fun hyperparameters(hyperparameters: Hyperparameters?) = apply {
                     this.hyperparameters = hyperparameters
                 }
 
+                /** The hyperparameters used for the fine-tuning job. */
+                fun hyperparameters(hyperparameters: Optional<Hyperparameters>) =
+                    hyperparameters(hyperparameters.orElse(null))
+
                 fun additionalProperties(additionalProperties: Map<String, JsonValue>) = apply {
                     this.additionalProperties.clear()
                     putAllAdditionalProperties(additionalProperties)
@@ -2704,7 +2893,14 @@ constructor(
                      * Number of examples in each batch. A larger batch size means that model
                      * parameters are updated less frequently, but with lower variance.
                      */
-                    fun batchSize(batchSize: BatchSize) = apply { this.batchSize = batchSize }
+                    fun batchSize(batchSize: BatchSize?) = apply { this.batchSize = batchSize }
+
+                    /**
+                     * Number of examples in each batch. A larger batch size means that model
+                     * parameters are updated less frequently, but with lower variance.
+                     */
+                    fun batchSize(batchSize: Optional<BatchSize>) =
+                        batchSize(batchSize.orElse(null))
 
                     fun batchSize(auto: BatchSize.Auto) = apply {
                         this.batchSize = BatchSize.ofAuto(auto)
@@ -2718,11 +2914,19 @@ constructor(
                      * Scaling factor for the learning rate. A smaller learning rate may be useful
                      * to avoid overfitting.
                      */
-                    fun learningRateMultiplier(learningRateMultiplier: LearningRateMultiplier) =
+                    fun learningRateMultiplier(learningRateMultiplier: LearningRateMultiplier?) =
                         apply {
                             this.learningRateMultiplier = learningRateMultiplier
                         }
 
+                    /**
+                     * Scaling factor for the learning rate. A smaller learning rate may be useful
+                     * to avoid overfitting.
+                     */
+                    fun learningRateMultiplier(
+                        learningRateMultiplier: Optional<LearningRateMultiplier>
+                    ) = learningRateMultiplier(learningRateMultiplier.orElse(null))
+
                     fun learningRateMultiplier(auto: LearningRateMultiplier.Auto) = apply {
                         this.learningRateMultiplier = LearningRateMultiplier.ofAuto(auto)
                     }
@@ -2735,7 +2939,13 @@ constructor(
                      * The number of epochs to train the model for. An epoch refers to one full
                      * cycle through the training dataset.
                      */
-                    fun nEpochs(nEpochs: NEpochs) = apply { this.nEpochs = nEpochs }
+                    fun nEpochs(nEpochs: NEpochs?) = apply { this.nEpochs = nEpochs }
+
+                    /**
+                     * The number of epochs to train the model for. An epoch refers to one full
+                     * cycle through the training dataset.
+                     */
+                    fun nEpochs(nEpochs: Optional<NEpochs>) = nEpochs(nEpochs.orElse(null))
 
                     fun nEpochs(auto: NEpochs.Auto) = apply { this.nEpochs = NEpochs.ofAuto(auto) }
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobListEventsParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobListEventsParams.kt
index fa6115f28..89d79e0a4 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobListEventsParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobListEventsParams.kt
@@ -77,10 +77,20 @@ constructor(
         }
 
         /** Identifier for the last event from the previous pagination request. */
-        fun after(after: String) = apply { this.after = after }
+        fun after(after: String?) = apply { this.after = after }
+
+        /** Identifier for the last event from the previous pagination request. */
+        fun after(after: Optional<String>) = after(after.orElse(null))
+
+        /** Number of events to retrieve. */
+        fun limit(limit: Long?) = apply { this.limit = limit }
+
+        /** Number of events to retrieve. */
+        fun limit(limit: Long) = limit(limit as Long?)
 
         /** Number of events to retrieve. */
-        fun limit(limit: Long) = apply { this.limit = limit }
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun limit(limit: Optional<Long>) = limit(limit.orElse(null) as Long?)
 
         fun additionalHeaders(additionalHeaders: Headers) = apply {
             this.additionalHeaders.clear()
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobListParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobListParams.kt
index ad39df8eb..37439863d 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobListParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/FineTuningJobListParams.kt
@@ -61,10 +61,20 @@ constructor(
         }
 
         /** Identifier for the last job from the previous pagination request. */
-        fun after(after: String) = apply { this.after = after }
+        fun after(after: String?) = apply { this.after = after }
+
+        /** Identifier for the last job from the previous pagination request. */
+        fun after(after: Optional<String>) = after(after.orElse(null))
+
+        /** Number of fine-tuning jobs to retrieve. */
+        fun limit(limit: Long?) = apply { this.limit = limit }
+
+        /** Number of fine-tuning jobs to retrieve. */
+        fun limit(limit: Long) = limit(limit as Long?)
 
         /** Number of fine-tuning jobs to retrieve. */
-        fun limit(limit: Long) = apply { this.limit = limit }
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun limit(limit: Optional<Long>) = limit(limit.orElse(null) as Long?)
 
         fun additionalHeaders(additionalHeaders: Headers) = apply {
             this.additionalHeaders.clear()
diff --git a/openai-java-core/src/main/kotlin/com/openai/models/ImageGenerateParams.kt b/openai-java-core/src/main/kotlin/com/openai/models/ImageGenerateParams.kt
index a3d22fcf7..425554787 100644
--- a/openai-java-core/src/main/kotlin/com/openai/models/ImageGenerateParams.kt
+++ b/openai-java-core/src/main/kotlin/com/openai/models/ImageGenerateParams.kt
@@ -195,7 +195,10 @@ constructor(
         fun prompt(prompt: String) = apply { this.prompt = prompt }
 
         /** The model to use for image generation. */
-        fun model(model: ImageModel) = apply { this.model = model }
+        fun model(model: ImageModel?) = apply { this.model = model }
+
+        /** The model to use for image generation. */
+        fun model(model: Optional<ImageModel>) = model(model.orElse(null))
 
         /** The model to use for image generation. */
         fun model(value: String) = apply { model = ImageModel.of(value) }
@@ -204,29 +207,63 @@ constructor(
          * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
          * `n=1` is supported.
          */
-        fun n(n: Long) = apply { this.n = n }
+        fun n(n: Long?) = apply { this.n = n }
+
+        /**
+         * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+         * `n=1` is supported.
+         */
+        fun n(n: Long) = n(n as Long?)
+
+        /**
+         * The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
+         * `n=1` is supported.
+         */
+        @Suppress("USELESS_CAST") // See https://youtrack.jetbrains.com/issue/KT-74228
+        fun n(n: Optional<Long>) = n(n.orElse(null) as Long?)
+
+        /**
+         * The quality of the image that will be generated. `hd` creates images with finer
+         * details and greater consistency across the image. This param is only supported for
+         * `dall-e-3`.
+         */
+        fun quality(quality: Quality?) = apply { this.quality = quality }
 
         /**
          * The quality of the image that will be generated. `hd` creates images with finer
          * details and greater consistency across the image. This param is only supported for
          * `dall-e-3`.
         */
-        fun quality(quality: Quality) = apply { this.quality = quality }
+        fun quality(quality: Optional<Quality>) = quality(quality.orElse(null))
 
         /**
          * The format in which the generated images are returned. Must be one of `url` or
          * `b64_json`. URLs are only valid for 60 minutes after the image has been generated.
          */
-        fun responseFormat(responseFormat: ResponseFormat) = apply {
+        fun responseFormat(responseFormat: ResponseFormat?) = apply {
             this.responseFormat = responseFormat
         }
 
+        /**
+         * The format in which the generated images are returned. Must be one of `url` or
+         * `b64_json`. URLs are only valid for 60 minutes after the image has been generated.
+         */
+        fun responseFormat(responseFormat: Optional<ResponseFormat>) =
+            responseFormat(responseFormat.orElse(null))
+
         /**
          * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`
          * for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for
          * `dall-e-3` models.
         */
-        fun size(size: Size) = apply { this.size = size }
+        fun size(size: Size?) = apply { this.size = size }
+
+        /**
+         * The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`
+         * for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for
+         * `dall-e-3` models.
+         */
+        fun size(size: Optional<Size>) = size(size.orElse(null))
 
         /**
          * The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes
          * the model to lean towards generating hyper-real and dramatic images. Natural causes
          * the model to produce more natural, less hyper-real looking images. This param is only
          * supported for `dall-e-3`.
         */
-        fun style(style: Style) = apply { this.style = style }
+        fun style(style: Style?) = apply { this.style = style }
+
+        /**
+         * The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes
+         * the model to lean towards generating hyper-real and dramatic images. Natural causes
+         * the model to produce more natural, less hyper-real looking images. This param is only
+         * supported for `dall-e-3`.
+         */
+        fun style(style: Optional
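Taken together, the pattern in this patch gives each nullable field a matched pair of setters: a Kotlin-nullable `T?` overload and a `java.util.Optional<T>` overload that unwraps via `orElse(null)`, plus a non-null boxed overload for primitives such as `Long`. A minimal caller-side sketch of the resulting ergonomics (illustrative only: the file IDs are hypothetical, and `model`/`trainingFile` are the required setters defined elsewhere in FineTuningJobCreateParams):

    import java.util.Optional

    val params = FineTuningJobCreateParams.builder()
        .model("gpt-4o-mini")                       // required field, unchanged by this patch
        .trainingFile("file-abc123")                // hypothetical file ID
        .suffix(null as String?)                    // explicit null clears the nullable field
        .seed(Optional.empty())                     // Optional.empty() unwraps to null
        .validationFile(Optional.of("file-def456")) // hypothetical file ID
        .build()

The `null as String?` cast disambiguates between the `suffix(String?)` and `suffix(Optional<String>)` overloads, both of which accept a bare `null`. A similar ambiguity is why the generated `Optional<Long>` setters cast `orElse(null)` through `Long?` under `@Suppress("USELESS_CAST")`: the cast looks redundant to the compiler (KT-74228), but it steers overload resolution to the nullable setter rather than the non-null boxed one.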