diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 6c3b02fed..b295c3f54 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.70.3"
+ ".": "4.71.0"
}
diff --git a/.stats.yml b/.stats.yml
index 39413df44..f368bc881 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 68
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-7b0a5d715d94f75ac7795bd4d2175a0e3243af9b935a86c273f371e45583140f.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-2f8ca92b9b1879fd535b685e4767338413fcd533d42f3baac13a9c41da3fce35.yml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index abe273b81..bb769c53e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog
+## 4.71.0 (2024-11-04)
+
+Full Changelog: [v4.70.3...v4.71.0](https://github.com/openai/openai-node/compare/v4.70.3...v4.71.0)
+
+### Features
+
+* **api:** add support for predicted outputs ([#1172](https://github.com/openai/openai-node/issues/1172)) ([08a7bb4](https://github.com/openai/openai-node/commit/08a7bb4d4b751aeed9655bfcb9fa27fc79a767c4))
+
## 4.70.3 (2024-11-04)
Full Changelog: [v4.70.2...v4.70.3](https://github.com/openai/openai-node/compare/v4.70.2...v4.70.3)
diff --git a/api.md b/api.md
index da60f65bd..465730de8 100644
--- a/api.md
+++ b/api.md
@@ -48,6 +48,7 @@ Types:
- ChatCompletionMessageToolCall
- ChatCompletionModality
- ChatCompletionNamedToolChoice
+- ChatCompletionPredictionContent
- ChatCompletionRole
- ChatCompletionStreamOptions
- ChatCompletionSystemMessageParam
diff --git a/package.json b/package.json
index e9d130380..501d4f31e 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.70.3",
+ "version": "4.71.0",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/index.ts b/src/index.ts
index 33b0848e4..c3299e00d 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -87,6 +87,7 @@ import {
ChatCompletionMessageToolCall,
ChatCompletionModality,
ChatCompletionNamedToolChoice,
+ ChatCompletionPredictionContent,
ChatCompletionRole,
ChatCompletionStreamOptions,
ChatCompletionSystemMessageParam,
@@ -379,6 +380,7 @@ export declare namespace OpenAI {
type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall,
type ChatCompletionModality as ChatCompletionModality,
type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice,
+ type ChatCompletionPredictionContent as ChatCompletionPredictionContent,
type ChatCompletionRole as ChatCompletionRole,
type ChatCompletionStreamOptions as ChatCompletionStreamOptions,
type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam,
diff --git a/src/resources/audio/speech.ts b/src/resources/audio/speech.ts
index da99bf649..1cda80f79 100644
--- a/src/resources/audio/speech.ts
+++ b/src/resources/audio/speech.ts
@@ -22,7 +22,7 @@ export interface SpeechCreateParams {
input: string;
/**
- * One of the available [TTS models](https://platform.openai.com/docs/models/tts):
+ * One of the available [TTS models](https://platform.openai.com/docs/models#tts):
* `tts-1` or `tts-1-hd`
*/
model: (string & {}) | SpeechModel;
@@ -31,7 +31,7 @@ export interface SpeechCreateParams {
* The voice to use when generating the audio. Supported voices are `alloy`,
* `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
* available in the
- * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
+ * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
*/
voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';
diff --git a/src/resources/audio/transcriptions.ts b/src/resources/audio/transcriptions.ts
index dd4258787..0b6da4620 100644
--- a/src/resources/audio/transcriptions.ts
+++ b/src/resources/audio/transcriptions.ts
@@ -174,7 +174,7 @@ export interface TranscriptionCreateParams<
/**
* An optional text to guide the model's style or continue a previous audio
* segment. The
- * [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
+ * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
* should match the audio language.
*/
prompt?: string;
diff --git a/src/resources/audio/translations.ts b/src/resources/audio/translations.ts
index b98a95044..c6bf7c870 100644
--- a/src/resources/audio/translations.ts
+++ b/src/resources/audio/translations.ts
@@ -76,7 +76,7 @@ export interface TranslationCreateParams<
/**
* An optional text to guide the model's style or continue a previous audio
* segment. The
- * [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting)
+ * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
* should be in English.
*/
prompt?: string;
diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts
index 6d48089ce..0e657b1d4 100644
--- a/src/resources/beta/assistants.ts
+++ b/src/resources/beta/assistants.ts
@@ -121,8 +121,8 @@ export interface Assistant {
* ID of the model to use. You can use the
* [List models](https://platform.openai.com/docs/api-reference/models/list) API to
* see all of your available models, or see our
- * [Model overview](https://platform.openai.com/docs/models/overview) for
- * descriptions of them.
+ * [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ * them.
*/
model: string;
@@ -145,8 +145,8 @@ export interface Assistant {
/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -620,7 +620,7 @@ export namespace AssistantStreamEvent {
/**
* Occurs when an
- * [error](https://platform.openai.com/docs/guides/error-codes/api-errors) occurs.
+ * [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs.
* This can happen due to an internal server error or a timeout.
*/
export interface ErrorEvent {
@@ -663,7 +663,7 @@ export namespace FileSearchTool {
*
* Note that the file search tool may output fewer than `max_num_results` results.
* See the
- * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
* for more information.
*/
max_num_results?: number;
@@ -673,7 +673,7 @@ export namespace FileSearchTool {
* will use the `auto` ranker and a score_threshold of 0.
*
* See the
- * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
* for more information.
*/
ranking_options?: FileSearch.RankingOptions;
@@ -685,7 +685,7 @@ export namespace FileSearchTool {
* will use the `auto` ranker and a score_threshold of 0.
*
* See the
- * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
* for more information.
*/
export interface RankingOptions {
@@ -1100,8 +1100,8 @@ export interface AssistantCreateParams {
* ID of the model to use. You can use the
* [List models](https://platform.openai.com/docs/api-reference/models/list) API to
* see all of your available models, or see our
- * [Model overview](https://platform.openai.com/docs/models/overview) for
- * descriptions of them.
+ * [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ * them.
*/
model: (string & {}) | ChatAPI.ChatModel;
@@ -1131,8 +1131,8 @@ export interface AssistantCreateParams {
/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1277,8 +1277,8 @@ export interface AssistantUpdateParams {
* ID of the model to use. You can use the
* [List models](https://platform.openai.com/docs/api-reference/models/list) API to
* see all of your available models, or see our
- * [Model overview](https://platform.openai.com/docs/models/overview) for
- * descriptions of them.
+ * [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ * them.
*/
model?: string;
@@ -1289,8 +1289,8 @@ export interface AssistantUpdateParams {
/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1383,8 +1383,8 @@ export interface AssistantListParams extends CursorPageParams {
/**
* A cursor for use in pagination. `before` is an object ID that defines your place
* in the list. For instance, if you make a list request and receive 100 objects,
- * ending with obj_foo, your subsequent call can include before=obj_foo in order to
- * fetch the previous page of the list.
+ * starting with obj_foo, your subsequent call can include before=obj_foo in order
+ * to fetch the previous page of the list.
*/
before?: string;
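
A minimal sketch of the `before` cursor behaviour documented above, using the assistants list purely as an illustration; the resource, page size, and variable names are assumptions, not part of the diff:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // Fetch one page, then use the ID of its first item as the `before` cursor
  // to step back to the previous page of the list.
  const page = await client.beta.assistants.list({ limit: 100 });
  const firstId = page.data[0]?.id;
  if (firstId) {
    const previousPage = await client.beta.assistants.list({ before: firstId, limit: 100 });
    console.log(previousPage.data.length);
  }
}

main();
```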
diff --git a/src/resources/beta/threads/messages.ts b/src/resources/beta/threads/messages.ts
index af7977667..8124f56cd 100644
--- a/src/resources/beta/threads/messages.ts
+++ b/src/resources/beta/threads/messages.ts
@@ -704,8 +704,8 @@ export interface MessageListParams extends CursorPageParams {
/**
* A cursor for use in pagination. `before` is an object ID that defines your place
* in the list. For instance, if you make a list request and receive 100 objects,
- * ending with obj_foo, your subsequent call can include before=obj_foo in order to
- * fetch the previous page of the list.
+ * starting with obj_foo, your subsequent call can include before=obj_foo in order
+ * to fetch the previous page of the list.
*/
before?: string;
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 83a447a91..814ad3e89 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -435,7 +435,7 @@ export interface Run {
/**
* Whether to enable
- * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
* during tool use.
*/
parallel_tool_calls: boolean;
@@ -448,8 +448,8 @@ export interface Run {
/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -660,7 +660,7 @@ export interface RunCreateParamsBase {
* search result content.
*
* See the
- * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
* for more information.
*/
include?: Array<StepsAPI.RunStepInclude>;
@@ -721,15 +721,15 @@ export interface RunCreateParamsBase {
/**
* Body param: Whether to enable
- * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
* during tool use.
*/
parallel_tool_calls?: boolean;
/**
* Body param: Specifies the format that the model must output. Compatible with
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -909,8 +909,8 @@ export interface RunListParams extends CursorPageParams {
/**
* A cursor for use in pagination. `before` is an object ID that defines your place
* in the list. For instance, if you make a list request and receive 100 objects,
- * ending with obj_foo, your subsequent call can include before=obj_foo in order to
- * fetch the previous page of the list.
+ * starting with obj_foo, your subsequent call can include before=obj_foo in order
+ * to fetch the previous page of the list.
*/
before?: string;
diff --git a/src/resources/beta/threads/runs/steps.ts b/src/resources/beta/threads/runs/steps.ts
index b10bcb868..6c6722b62 100644
--- a/src/resources/beta/threads/runs/steps.ts
+++ b/src/resources/beta/threads/runs/steps.ts
@@ -705,7 +705,7 @@ export interface StepRetrieveParams {
* to fetch the file search result content.
*
* See the
- * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
* for more information.
*/
include?: Array<RunStepInclude>;
@@ -715,8 +715,8 @@ export interface StepListParams extends CursorPageParams {
/**
* A cursor for use in pagination. `before` is an object ID that defines your place
* in the list. For instance, if you make a list request and receive 100 objects,
- * ending with obj_foo, your subsequent call can include before=obj_foo in order to
- * fetch the previous page of the list.
+ * starting with obj_foo, your subsequent call can include before=obj_foo in order
+ * to fetch the previous page of the list.
*/
before?: string;
@@ -726,7 +726,7 @@ export interface StepListParams extends CursorPageParams {
* to fetch the file search result content.
*
* See the
- * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search/customizing-file-search-settings)
+ * [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
* for more information.
*/
include?: Array<RunStepInclude>;
diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts
index 899645508..453d8fa10 100644
--- a/src/resources/beta/threads/threads.ts
+++ b/src/resources/beta/threads/threads.ts
@@ -176,8 +176,8 @@ export class Threads extends APIResource {
/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -565,15 +565,15 @@ export interface ThreadCreateAndRunParamsBase {
/**
* Whether to enable
- * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
* during tool use.
*/
parallel_tool_calls?: boolean;
/**
* Specifies the format that the model must output. Compatible with
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
* and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/beta/vector-stores/file-batches.ts
index 533e6ce03..2c47cb9c2 100644
--- a/src/resources/beta/vector-stores/file-batches.ts
+++ b/src/resources/beta/vector-stores/file-batches.ts
@@ -276,8 +276,8 @@ export interface FileBatchListFilesParams extends CursorPageParams {
/**
* A cursor for use in pagination. `before` is an object ID that defines your place
* in the list. For instance, if you make a list request and receive 100 objects,
- * ending with obj_foo, your subsequent call can include before=obj_foo in order to
- * fetch the previous page of the list.
+ * starting with obj_foo, your subsequent call can include before=obj_foo in order
+ * to fetch the previous page of the list.
*/
before?: string;
diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/beta/vector-stores/files.ts
index a263a0491..1fda9a99b 100644
--- a/src/resources/beta/vector-stores/files.ts
+++ b/src/resources/beta/vector-stores/files.ts
@@ -268,8 +268,8 @@ export interface FileListParams extends CursorPageParams {
/**
* A cursor for use in pagination. `before` is an object ID that defines your place
* in the list. For instance, if you make a list request and receive 100 objects,
- * ending with obj_foo, your subsequent call can include before=obj_foo in order to
- * fetch the previous page of the list.
+ * starting with obj_foo, your subsequent call can include before=obj_foo in order
+ * to fetch the previous page of the list.
*/
before?: string;
diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/beta/vector-stores/vector-stores.ts
index 4d1e83dce..35ad8c369 100644
--- a/src/resources/beta/vector-stores/vector-stores.ts
+++ b/src/resources/beta/vector-stores/vector-stores.ts
@@ -372,8 +372,8 @@ export interface VectorStoreListParams extends CursorPageParams {
/**
* A cursor for use in pagination. `before` is an object ID that defines your place
* in the list. For instance, if you make a list request and receive 100 objects,
- * ending with obj_foo, your subsequent call can include before=obj_foo in order to
- * fetch the previous page of the list.
+ * starting with obj_foo, your subsequent call can include before=obj_foo in order
+ * to fetch the previous page of the list.
*/
before?: string;
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts
index afe4dd08e..351430f8c 100644
--- a/src/resources/chat/chat.ts
+++ b/src/resources/chat/chat.ts
@@ -23,6 +23,7 @@ import {
ChatCompletionMessageToolCall,
ChatCompletionModality,
ChatCompletionNamedToolChoice,
+ ChatCompletionPredictionContent,
ChatCompletionRole,
ChatCompletionStreamOptions,
ChatCompletionSystemMessageParam,
@@ -101,6 +102,7 @@ export declare namespace Chat {
type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall,
type ChatCompletionModality as ChatCompletionModality,
type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice,
+ type ChatCompletionPredictionContent as ChatCompletionPredictionContent,
type ChatCompletionRole as ChatCompletionRole,
type ChatCompletionStreamOptions as ChatCompletionStreamOptions,
type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam,
diff --git a/src/resources/chat/completions.ts b/src/resources/chat/completions.ts
index 430e52bb2..9d344744a 100644
--- a/src/resources/chat/completions.ts
+++ b/src/resources/chat/completions.ts
@@ -469,7 +469,7 @@ export namespace ChatCompletionContentPartImage {
/**
* Specifies the detail level of the image. Learn more in the
- * [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding).
+ * [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
*/
detail?: 'auto' | 'low' | 'high';
}
@@ -687,6 +687,25 @@ export namespace ChatCompletionNamedToolChoice {
}
}
+/**
+ * Static predicted output content, such as the content of a text file that is
+ * being regenerated.
+ */
+export interface ChatCompletionPredictionContent {
+ /**
+ * The content that should be matched when generating a model response. If
+ * generated tokens would match this content, the entire model response can be
+ * returned much more quickly.
+ */
+ content: string | Array<ChatCompletionContentPartText>;
+
+ /**
+ * The type of the predicted content you want to provide. This type is currently
+ * always `content`.
+ */
+ type: 'content';
+}
+
/**
* The role of the author of a message
*/
@@ -855,7 +874,7 @@ export interface ChatCompletionCreateParamsBase {
/**
* ID of the model to use. See the
- * [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
+ * [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
* table for details on which models work with the Chat API.
*/
model: (string & {}) | ChatAPI.ChatModel;
@@ -872,7 +891,7 @@ export interface ChatCompletionCreateParamsBase {
* existing frequency in the text so far, decreasing the model's likelihood to
* repeat the same line verbatim.
*
- * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
*/
frequency_penalty?: number | null;
@@ -963,25 +982,31 @@ export interface ChatCompletionCreateParamsBase {
/**
* Whether to enable
- * [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
+ * [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
* during tool use.
*/
parallel_tool_calls?: boolean;
+ /**
+ * Static predicted output content, such as the content of a text file that is
+ * being regenerated.
+ */
+ prediction?: ChatCompletionPredictionContent | null;
+
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on
* whether they appear in the text so far, increasing the model's likelihood to
* talk about new topics.
*
- * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
*/
presence_penalty?: number | null;
/**
* An object specifying the format that the model must output. Compatible with
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
- * [GPT-4o mini](https://platform.openai.com/docs/models/gpt-4o-mini),
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
+ * [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
+ * [GPT-4o mini](https://platform.openai.com/docs/models#gpt-4o-mini),
+ * [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4) and
* all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
*
* Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
@@ -1107,7 +1132,7 @@ export interface ChatCompletionCreateParamsBase {
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor
* and detect abuse.
- * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
*/
user?: string;
}
@@ -1204,6 +1229,7 @@ export declare namespace Completions {
type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall,
type ChatCompletionModality as ChatCompletionModality,
type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice,
+ type ChatCompletionPredictionContent as ChatCompletionPredictionContent,
type ChatCompletionRole as ChatCompletionRole,
type ChatCompletionStreamOptions as ChatCompletionStreamOptions,
type ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam,
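
The new `prediction` request parameter and `ChatCompletionPredictionContent` type added above can be exercised roughly as follows. This is a minimal sketch, not part of the diff; the model name and file contents are placeholder assumptions:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // The file being regenerated; passing it as `prediction` lets output tokens
  // that match the prediction be accepted instead of re-generated.
  const originalFile = 'export const total = items.reduce((a, b) => a + b, 0);';

  const completion = await client.chat.completions.create({
    model: 'gpt-4o', // assumed model; use any model that supports predicted outputs
    messages: [
      { role: 'user', content: 'Rename `total` to `sum` and return the full file.' },
      { role: 'user', content: originalFile },
    ],
    prediction: { type: 'content', content: originalFile },
  });

  console.log(completion.choices[0].message.content);
}

main();
```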
diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts
index d9366bf74..262bf75a2 100644
--- a/src/resources/chat/index.ts
+++ b/src/resources/chat/index.ts
@@ -20,6 +20,7 @@ export {
type ChatCompletionMessageToolCall,
type ChatCompletionModality,
type ChatCompletionNamedToolChoice,
+ type ChatCompletionPredictionContent,
type ChatCompletionRole,
type ChatCompletionStreamOptions,
type ChatCompletionSystemMessageParam,
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index 94c4581a1..be75a46f0 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -137,6 +137,12 @@ export namespace CompletionUsage {
* Breakdown of tokens used in a completion.
*/
export interface CompletionTokensDetails {
+ /**
+ * When using Predicted Outputs, the number of tokens in the prediction that
+ * appeared in the completion.
+ */
+ accepted_prediction_tokens?: number;
+
/**
* Audio input tokens generated by the model.
*/
@@ -146,6 +152,14 @@ export namespace CompletionUsage {
* Tokens generated by the model for reasoning.
*/
reasoning_tokens?: number;
+
+ /**
+ * When using Predicted Outputs, the number of tokens in the prediction that did
+ * not appear in the completion. However, like reasoning tokens, these tokens are
+ * still counted in the total completion tokens for purposes of billing, output,
+ * and context window limits.
+ */
+ rejected_prediction_tokens?: number;
}
/**
@@ -171,8 +185,8 @@ export interface CompletionCreateParamsBase {
* ID of the model to use. You can use the
* [List models](https://platform.openai.com/docs/api-reference/models/list) API to
* see all of your available models, or see our
- * [Model overview](https://platform.openai.com/docs/models/overview) for
- * descriptions of them.
+ * [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ * them.
*/
model: (string & {}) | 'gpt-3.5-turbo-instruct' | 'davinci-002' | 'babbage-002';
@@ -209,7 +223,7 @@ export interface CompletionCreateParamsBase {
* existing frequency in the text so far, decreasing the model's likelihood to
* repeat the same line verbatim.
*
- * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
*/
frequency_penalty?: number | null;
@@ -264,7 +278,7 @@ export interface CompletionCreateParamsBase {
* whether they appear in the text so far, increasing the model's likelihood to
* talk about new topics.
*
- * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
+ * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
*/
presence_penalty?: number | null;
@@ -327,7 +341,7 @@ export interface CompletionCreateParamsBase {
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor
* and detect abuse.
- * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
*/
user?: string;
}
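
A minimal sketch of reading the new `accepted_prediction_tokens` and `rejected_prediction_tokens` counters; it assumes `completion` is a chat completion response such as the one produced in the earlier sketch:

```ts
// Both counters live under the optional `completion_tokens_details` object on usage.
const details = completion.usage?.completion_tokens_details;
console.log('accepted prediction tokens:', details?.accepted_prediction_tokens);
console.log('rejected prediction tokens:', details?.rejected_prediction_tokens);
```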
diff --git a/src/resources/embeddings.ts b/src/resources/embeddings.ts
index e2b35f530..4b1644a68 100644
--- a/src/resources/embeddings.ts
+++ b/src/resources/embeddings.ts
@@ -94,8 +94,8 @@ export interface EmbeddingCreateParams {
* ID of the model to use. You can use the
* [List models](https://platform.openai.com/docs/api-reference/models/list) API to
* see all of your available models, or see our
- * [Model overview](https://platform.openai.com/docs/models/overview) for
- * descriptions of them.
+ * [Model overview](https://platform.openai.com/docs/models) for descriptions of
+ * them.
*/
model: (string & {}) | EmbeddingModel;
@@ -114,7 +114,7 @@ export interface EmbeddingCreateParams {
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor
* and detect abuse.
- * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
*/
user?: string;
}
diff --git a/src/resources/files.ts b/src/resources/files.ts
index dec815a28..48d8f8747 100644
--- a/src/resources/files.ts
+++ b/src/resources/files.ts
@@ -5,7 +5,7 @@ import { isRequestOptions } from '../core';
import { sleep } from '../core';
import { APIConnectionTimeoutError } from '../error';
import * as Core from '../core';
-import { Page } from '../pagination';
+import { CursorPage, type CursorPageParams } from '../pagination';
import { type Response } from '../_shims/index';
export class Files extends APIResource {
@@ -44,7 +44,7 @@ export class Files extends APIResource {
}
/**
- * Returns a list of files that belong to the user's organization.
+ * Returns a list of files.
*/
list(query?: FileListParams, options?: Core.RequestOptions): Core.PagePromise<FileObjectsPage, FileObject>;
list(options?: Core.RequestOptions): Core.PagePromise<FileObjectsPage, FileObject>;
@@ -111,10 +111,7 @@ export class Files extends APIResource {
}
}
-/**
- * Note: no pagination actually occurs yet, this is for forwards-compatibility.
- */
-export class FileObjectsPage extends Page<FileObject> {}
+export class FileObjectsPage extends CursorPage<FileObject> {}
export type FileContent = string;
@@ -213,7 +210,13 @@ export interface FileCreateParams {
purpose: FilePurpose;
}
-export interface FileListParams {
+export interface FileListParams extends CursorPageParams {
+ /**
+ * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
+ * order and `desc` for descending order.
+ */
+ order?: 'asc' | 'desc';
+
/**
* Only return files with the given purpose.
*/
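
With `FileListParams` now extending `CursorPageParams`, a files list call might look like the sketch below; the `order` and `limit` values are illustrative only:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  // `after` and `limit` come from CursorPageParams; `order` is the new sort field.
  // The returned page can be iterated item-by-item with `for await`.
  for await (const file of client.files.list({ order: 'desc', limit: 20 })) {
    console.log(file.id, file.filename, file.purpose);
  }
}

main();
```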
diff --git a/src/resources/fine-tuning/jobs/jobs.ts b/src/resources/fine-tuning/jobs/jobs.ts
index 275fad869..0c320e028 100644
--- a/src/resources/fine-tuning/jobs/jobs.ts
+++ b/src/resources/fine-tuning/jobs/jobs.ts
@@ -304,7 +304,7 @@ export interface FineTuningJobWandbIntegrationObject {
export interface JobCreateParams {
/**
* The name of the model to fine-tune. You can select one of the
- * [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned).
+ * [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
*/
model: (string & {}) | 'babbage-002' | 'davinci-002' | 'gpt-3.5-turbo' | 'gpt-4o-mini';
diff --git a/src/resources/images.ts b/src/resources/images.ts
index f4d59b941..8e1c6d92e 100644
--- a/src/resources/images.ts
+++ b/src/resources/images.ts
@@ -94,7 +94,7 @@ export interface ImageCreateVariationParams {
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor
* and detect abuse.
- * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
*/
user?: string;
}
@@ -146,7 +146,7 @@ export interface ImageEditParams {
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor
* and detect abuse.
- * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
*/
user?: string;
}
@@ -201,7 +201,7 @@ export interface ImageGenerateParams {
/**
* A unique identifier representing your end-user, which can help OpenAI to monitor
* and detect abuse.
- * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
+ * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
*/
user?: string;
}
diff --git a/src/resources/moderations.ts b/src/resources/moderations.ts
index cdde12a62..f7b16166d 100644
--- a/src/resources/moderations.ts
+++ b/src/resources/moderations.ts
@@ -351,7 +351,7 @@ export interface ModerationCreateParams {
* The content moderation model you would like to use. Learn more in
* [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
* learn about available models
- * [here](https://platform.openai.com/docs/models/moderation).
+ * [here](https://platform.openai.com/docs/models#moderation).
*/
model?: (string & {}) | ModerationModel;
}
diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts
index 78fa3a7b5..8491d0fe2 100644
--- a/src/resources/uploads/uploads.ts
+++ b/src/resources/uploads/uploads.ts
@@ -25,7 +25,7 @@ export class Uploads extends APIResource {
* For certain `purpose`s, the correct `mime_type` must be specified. Please refer
* to documentation for the supported MIME types for your use case:
*
- * - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search/supported-files)
+ * - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files)
*
* For guidance on the proper filename extensions for each purpose, please follow
* the documentation on
diff --git a/src/version.ts b/src/version.ts
index 04f8abf02..273878132 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.70.3'; // x-release-please-version
+export const VERSION = '4.71.0'; // x-release-please-version
diff --git a/tests/api-resources/chat/completions.test.ts b/tests/api-resources/chat/completions.test.ts
index 77d4a251c..180a1d77f 100644
--- a/tests/api-resources/chat/completions.test.ts
+++ b/tests/api-resources/chat/completions.test.ts
@@ -39,6 +39,7 @@ describe('resource completions', () => {
modalities: ['text', 'audio'],
n: 1,
parallel_tool_calls: true,
+ prediction: { content: 'string', type: 'content' },
presence_penalty: -2,
response_format: { type: 'text' },
seed: -9007199254740991,
diff --git a/tests/api-resources/files.test.ts b/tests/api-resources/files.test.ts
index bbaa45a65..c907c4987 100644
--- a/tests/api-resources/files.test.ts
+++ b/tests/api-resources/files.test.ts
@@ -69,7 +69,10 @@ describe('resource files', () => {
test('list: request options and params are passed correctly', async () => {
// ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
await expect(
- client.files.list({ purpose: 'purpose' }, { path: '/_stainless_unknown_path' }),
+ client.files.list(
+ { after: 'after', limit: 0, order: 'asc', purpose: 'purpose' },
+ { path: '/_stainless_unknown_path' },
+ ),
).rejects.toThrow(OpenAI.NotFoundError);
});