diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index fe24c0dcb..627f5954f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -3,6 +3,7 @@ on:
push:
branches:
- master
+ - update-specs
pull_request:
branches:
- master
@@ -12,7 +13,7 @@ jobs:
lint:
name: lint
runs-on: ubuntu-latest
- if: github.repository == 'openai/openai-node'
+
steps:
- uses: actions/checkout@v4
@@ -31,7 +32,7 @@ jobs:
build:
name: build
runs-on: ubuntu-latest
- if: github.repository == 'openai/openai-node'
+
steps:
- uses: actions/checkout@v4
@@ -49,7 +50,6 @@ jobs:
test:
name: test
runs-on: ubuntu-latest
- if: github.repository == 'openai/openai-node'
steps:
- uses: actions/checkout@v4
@@ -88,7 +88,6 @@ jobs:
ecosystem_tests:
name: ecosystem tests (v${{ matrix.node-version }})
runs-on: ubuntu-latest
- if: github.repository == 'openai/openai-node'
timeout-minutes: 20
strategy:
fail-fast: false
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 0c7a85094..a3649b199 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "4.87.3"
+ ".": "4.87.4"
}
diff --git a/.stats.yml b/.stats.yml
index 455874212..b03256223 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 81
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-be834d63e326a82494e819085137f5eb15866f3fc787db1f3afe7168d419e18a.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f3bce04386c4fcfd5037e0477fbaa39010003fd1558eb5185fe4a71dd6a05fdd.yml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 46a595495..d820d8fcd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,24 @@
# Changelog
+## 4.87.4 (2025-03-18)
+
+Full Changelog: [v4.87.3...v4.87.4](https://github.com/openai/openai-node/compare/v4.87.3...v4.87.4)
+
+### Bug Fixes
+
+* **api:** correct some Responses types ([#1391](https://github.com/openai/openai-node/issues/1391)) ([af45876](https://github.com/openai/openai-node/commit/af458766ac721fb6cf18e7d78c458506c8bfc4e1))
+* **types:** ignore missing `id` in responses pagination ([1b9d20e](https://github.com/openai/openai-node/commit/1b9d20e71f5afbd4999f1999fe4810175476c5d2))
+* **types:** improve responses type names ([#1392](https://github.com/openai/openai-node/issues/1392)) ([164f476](https://github.com/openai/openai-node/commit/164f47606b41fd3e2850f8209eb1c6e2996a81ff))
+
+
+### Chores
+
+* add missing type alias exports ([#1390](https://github.com/openai/openai-node/issues/1390)) ([16c5e22](https://github.com/openai/openai-node/commit/16c5e2261c8c1a0ba96c2d5f475e8b1bc67387d7))
+* **internal:** add back release workflow ([dddf29b](https://github.com/openai/openai-node/commit/dddf29bd914a02d4586b239ec06217389a4409f9))
+* **internal:** remove CI condition ([#1381](https://github.com/openai/openai-node/issues/1381)) ([ef17981](https://github.com/openai/openai-node/commit/ef17981a0bd6b3e971986ece829c5d260d7392d4))
+* **internal:** run CI on update-specs branch ([9fc2130](https://github.com/openai/openai-node/commit/9fc2130b74a5919a3bbd41926903bdb310de4446))
+* **internal:** update release workflows ([90b77d0](https://github.com/openai/openai-node/commit/90b77d09c04d21487aa38fe775c79ae632136813))
+
## 4.87.3 (2025-03-11)
Full Changelog: [v4.87.2...v4.87.3](https://github.com/openai/openai-node/compare/v4.87.2...v4.87.3)
diff --git a/api.md b/api.md
index 2fac07f38..fd8482bf2 100644
--- a/api.md
+++ b/api.md
@@ -548,6 +548,8 @@ Types:
- ResponseCodeInterpreterToolCall
- ResponseCompletedEvent
- ResponseComputerToolCall
+- ResponseComputerToolCallOutputItem
+- ResponseComputerToolCallOutputScreenshot
- ResponseContent
- ResponseContentPartAddedEvent
- ResponseContentPartDoneEvent
@@ -564,6 +566,8 @@ Types:
- ResponseFunctionCallArgumentsDeltaEvent
- ResponseFunctionCallArgumentsDoneEvent
- ResponseFunctionToolCall
+- ResponseFunctionToolCallItem
+- ResponseFunctionToolCallOutputItem
- ResponseFunctionWebSearch
- ResponseInProgressEvent
- ResponseIncludable
@@ -575,7 +579,9 @@ Types:
- ResponseInputImage
- ResponseInputItem
- ResponseInputMessageContentList
+- ResponseInputMessageItem
- ResponseInputText
+- ResponseItem
- ResponseOutputAudio
- ResponseOutputItem
- ResponseOutputItemAddedEvent
@@ -616,4 +622,4 @@ Types:
Methods:
-- client.responses.inputItems.list(responseId, { ...params }) -> ResponseItemListDataPage
+- client.responses.inputItems.list(responseId, { ...params }) -> ResponseItemsPage
diff --git a/bin/check-release-environment b/bin/check-release-environment
index dbfd546bf..e51564b7d 100644
--- a/bin/check-release-environment
+++ b/bin/check-release-environment
@@ -2,10 +2,6 @@
errors=()
-if [ -z "${STAINLESS_API_KEY}" ]; then
- errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.")
-fi
-
if [ -z "${NPM_TOKEN}" ]; then
errors+=("The OPENAI_NPM_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets")
fi
diff --git a/examples/yarn.lock b/examples/yarn.lock
deleted file mode 100644
index e69de29bb..000000000
diff --git a/jsr.json b/jsr.json
index 1051fade0..3e7c40d5f 100644
--- a/jsr.json
+++ b/jsr.json
@@ -1,6 +1,6 @@
{
"name": "@openai/openai",
- "version": "4.87.3",
+ "version": "4.87.4",
"exports": {
".": "./index.ts",
"./helpers/zod": "./helpers/zod.ts",
diff --git a/package.json b/package.json
index 9967a814d..baddade77 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "4.87.3",
+ "version": "4.87.4",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
diff --git a/src/index.ts b/src/index.ts
index c3abed2db..34cc3e84d 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -118,6 +118,7 @@ import {
ChatCompletionModality,
ChatCompletionNamedToolChoice,
ChatCompletionPredictionContent,
+ ChatCompletionReasoningEffort,
ChatCompletionRole,
ChatCompletionStoreMessage,
ChatCompletionStreamOptions,
@@ -129,6 +130,7 @@ import {
ChatCompletionUpdateParams,
ChatCompletionUserMessageParam,
ChatCompletionsPage,
+ CreateChatCompletionRequestMessage,
} from './resources/chat/completions/completions';
export interface ClientOptions {
@@ -404,6 +406,8 @@ export declare namespace OpenAI {
type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption,
type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam,
type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam,
+ type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage,
+ type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort,
ChatCompletionsPage as ChatCompletionsPage,
type ChatCompletionCreateParams as ChatCompletionCreateParams,
type ChatCompletionCreateParamsNonStreaming as ChatCompletionCreateParamsNonStreaming,
diff --git a/src/resources/batches.ts b/src/resources/batches.ts
index aadda83a6..2cf2ac566 100644
--- a/src/resources/batches.ts
+++ b/src/resources/batches.ts
@@ -220,11 +220,11 @@ export interface BatchCreateParams {
/**
* The endpoint to be used for all requests in the batch. Currently
- * `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions` are supported.
- * Note that `/v1/embeddings` batches are also restricted to a maximum of 50,000
- * embedding inputs across all requests in the batch.
+ * `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
+ * are supported. Note that `/v1/embeddings` batches are also restricted to a
+ * maximum of 50,000 embedding inputs across all requests in the batch.
*/
- endpoint: '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions';
+ endpoint: '/v1/responses' | '/v1/chat/completions' | '/v1/embeddings' | '/v1/completions';
/**
* The ID of an uploaded file that contains requests for the new batch.
diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts
index 7b1c353e2..f54c01597 100644
--- a/src/resources/chat/completions/completions.ts
+++ b/src/resources/chat/completions/completions.ts
@@ -377,10 +377,13 @@ export interface ChatCompletionChunk {
/**
* An optional field that will only be present when you set
* `stream_options: {"include_usage": true}` in your request. When present, it
- * contains a null value except for the last chunk which contains the token usage
- * statistics for the entire request.
+ * contains a null value **except for the last chunk** which contains the token
+ * usage statistics for the entire request.
+ *
+ * **NOTE:** If the stream is interrupted or cancelled, you may not receive the
+ * final usage chunk which contains the total token usage for the request.
*/
- usage?: CompletionsAPI.CompletionUsage | null;
+ usage?: CompletionsAPI.CompletionUsage;
}
export namespace ChatCompletionChunk {
@@ -551,7 +554,7 @@ export namespace ChatCompletionContentPart {
/**
* The name of the file, used when passing the file to the model as a string.
*/
- file_name?: string;
+ filename?: string;
}
}
}
@@ -930,8 +933,11 @@ export interface ChatCompletionStreamOptions {
/**
* If set, an additional chunk will be streamed before the `data: [DONE]` message.
* The `usage` field on this chunk shows the token usage statistics for the entire
- * request, and the `choices` field will always be an empty array. All other chunks
- * will also include a `usage` field, but with a null value.
+ * request, and the `choices` field will always be an empty array.
+ *
+ * All other chunks will also include a `usage` field, but with a null value.
+ * **NOTE:** If the stream is interrupted, you may not receive the final usage
+ * chunk which contains the total token usage for the request.
*/
include_usage?: boolean;
}
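The stream-options caveat above is easiest to see in code: the final usage chunk may simply never arrive if the stream is cut short. A sketch of consuming a stream with `include_usage` enabled (the model name is only an example):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function streamWithUsage() {
  const stream = await client.chat.completions.create({
    model: 'gpt-4o-mini', // example model
    messages: [{ role: 'user', content: 'Say hello' }],
    stream: true,
    stream_options: { include_usage: true },
  });

  let usage: OpenAI.CompletionUsage | undefined;
  for await (const chunk of stream) {
    // Only the final chunk carries usage, and its `choices` array is empty.
    if (chunk.usage) usage = chunk.usage;
    process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
  }
  // Per the note above, an interrupted stream may never deliver the usage chunk.
  console.log('\ntotal tokens:', usage?.total_tokens ?? 'unknown');
}
```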
diff --git a/src/resources/responses/index.ts b/src/resources/responses/index.ts
index 84f761a93..ad3f9a386 100644
--- a/src/resources/responses/index.ts
+++ b/src/resources/responses/index.ts
@@ -1,9 +1,4 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-export {
- ResponseItemListDataPage,
- InputItems,
- type ResponseItemList,
- type InputItemListParams,
-} from './input-items';
+export { InputItems, type ResponseItemList, type InputItemListParams } from './input-items';
export { Responses } from './responses';
diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts
index 9704be89a..f2292e5c6 100644
--- a/src/resources/responses/input-items.ts
+++ b/src/resources/responses/input-items.ts
@@ -4,7 +4,8 @@ import { APIResource } from '../../resource';
import { isRequestOptions } from '../../core';
import * as Core from '../../core';
import * as ResponsesAPI from './responses';
-import { CursorPage, type CursorPageParams } from '../../pagination';
+import { ResponseItemsPage } from './responses';
+import { type CursorPageParams } from '../../pagination';
export class InputItems extends APIResource {
/**
@@ -14,67 +15,26 @@ export class InputItems extends APIResource {
responseId: string,
query?: InputItemListParams,
options?: Core.RequestOptions,
- ): Core.PagePromise<
- ResponseItemListDataPage,
- | ResponseItemList.Message
- | ResponsesAPI.ResponseOutputMessage
- | ResponsesAPI.ResponseFileSearchToolCall
- | ResponsesAPI.ResponseComputerToolCall
- | ResponseItemList.ComputerCallOutput
- | ResponsesAPI.ResponseFunctionWebSearch
- | ResponsesAPI.ResponseFunctionToolCall
- | ResponseItemList.FunctionCallOutput
- >;
+ ): Core.PagePromise<ResponseItemsPage, ResponsesAPI.ResponseItem>;
list(
responseId: string,
options?: Core.RequestOptions,
- ): Core.PagePromise<
- ResponseItemListDataPage,
- | ResponseItemList.Message
- | ResponsesAPI.ResponseOutputMessage
- | ResponsesAPI.ResponseFileSearchToolCall
- | ResponsesAPI.ResponseComputerToolCall
- | ResponseItemList.ComputerCallOutput
- | ResponsesAPI.ResponseFunctionWebSearch
- | ResponsesAPI.ResponseFunctionToolCall
- | ResponseItemList.FunctionCallOutput
- >;
+ ): Core.PagePromise<ResponseItemsPage, ResponsesAPI.ResponseItem>;
list(
responseId: string,
query: InputItemListParams | Core.RequestOptions = {},
options?: Core.RequestOptions,
- ): Core.PagePromise<
- ResponseItemListDataPage,
- | ResponseItemList.Message
- | ResponsesAPI.ResponseOutputMessage
- | ResponsesAPI.ResponseFileSearchToolCall
- | ResponsesAPI.ResponseComputerToolCall
- | ResponseItemList.ComputerCallOutput
- | ResponsesAPI.ResponseFunctionWebSearch
- | ResponsesAPI.ResponseFunctionToolCall
- | ResponseItemList.FunctionCallOutput
- > {
+ ): Core.PagePromise<ResponseItemsPage, ResponsesAPI.ResponseItem> {
if (isRequestOptions(query)) {
return this.list(responseId, {}, query);
}
- return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemListDataPage, {
+ return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemsPage, {
query,
...options,
});
}
}
-export class ResponseItemListDataPage extends CursorPage<
- | ResponseItemList.Message
- | ResponsesAPI.ResponseOutputMessage
- | ResponsesAPI.ResponseFileSearchToolCall
- | ResponsesAPI.ResponseComputerToolCall
- | ResponseItemList.ComputerCallOutput
- | ResponsesAPI.ResponseFunctionWebSearch
- | ResponsesAPI.ResponseFunctionToolCall
- | ResponseItemList.FunctionCallOutput
-> {}
-
/**
* A list of Response items.
*/
@@ -82,16 +42,7 @@ export interface ResponseItemList {
/**
* A list of items used to generate this response.
*/
- data: Array<
- | ResponseItemList.Message
- | ResponsesAPI.ResponseOutputMessage
- | ResponsesAPI.ResponseFileSearchToolCall
- | ResponsesAPI.ResponseComputerToolCall
- | ResponseItemList.ComputerCallOutput
- | ResponsesAPI.ResponseFunctionWebSearch
- | ResponsesAPI.ResponseFunctionToolCall
- | ResponseItemList.FunctionCallOutput
- >;
+ data: Array<ResponsesAPI.ResponseItem>;
/**
* The ID of the first item in the list.
@@ -114,142 +65,6 @@ export interface ResponseItemList {
object: 'list';
}
-export namespace ResponseItemList {
- export interface Message {
- /**
- * The unique ID of the message input.
- */
- id: string;
-
- /**
- * A list of one or many input items to the model, containing different content
- * types.
- */
- content: ResponsesAPI.ResponseInputMessageContentList;
-
- /**
- * The role of the message input. One of `user`, `system`, or `developer`.
- */
- role: 'user' | 'system' | 'developer';
-
- /**
- * The status of item. One of `in_progress`, `completed`, or `incomplete`.
- * Populated when items are returned via API.
- */
- status?: 'in_progress' | 'completed' | 'incomplete';
-
- /**
- * The type of the message input. Always set to `message`.
- */
- type?: 'message';
- }
-
- export interface ComputerCallOutput {
- /**
- * The unique ID of the computer call tool output.
- */
- id: string;
-
- /**
- * The ID of the computer tool call that produced the output.
- */
- call_id: string;
-
- /**
- * A computer screenshot image used with the computer use tool.
- */
- output: ComputerCallOutput.Output;
-
- /**
- * The type of the computer tool call output. Always `computer_call_output`.
- */
- type: 'computer_call_output';
-
- /**
- * The safety checks reported by the API that have been acknowledged by the
- * developer.
- */
- acknowledged_safety_checks?: Array<ComputerCallOutput.AcknowledgedSafetyCheck>;
-
- /**
- * The status of the message input. One of `in_progress`, `completed`, or
- * `incomplete`. Populated when input items are returned via API.
- */
- status?: 'in_progress' | 'completed' | 'incomplete';
- }
-
- export namespace ComputerCallOutput {
- /**
- * A computer screenshot image used with the computer use tool.
- */
- export interface Output {
- /**
- * Specifies the event type. For a computer screenshot, this property is always set
- * to `computer_screenshot`.
- */
- type: 'computer_screenshot';
-
- /**
- * The identifier of an uploaded file that contains the screenshot.
- */
- file_id?: string;
-
- /**
- * The URL of the screenshot image.
- */
- image_url?: string;
- }
-
- /**
- * A pending safety check for the computer call.
- */
- export interface AcknowledgedSafetyCheck {
- /**
- * The ID of the pending safety check.
- */
- id: string;
-
- /**
- * The type of the pending safety check.
- */
- code: string;
-
- /**
- * Details about the pending safety check.
- */
- message: string;
- }
- }
-
- export interface FunctionCallOutput {
- /**
- * The unique ID of the function call tool output.
- */
- id: string;
-
- /**
- * The unique ID of the function tool call generated by the model.
- */
- call_id: string;
-
- /**
- * A JSON string of the output of the function tool call.
- */
- output: string;
-
- /**
- * The type of the function tool call output. Always `function_call_output`.
- */
- type: 'function_call_output';
-
- /**
- * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
- * Populated when items are returned via API.
- */
- status?: 'in_progress' | 'completed' | 'incomplete';
- }
-}
-
export interface InputItemListParams extends CursorPageParams {
/**
* An item ID to list items before, used in pagination.
@@ -265,12 +80,8 @@ export interface InputItemListParams extends CursorPageParams {
order?: 'asc' | 'desc';
}
-InputItems.ResponseItemListDataPage = ResponseItemListDataPage;
-
export declare namespace InputItems {
- export {
- type ResponseItemList as ResponseItemList,
- ResponseItemListDataPage as ResponseItemListDataPage,
- type InputItemListParams as InputItemListParams,
- };
+ export { type ResponseItemList as ResponseItemList, type InputItemListParams as InputItemListParams };
}
+
+export { ResponseItemsPage };
diff --git a/src/resources/responses/input-items.ts.orig b/src/resources/responses/input-items.ts.orig
new file mode 100644
index 000000000..470740b61
--- /dev/null
+++ b/src/resources/responses/input-items.ts.orig
@@ -0,0 +1,114 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
+import * as ResponsesAPI from './responses';
+import { ResponseItemsPage } from './responses';
+import { type CursorPageParams } from '../../pagination';
+
+export class InputItems extends APIResource {
+ /**
+ * Returns a list of input items for a given response.
+ */
+ list(
+ responseId: string,
+ query?: InputItemListParams,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<ResponseItemsPage, ResponsesAPI.ResponseItem>;
+ list(
+ responseId: string,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<ResponseItemsPage, ResponsesAPI.ResponseItem>;
+ list(
+ responseId: string,
+ query: InputItemListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<ResponseItemsPage, ResponsesAPI.ResponseItem> {
+ if (isRequestOptions(query)) {
+ return this.list(responseId, {}, query);
+ }
+ return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemsPage, {
+ query,
+ ...options,
+ });
+ }
+}
+
+<<<<<<< HEAD
+export class ResponseItemListDataPage extends CursorPage<
+ // @ts-ignore some items don't necessarily have the `id` property
+ | ResponseItemList.Message
+ | ResponsesAPI.ResponseOutputMessage
+ | ResponsesAPI.ResponseFileSearchToolCall
+ | ResponsesAPI.ResponseComputerToolCall
+ | ResponseItemList.ComputerCallOutput
+ | ResponsesAPI.ResponseFunctionWebSearch
+ | ResponsesAPI.ResponseFunctionToolCall
+ | ResponseItemList.FunctionCallOutput
+> {}
+
+||||||| parent of e5ea4a71 (fix(types): improve responses type names (#1392))
+export class ResponseItemListDataPage extends CursorPage<
+ | ResponseItemList.Message
+ | ResponsesAPI.ResponseOutputMessage
+ | ResponsesAPI.ResponseFileSearchToolCall
+ | ResponsesAPI.ResponseComputerToolCall
+ | ResponseItemList.ComputerCallOutput
+ | ResponsesAPI.ResponseFunctionWebSearch
+ | ResponsesAPI.ResponseFunctionToolCall
+ | ResponseItemList.FunctionCallOutput
+> {}
+
+=======
+>>>>>>> e5ea4a71 (fix(types): improve responses type names (#1392))
+/**
+ * A list of Response items.
+ */
+export interface ResponseItemList {
+ /**
+ * A list of items used to generate this response.
+ */
+ data: Array<ResponsesAPI.ResponseItem>;
+
+ /**
+ * The ID of the first item in the list.
+ */
+ first_id: string;
+
+ /**
+ * Whether there are more items available.
+ */
+ has_more: boolean;
+
+ /**
+ * The ID of the last item in the list.
+ */
+ last_id: string;
+
+ /**
+ * The type of object returned, must be `list`.
+ */
+ object: 'list';
+}
+
+export interface InputItemListParams extends CursorPageParams {
+ /**
+ * An item ID to list items before, used in pagination.
+ */
+ before?: string;
+
+ /**
+ * The order to return the input items in. Default is `asc`.
+ *
+ * - `asc`: Return the input items in ascending order.
+ * - `desc`: Return the input items in descending order.
+ */
+ order?: 'asc' | 'desc';
+}
+
+export declare namespace InputItems {
+ export { type ResponseItemList as ResponseItemList, type InputItemListParams as InputItemListParams };
+}
+
+export { ResponseItemsPage };
diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts
index 72adf0696..b2cd6b56c 100644
--- a/src/resources/responses/responses.ts
+++ b/src/resources/responses/responses.ts
@@ -9,12 +9,13 @@ import {
import * as Core from '../../core';
import { APIPromise, isRequestOptions } from '../../core';
import { APIResource } from '../../resource';
-import { Stream } from '../../streaming';
import * as Shared from '../shared';
import * as InputItemsAPI from './input-items';
-import { InputItemListParams, InputItems, ResponseItemList, ResponseItemListDataPage } from './input-items';
+import { InputItemListParams, InputItems, ResponseItemList } from './input-items';
import * as ResponsesAPI from './responses';
import { ResponseStream, ResponseStreamParams } from '../../lib/responses/ResponseStream';
+import { CursorPage } from '../../pagination';
+import { Stream } from '../../streaming';
export interface ParsedResponseOutputText<ParsedT> extends ResponseOutputText {
parsed: ParsedT | null;
@@ -137,6 +138,8 @@ export class Responses extends APIResource {
}
}
+export class ResponseItemsPage extends CursorPage<ResponseItem> {}
+
/**
* A tool that controls a virtual computer. Learn more about the
* [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
@@ -966,6 +969,83 @@ export namespace ResponseComputerToolCall {
}
}
+export interface ResponseComputerToolCallOutputItem {
+ /**
+ * The unique ID of the computer call tool output.
+ */
+ id: string;
+
+ /**
+ * The ID of the computer tool call that produced the output.
+ */
+ call_id: string;
+
+ /**
+ * A computer screenshot image used with the computer use tool.
+ */
+ output: ResponseComputerToolCallOutputScreenshot;
+
+ /**
+ * The type of the computer tool call output. Always `computer_call_output`.
+ */
+ type: 'computer_call_output';
+
+ /**
+ * The safety checks reported by the API that have been acknowledged by the
+ * developer.
+ */
+ acknowledged_safety_checks?: Array<ResponseComputerToolCallOutputItem.AcknowledgedSafetyCheck>;
+
+ /**
+ * The status of the message input. One of `in_progress`, `completed`, or
+ * `incomplete`. Populated when input items are returned via API.
+ */
+ status?: 'in_progress' | 'completed' | 'incomplete';
+}
+
+export namespace ResponseComputerToolCallOutputItem {
+ /**
+ * A pending safety check for the computer call.
+ */
+ export interface AcknowledgedSafetyCheck {
+ /**
+ * The ID of the pending safety check.
+ */
+ id: string;
+
+ /**
+ * The type of the pending safety check.
+ */
+ code: string;
+
+ /**
+ * Details about the pending safety check.
+ */
+ message: string;
+ }
+}
+
+/**
+ * A computer screenshot image used with the computer use tool.
+ */
+export interface ResponseComputerToolCallOutputScreenshot {
+ /**
+ * Specifies the event type. For a computer screenshot, this property is always set
+ * to `computer_screenshot`.
+ */
+ type: 'computer_screenshot';
+
+ /**
+ * The identifier of an uploaded file that contains the screenshot.
+ */
+ file_id?: string;
+
+ /**
+ * The URL of the screenshot image.
+ */
+ image_url?: string;
+}
+
/**
* Multi-modal input and output contents.
*/
@@ -1362,11 +1442,6 @@ export interface ResponseFunctionCallArgumentsDoneEvent {
* for more information.
*/
export interface ResponseFunctionToolCall {
- /**
- * The unique ID of the function tool call.
- */
- id: string;
-
/**
* A JSON string of the arguments to pass to the function.
*/
@@ -1387,6 +1462,51 @@ export interface ResponseFunctionToolCall {
*/
type: 'function_call';
+ /**
+ * The unique ID of the function tool call.
+ */
+ id?: string;
+
+ /**
+ * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+ * Populated when items are returned via API.
+ */
+ status?: 'in_progress' | 'completed' | 'incomplete';
+}
+
+/**
+ * A tool call to run a function. See the
+ * [function calling guide](https://platform.openai.com/docs/guides/function-calling)
+ * for more information.
+ */
+export interface ResponseFunctionToolCallItem extends ResponseFunctionToolCall {
+ /**
+ * The unique ID of the function call tool output.
+ */
+ id: string;
+}
+
+export interface ResponseFunctionToolCallOutputItem {
+ /**
+ * The unique ID of the function call tool output.
+ */
+ id: string;
+
+ /**
+ * The unique ID of the function tool call generated by the model.
+ */
+ call_id: string;
+
+ /**
+ * A JSON string of the output of the function tool call.
+ */
+ output: string;
+
+ /**
+ * The type of the function tool call output. Always `function_call_output`.
+ */
+ type: 'function_call_output';
+
/**
* The status of the item. One of `in_progress`, `completed`, or `incomplete`.
* Populated when items are returned via API.
@@ -1607,7 +1727,7 @@ export namespace ResponseInputItem {
/**
* A computer screenshot image used with the computer use tool.
*/
- output: ComputerCallOutput.Output;
+ output: ResponsesAPI.ResponseComputerToolCallOutputScreenshot;
/**
* The type of the computer tool call output. Always `computer_call_output`.
@@ -1633,27 +1753,6 @@ export namespace ResponseInputItem {
}
export namespace ComputerCallOutput {
- /**
- * A computer screenshot image used with the computer use tool.
- */
- export interface Output {
- /**
- * Specifies the event type. For a computer screenshot, this property is always set
- * to `computer_screenshot`.
- */
- type: 'computer_screenshot';
-
- /**
- * The identifier of an uploaded file that contains the screenshot.
- */
- file_id?: string;
-
- /**
- * The URL of the screenshot image.
- */
- image_url?: string;
- }
-
/**
* A pending safety check for the computer call.
*/
@@ -1729,6 +1828,35 @@ export namespace ResponseInputItem {
*/
export type ResponseInputMessageContentList = Array<ResponseContent>;
+export interface ResponseInputMessageItem {
+ /**
+ * The unique ID of the message input.
+ */
+ id: string;
+
+ /**
+ * A list of one or many input items to the model, containing different content
+ * types.
+ */
+ content: ResponseInputMessageContentList;
+
+ /**
+ * The role of the message input. One of `user`, `system`, or `developer`.
+ */
+ role: 'user' | 'system' | 'developer';
+
+ /**
+ * The status of item. One of `in_progress`, `completed`, or `incomplete`.
+ * Populated when items are returned via API.
+ */
+ status?: 'in_progress' | 'completed' | 'incomplete';
+
+ /**
+ * The type of the message input. Always set to `message`.
+ */
+ type?: 'message';
+}
+
/**
* A text input to the model.
*/
@@ -1744,6 +1872,19 @@ export interface ResponseInputText {
type: 'input_text';
}
+/**
+ * Content item used to generate a response.
+ */
+export type ResponseItem =
+ | ResponseInputMessageItem
+ | ResponseOutputMessage
+ | ResponseFileSearchToolCall
+ | ResponseComputerToolCall
+ | ResponseComputerToolCallOutputItem
+ | ResponseFunctionWebSearch
+ | ResponseFunctionToolCallItem
+ | ResponseFunctionToolCallOutputItem;
+
/**
* An audio output from the model.
*/
@@ -2305,6 +2446,11 @@ export interface ResponseUsage {
*/
input_tokens: number;
+ /**
+ * A detailed breakdown of the input tokens.
+ */
+ input_tokens_details: ResponseUsage.InputTokensDetails;
+
/**
* The number of output tokens.
*/
@@ -2322,6 +2468,17 @@ export interface ResponseUsage {
}
export namespace ResponseUsage {
+ /**
+ * A detailed breakdown of the input tokens.
+ */
+ export interface InputTokensDetails {
+ /**
+ * The number of tokens that were retrieved from the cache.
+ * [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
+ */
+ cached_tokens: number;
+ }
+
/**
* A detailed breakdown of the output tokens.
*/
@@ -2706,13 +2863,11 @@ export interface ResponseRetrieveParams {
}
Responses.InputItems = InputItems;
-Responses.ResponseItemListDataPage = ResponseItemListDataPage;
export declare namespace Responses {
export {
InputItems as InputItems,
type ResponseItemList as ResponseItemList,
- ResponseItemListDataPage as ResponseItemListDataPage,
type InputItemListParams as InputItemListParams,
};
}
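With the page and item types consolidated, callers now iterate input items as the exported `ResponseItem` union rather than the inline union that lived on `ResponseItemListDataPage`. A sketch of the new surface, assuming the deep import path mirrors the repository layout:

```ts
import OpenAI from 'openai';
import type { ResponseItem } from 'openai/resources/responses/responses';

const client = new OpenAI();

async function collectInputItems(responseId: string): Promise<ResponseItem[]> {
  const items: ResponseItem[] = [];
  // Auto-pagination walks every page of the ResponseItemsPage cursor.
  for await (const item of client.responses.inputItems.list(responseId, { order: 'asc' })) {
    items.push(item);
    if (item.type === 'function_call_output') {
      // Narrowed to ResponseFunctionToolCallOutputItem.
      console.log('function output:', item.output);
    }
  }
  return items;
}
```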
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
index 86b2d2dee..5fbdbba6a 100644
--- a/src/resources/shared.ts
+++ b/src/resources/shared.ts
@@ -171,10 +171,10 @@ export interface Reasoning {
* supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
* result in faster responses and fewer tokens used on reasoning in a response.
*/
- effort: ReasoningEffort | null;
+ effort?: ReasoningEffort | null;
/**
- * **o-series models only**
+ * **computer_use_preview only**
*
* A summary of the reasoning performed by the model. This can be useful for
* debugging and understanding the model's reasoning process. One of `concise` or
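Because `effort` is now optional on the shared `Reasoning` params, callers can pass only the reasoning fields they care about. A rough sketch against the Responses API (the model choice is illustrative only):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function reasoningExample() {
  const response = await client.responses.create({
    model: 'o3-mini', // illustrative reasoning-capable model
    input: 'Briefly compare cursor and offset pagination.',
    // `effort` could now be omitted entirely; it is shown here for illustration.
    reasoning: { effort: 'medium' },
  });
  console.log(response.output_text);
}
```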
diff --git a/src/version.ts b/src/version.ts
index e84192528..172c899ea 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.87.3'; // x-release-please-version
+export const VERSION = '4.87.4'; // x-release-please-version
diff --git a/tests/api-resources/batches.test.ts b/tests/api-resources/batches.test.ts
index 96e200fb9..7c7397d06 100644
--- a/tests/api-resources/batches.test.ts
+++ b/tests/api-resources/batches.test.ts
@@ -12,7 +12,7 @@ describe('resource batches', () => {
test('create: only required params', async () => {
const responsePromise = client.batches.create({
completion_window: '24h',
- endpoint: '/v1/chat/completions',
+ endpoint: '/v1/responses',
input_file_id: 'input_file_id',
});
const rawResponse = await responsePromise.asResponse();
@@ -27,7 +27,7 @@ describe('resource batches', () => {
test('create: required and optional params', async () => {
const response = await client.batches.create({
completion_window: '24h',
- endpoint: '/v1/chat/completions',
+ endpoint: '/v1/responses',
input_file_id: 'input_file_id',
metadata: { foo: 'string' },
});