From 119b5843a18b8014167c8d2031d75c08dbf400a3 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 11 Mar 2025 11:29:02 -0400 Subject: [PATCH 1/2] feat(api): add /v1/responses and built-in tools [platform.openai.com/docs/changelog](http://platform.openai.com/docs/changelog) --- .stats.yml | 4 +- README.md | 151 +- api.md | 224 +- examples/responses/stream.ts | 24 + examples/responses/streaming-tools.ts | 52 + .../responses/structured-outputs-tools.ts | 60 + examples/responses/structured-outputs.ts | 32 + examples/yarn.lock | 0 scripts/bootstrap | 2 +- src/core.ts | 4 +- src/helpers/zod.ts | 46 + src/index.ts | 58 +- src/lib/ResponsesParser.ts | 262 ++ src/lib/parser.ts | 28 + src/lib/responses/EventTypes.ts | 76 + src/lib/responses/ResponseStream.ts | 298 ++ src/resources/beta/assistants.ts | 55 +- src/resources/beta/beta.ts | 37 - src/resources/beta/index.ts | 16 - src/resources/beta/threads/runs/runs.ts | 7 +- src/resources/beta/threads/threads.ts | 90 +- src/resources/chat/chat.ts | 46 +- src/resources/chat/completions/completions.ts | 290 +- src/resources/chat/completions/index.ts | 1 - src/resources/chat/completions/messages.ts | 2 +- src/resources/chat/index.ts | 3 +- src/resources/files.ts | 26 +- src/resources/index.ts | 20 + src/resources/responses/index.ts | 9 + src/resources/responses/input-items.ts | 276 ++ src/resources/responses/responses.ts | 2761 +++++++++++++++++ src/resources/shared.ts | 156 +- src/resources/uploads/uploads.ts | 7 +- .../{beta => }/vector-stores/file-batches.ts | 23 +- .../{beta => }/vector-stores/files.ts | 89 +- .../{beta => }/vector-stores/index.ts | 6 + .../{beta => }/vector-stores/vector-stores.ts | 130 +- src/streaming.ts | 2 +- .../chat/completions/completions.test.ts | 11 +- .../responses/input-items.test.ts | 40 + .../responses.test.ts} | 68 +- .../vector-stores/file-batches.test.ts | 21 +- .../api-resources/vector-stores/files.test.ts | 132 + .../vector-stores/vector-stores.test.ts | 39 +- 44 files changed, 5226 insertions(+), 458 deletions(-) create mode 100755 examples/responses/stream.ts create mode 100755 examples/responses/streaming-tools.ts create mode 100755 examples/responses/structured-outputs-tools.ts create mode 100755 examples/responses/structured-outputs.ts create mode 100644 examples/yarn.lock create mode 100644 src/lib/ResponsesParser.ts create mode 100644 src/lib/responses/EventTypes.ts create mode 100644 src/lib/responses/ResponseStream.ts create mode 100644 src/resources/responses/index.ts create mode 100644 src/resources/responses/input-items.ts create mode 100644 src/resources/responses/responses.ts rename src/resources/{beta => }/vector-stores/file-batches.ts (92%) rename src/resources/{beta => }/vector-stores/files.ts (74%) rename src/resources/{beta => }/vector-stores/index.ts (82%) rename src/resources/{beta => }/vector-stores/vector-stores.ts (77%) create mode 100644 tests/api-resources/responses/input-items.test.ts rename tests/api-resources/{beta/vector-stores/files.test.ts => responses/responses.test.ts} (58%) rename tests/api-resources/{beta => }/vector-stores/file-batches.test.ts (81%) create mode 100644 tests/api-resources/vector-stores/files.test.ts rename tests/api-resources/{beta => }/vector-stores/vector-stores.test.ts (71%) diff --git a/.stats.yml b/.stats.yml index 163146e38..455874212 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ -configured_endpoints: 74 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-5d30684c3118d049682ea30cdb4dbef39b97d51667da484689193dc40162af32.yml +configured_endpoints: 81 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-be834d63e326a82494e819085137f5eb15866f3fc787db1f3afe7168d419e18a.yml diff --git a/README.md b/README.md index 166e35e22..8515c81ed 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,3 @@ -> [!IMPORTANT] -> We're actively working on a new alpha version that migrates from `node-fetch` to builtin fetch. -> -> Please try it out and let us know if you run into any issues! -> https://community.openai.com/t/your-feedback-requested-node-js-sdk-5-0-0-alpha/1063774 - # OpenAI TypeScript and JavaScript API Library [![NPM version](https://img.shields.io/npm/v/openai.svg)](https://npmjs.org/package/openai) ![npm bundle size](https://img.shields.io/bundlephobia/minzip/openai) [![JSR Version](https://jsr.io/badges/@openai/openai)](https://jsr.io/@openai/openai) @@ -27,9 +21,7 @@ deno add jsr:@openai/openai npx jsr add @openai/openai ``` -These commands will make the module importable from the `@openai/openai` scope: - -You can also [import directly from JSR](https://jsr.io/docs/using-packages#importing-with-jsr-specifiers) without an install step if you're using the Deno JavaScript runtime: +These commands will make the module importable from the `@openai/openai` scope. You can also [import directly from JSR](https://jsr.io/docs/using-packages#importing-with-jsr-specifiers) without an install step if you're using the Deno JavaScript runtime: ```ts import OpenAI from 'jsr:@openai/openai'; @@ -37,9 +29,10 @@ import OpenAI from 'jsr:@openai/openai'; ## Usage -The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). The code below shows how to get started using the chat completions API. +The full API of this library can be found in [api.md file](api.md) along with many [code examples](https://github.com/openai/openai-node/tree/master/examples). + +The primary API for interacting with OpenAI models is the [Responses API](https://platform.openai.com/docs/api-reference/responses). You can generate text from the model with the code below. - ```ts import OpenAI from 'openai'; @@ -47,100 +40,55 @@ const client = new OpenAI({ apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted }); -async function main() { - const chatCompletion = await client.chat.completions.create({ - messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-4o', - }); -} +const response = await client.responses.create({ + model: 'gpt-4o', + instructions: 'You are a coding assistant that talks like a pirate', + input: 'Are semicolons optional in JavaScript?', +}); -main(); +console.log(response.output_text); ``` -## Streaming responses - -We provide support for streaming responses using Server Sent Events (SSE). +The previous standard (supported indefinitely) for generating text is the [Chat Completions API](https://platform.openai.com/docs/api-reference/chat). You can use that API to generate text from the model with the code below. 
```ts import OpenAI from 'openai'; -const client = new OpenAI(); +const client = new OpenAI({ + apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted +}); -async function main() { - const stream = await client.chat.completions.create({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Say this is a test' }], - stream: true, - }); - for await (const chunk of stream) { - process.stdout.write(chunk.choices[0]?.delta?.content || ''); - } -} +const completion = await client.chat.completions.create({ + model: 'gpt-4o', + messages: [ + { role: 'developer', content: 'Talk like a pirate.' }, + { role: 'user', content: 'Are semicolons optional in JavaScript?' }, + ], +}); -main(); +console.log(completion.choices[0].message.content); ``` -If you need to cancel a stream, you can `break` from the loop or call `stream.controller.abort()`. - -### Chat Completion streaming helpers +## Streaming responses -This library also provides several conveniences for streaming chat completions, for example: +We provide support for streaming responses using Server Sent Events (SSE). ```ts import OpenAI from 'openai'; -const openai = new OpenAI(); - -async function main() { - const stream = await openai.beta.chat.completions.stream({ - model: 'gpt-4o', - messages: [{ role: 'user', content: 'Say this is a test' }], - stream: true, - }); - - stream.on('content', (delta, snapshot) => { - process.stdout.write(delta); - }); - - // or, equivalently: - for await (const chunk of stream) { - process.stdout.write(chunk.choices[0]?.delta?.content || ''); - } - - const chatCompletion = await stream.finalChatCompletion(); - console.log(chatCompletion); // {id: "…", choices: […], …} -} - -main(); -``` - -See [helpers.md](helpers.md#chat-events) for more details. - -### Request & Response types - -This library includes TypeScript definitions for all request params and response fields. You may import and use them like so: - - -```ts -import OpenAI from 'openai'; +const client = new OpenAI(); -const client = new OpenAI({ - apiKey: process.env['OPENAI_API_KEY'], // This is the default and can be omitted +const stream = await client.responses.create({ + model: 'gpt-4o', + input: 'Say "Sheep sleep deep" ten times fast!', + stream: true, }); -async function main() { - const params: OpenAI.Chat.ChatCompletionCreateParams = { - messages: [{ role: 'user', content: 'Say this is a test' }], - model: 'gpt-4o', - }; - const chatCompletion: OpenAI.Chat.ChatCompletion = await client.chat.completions.create(params); +for await (const event of stream) { + console.log(event); } - -main(); ``` -Documentation for each method, request param, and response field are available in docstrings and will appear on hover in most modern editors. - ## File uploads Request parameters that correspond to file uploads can be passed in many different forms: @@ -265,17 +213,17 @@ Note that requests which time out will be [retried twice by default](#retries). All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI. 
```ts -const completion = await client.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }); -console.log(completion._request_id) // req_123 +const response = await client.responses.create({ model: 'gpt-4o', input: 'testing 123' }); +console.log(response._request_id) // req_123 ``` You can also access the Request ID using the `.withResponse()` method: ```ts -const { data: stream, request_id } = await openai.chat.completions +const { data: stream, request_id } = await openai.responses .create({ - model: 'gpt-4', - messages: [{ role: 'user', content: 'Say this is a test' }], + model: 'gpt-4o', + input: 'Say this is a test', stream: true, }) .withResponse(); @@ -355,12 +303,6 @@ console.log(result.choices[0]!.message?.content); For more information on support for the Azure API, see [azure.md](azure.md). -## Automated function calls - -We provide the `openai.beta.chat.completions.runTools({…})` convenience helper for using function tool calls with the `/chat/completions` endpoint which automatically call the JavaScript functions you provide and sends their results back to the `/chat/completions` endpoint, looping as long as the model requests tool calls. - -For more information see [helpers.md](helpers.md#automated-function-calls). - ## Advanced Usage ### Accessing raw Response data (e.g., headers) @@ -373,17 +315,19 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi ```ts const client = new OpenAI(); -const response = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }) +const httpResponse = await client.responses + .create({ model: 'gpt-4o', input: 'say this is a test.' }) .asResponse(); -console.log(response.headers.get('X-My-Header')); -console.log(response.statusText); // access the underlying Response object -const { data: chatCompletion, response: raw } = await client.chat.completions - .create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'gpt-4o' }) +// access the underlying web standard Response object +console.log(httpResponse.headers.get('X-My-Header')); +console.log(httpResponse.statusText); + +const { data: modelResponse, response: raw } = await client.responses + .create({ model: 'gpt-4o', input: 'say this is a test.' }) .withResponse(); console.log(raw.headers.get('X-My-Header')); -console.log(chatCompletion); +console.log(modelResponse); ``` ### Making custom/undocumented requests @@ -432,6 +376,11 @@ validate or strip extra properties from the response from the API. ### Customizing the fetch client +> We're actively working on a new alpha version that migrates from `node-fetch` to builtin fetch. +> +> Please try it out and let us know if you run into any issues! +> https://community.openai.com/t/your-feedback-requested-node-js-sdk-5-0-0-alpha/1063774 + By default, this library uses `node-fetch` in Node, and expects a global `fetch` function in other environments. 
If you would prefer to use a global, web-standards-compliant `fetch` function even in a Node environment, diff --git a/api.md b/api.md index 63f239628..b21ac2d5f 100644 --- a/api.md +++ b/api.md @@ -2,10 +2,15 @@ Types: +- ChatModel +- ComparisonFilter +- CompoundFilter - ErrorObject - FunctionDefinition - FunctionParameters - Metadata +- Reasoning +- ReasoningEffort - ResponseFormatJSONObject - ResponseFormatJSONSchema - ResponseFormatText @@ -52,7 +57,6 @@ Types: - ChatCompletionModality - ChatCompletionNamedToolChoice - ChatCompletionPredictionContent -- ChatCompletionReasoningEffort - ChatCompletionRole - ChatCompletionStoreMessage - ChatCompletionStreamOptions @@ -63,6 +67,7 @@ Types: - ChatCompletionToolMessageParam - ChatCompletionUserMessageParam - CreateChatCompletionRequestMessage +- ChatCompletionReasoningEffort Methods: @@ -224,6 +229,67 @@ Methods: - client.fineTuning.jobs.checkpoints.list(fineTuningJobId, { ...params }) -> FineTuningJobCheckpointsPage +# VectorStores + +Types: + +- AutoFileChunkingStrategyParam +- FileChunkingStrategy +- FileChunkingStrategyParam +- OtherFileChunkingStrategyObject +- StaticFileChunkingStrategy +- StaticFileChunkingStrategyObject +- StaticFileChunkingStrategyObjectParam +- VectorStore +- VectorStoreDeleted +- VectorStoreSearchResponse + +Methods: + +- client.vectorStores.create({ ...params }) -> VectorStore +- client.vectorStores.retrieve(vectorStoreId) -> VectorStore +- client.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore +- client.vectorStores.list({ ...params }) -> VectorStoresPage +- client.vectorStores.del(vectorStoreId) -> VectorStoreDeleted +- client.vectorStores.search(vectorStoreId, { ...params }) -> VectorStoreSearchResponsesPage + +## Files + +Types: + +- VectorStoreFile +- VectorStoreFileDeleted +- FileContentResponse + +Methods: + +- client.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile +- client.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile +- client.vectorStores.files.update(vectorStoreId, fileId, { ...params }) -> VectorStoreFile +- client.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesPage +- client.vectorStores.files.del(vectorStoreId, fileId) -> VectorStoreFileDeleted +- client.vectorStores.files.content(vectorStoreId, fileId) -> FileContentResponsesPage +- client.beta.vectorStores.files.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.poll(vectorStoreId, fileId, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.upload(vectorStoreId, file, options?) -> Promise<VectorStoreFile> +- client.beta.vectorStores.files.uploadAndPoll(vectorStoreId, file, options?) -> Promise<VectorStoreFile> + +## FileBatches + +Types: + +- VectorStoreFileBatch + +Methods: + +- client.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatch +- client.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesPage +- client.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFileBatch> +- client.beta.vectorStores.fileBatches.poll(vectorStoreId, batchId, options?) 
-> Promise<VectorStoreFileBatch> +- client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options?) -> Promise<VectorStoreFileBatch> + # Beta ## Realtime @@ -287,72 +353,6 @@ Methods: - client.beta.realtime.sessions.create({ ...params }) -> SessionCreateResponse -## VectorStores - -Types: - -- AutoFileChunkingStrategyParam -- FileChunkingStrategy -- FileChunkingStrategyParam -- OtherFileChunkingStrategyObject -- StaticFileChunkingStrategy -- StaticFileChunkingStrategyObject -- StaticFileChunkingStrategyObjectParam -- VectorStore -- VectorStoreDeleted - -Methods: - -- client.beta.vectorStores.create({ ...params }) -> VectorStore -- client.beta.vectorStores.retrieve(vectorStoreId) -> VectorStore -- client.beta.vectorStores.update(vectorStoreId, { ...params }) -> VectorStore -- client.beta.vectorStores.list({ ...params }) -> VectorStoresPage -- client.beta.vectorStores.del(vectorStoreId) -> VectorStoreDeleted - -### Files - -Types: - -- VectorStoreFile -- VectorStoreFileDeleted - -Methods: - -- client.beta.vectorStores.files.create(vectorStoreId, { ...params }) -> VectorStoreFile -- client.beta.vectorStores.files.retrieve(vectorStoreId, fileId) -> VectorStoreFile -- client.beta.vectorStores.files.list(vectorStoreId, { ...params }) -> VectorStoreFilesPage -- client.beta.vectorStores.files.del(vectorStoreId, fileId) -> VectorStoreFileDeleted -- client.beta.vectorStores.files.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.poll(vectorStoreId, fileId, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.upload(vectorStoreId, file, options?) -> Promise<VectorStoreFile> -- client.beta.vectorStores.files.uploadAndPoll(vectorStoreId, file, options?) -> Promise<VectorStoreFile> - -### FileBatches - -Types: - -- VectorStoreFileBatch - -Methods: - -- client.beta.vectorStores.fileBatches.create(vectorStoreId, { ...params }) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.retrieve(vectorStoreId, batchId) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.cancel(vectorStoreId, batchId) -> VectorStoreFileBatch -- client.beta.vectorStores.fileBatches.listFiles(vectorStoreId, batchId, { ...params }) -> VectorStoreFilesPage -- client.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, body, options?) -> Promise<VectorStoreFileBatch> -- client.beta.vectorStores.fileBatches.poll(vectorStoreId, batchId, options?) -> Promise<VectorStoreFileBatch> -- client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options?) -> Promise<VectorStoreFileBatch> - -## Chat - -### Completions - -Methods: - -- client.beta.chat.completions.runFunctions(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner -- client.beta.chat.completions.runTools(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner -- client.beta.chat.completions.stream(body, options?) 
-> ChatCompletionStream - ## Assistants Types: @@ -526,3 +526,93 @@ Types: Methods: - client.uploads.parts.create(uploadId, { ...params }) -> UploadPart + +# Responses + +Types: + +- ComputerTool +- EasyInputMessage +- FileSearchTool +- FunctionTool +- Response +- ResponseAudioDeltaEvent +- ResponseAudioDoneEvent +- ResponseAudioTranscriptDeltaEvent +- ResponseAudioTranscriptDoneEvent +- ResponseCodeInterpreterCallCodeDeltaEvent +- ResponseCodeInterpreterCallCodeDoneEvent +- ResponseCodeInterpreterCallCompletedEvent +- ResponseCodeInterpreterCallInProgressEvent +- ResponseCodeInterpreterCallInterpretingEvent +- ResponseCodeInterpreterToolCall +- ResponseCompletedEvent +- ResponseComputerToolCall +- ResponseContent +- ResponseContentPartAddedEvent +- ResponseContentPartDoneEvent +- ResponseCreatedEvent +- ResponseError +- ResponseErrorEvent +- ResponseFailedEvent +- ResponseFileSearchCallCompletedEvent +- ResponseFileSearchCallInProgressEvent +- ResponseFileSearchCallSearchingEvent +- ResponseFileSearchToolCall +- ResponseFormatTextConfig +- ResponseFormatTextJSONSchemaConfig +- ResponseFunctionCallArgumentsDeltaEvent +- ResponseFunctionCallArgumentsDoneEvent +- ResponseFunctionToolCall +- ResponseFunctionWebSearch +- ResponseInProgressEvent +- ResponseIncludable +- ResponseIncompleteEvent +- ResponseInput +- ResponseInputAudio +- ResponseInputContent +- ResponseInputFile +- ResponseInputImage +- ResponseInputItem +- ResponseInputMessageContentList +- ResponseInputText +- ResponseOutputAudio +- ResponseOutputItem +- ResponseOutputItemAddedEvent +- ResponseOutputItemDoneEvent +- ResponseOutputMessage +- ResponseOutputRefusal +- ResponseOutputText +- ResponseRefusalDeltaEvent +- ResponseRefusalDoneEvent +- ResponseStatus +- ResponseStreamEvent +- ResponseTextAnnotationDeltaEvent +- ResponseTextConfig +- ResponseTextDeltaEvent +- ResponseTextDoneEvent +- ResponseUsage +- ResponseWebSearchCallCompletedEvent +- ResponseWebSearchCallInProgressEvent +- ResponseWebSearchCallSearchingEvent +- Tool +- ToolChoiceFunction +- ToolChoiceOptions +- ToolChoiceTypes +- WebSearchTool + +Methods: + +- client.responses.create({ ...params }) -> Response +- client.responses.retrieve(responseId, { ...params }) -> Response +- client.responses.del(responseId) -> void + +## InputItems + +Types: + +- ResponseItemList + +Methods: + +- client.responses.inputItems.list(responseId, { ...params }) -> ResponseItemListDataPage diff --git a/examples/responses/stream.ts b/examples/responses/stream.ts new file mode 100755 index 000000000..ea3d0849e --- /dev/null +++ b/examples/responses/stream.ts @@ -0,0 +1,24 @@ +#!/usr/bin/env -S npm run tsn -T + +import OpenAI from 'openai'; + +const openai = new OpenAI(); + +async function main() { + const runner = openai.responses + .stream({ + model: 'gpt-4o-2024-08-06', + input: 'solve 8x + 31 = 2', + }) + .on('event', (event) => console.log(event)) + .on('response.output_text.delta', (diff) => process.stdout.write(diff.delta)); + + for await (const event of runner) { + console.log('event', event); + } + + const result = await runner.finalResponse(); + console.log(result); +} + +main(); diff --git a/examples/responses/streaming-tools.ts b/examples/responses/streaming-tools.ts new file mode 100755 index 000000000..87a48d0c3 --- /dev/null +++ b/examples/responses/streaming-tools.ts @@ -0,0 +1,52 @@ +#!/usr/bin/env -S npm run tsn -T + +import { OpenAI } from 'openai'; +import { zodResponsesFunction } from 'openai/helpers/zod'; +import { z } from 'zod'; + +const Table = z.enum(['orders', 
'customers', 'products']); +const Column = z.enum([ + 'id', + 'status', + 'expected_delivery_date', + 'delivered_at', + 'shipped_at', + 'ordered_at', + 'canceled_at', +]); +const Operator = z.enum(['=', '>', '<', '<=', '>=', '!=']); +const OrderBy = z.enum(['asc', 'desc']); +const DynamicValue = z.object({ + column_name: Column, +}); + +const Condition = z.object({ + column: Column, + operator: Operator, + value: z.union([z.string(), z.number(), DynamicValue]), +}); + +const Query = z.object({ + table_name: Table, + columns: z.array(Column), + conditions: z.array(Condition), + order_by: OrderBy, +}); + +async function main() { + const client = new OpenAI(); + + const tool = zodResponsesFunction({ name: 'query', parameters: Query }); + + const stream = client.responses.stream({ + model: 'gpt-4o-2024-08-06', + input: 'look up all my orders in november of last year that were fulfilled but not delivered on time', + tools: [tool], + }); + + for await (const event of stream) { + console.dir(event, { depth: 10 }); + } +} + +main(); diff --git a/examples/responses/structured-outputs-tools.ts b/examples/responses/structured-outputs-tools.ts new file mode 100755 index 000000000..29eaabf93 --- /dev/null +++ b/examples/responses/structured-outputs-tools.ts @@ -0,0 +1,60 @@ +#!/usr/bin/env -S npm run tsn -T + +import { OpenAI } from 'openai'; +import { zodResponsesFunction } from 'openai/helpers/zod'; +import { z } from 'zod'; + +const Table = z.enum(['orders', 'customers', 'products']); +const Column = z.enum([ + 'id', + 'status', + 'expected_delivery_date', + 'delivered_at', + 'shipped_at', + 'ordered_at', + 'canceled_at', +]); +const Operator = z.enum(['=', '>', '<', '<=', '>=', '!=']); +const OrderBy = z.enum(['asc', 'desc']); +const DynamicValue = z.object({ + column_name: Column, +}); + +const Condition = z.object({ + column: Column, + operator: Operator, + value: z.union([z.string(), z.number(), DynamicValue]), +}); + +const Query = z.object({ + table_name: Table, + columns: z.array(Column), + conditions: z.array(Condition), + order_by: OrderBy, +}); + +async function main() { + const client = new OpenAI(); + + const tool = zodResponsesFunction({ name: 'query', parameters: Query }); + + const rsp = await client.responses.parse({ + model: 'gpt-4o-2024-08-06', + input: 'look up all my orders in november of last year that were fulfilled but not delivered on time', + tools: [tool], + }); + + console.log(rsp); + + const functionCall = rsp.output[0]!; + + if (functionCall.type !== 'function_call') { + throw new Error('Expected function call'); + } + + const query = functionCall.parsed_arguments; + + console.log(query); +} + +main(); diff --git a/examples/responses/structured-outputs.ts b/examples/responses/structured-outputs.ts new file mode 100755 index 000000000..07ff93a60 --- /dev/null +++ b/examples/responses/structured-outputs.ts @@ -0,0 +1,32 @@ +#!/usr/bin/env -S npm run tsn -T + +import { OpenAI } from 'openai'; +import { zodTextFormat } from 'openai/helpers/zod'; +import { z } from 'zod'; + +const Step = z.object({ + explanation: z.string(), + output: z.string(), +}); + +const MathResponse = z.object({ + steps: z.array(Step), + final_answer: z.string(), +}); + +const client = new OpenAI(); + +async function main() { + const rsp = await client.responses.parse({ + input: 'solve 8x + 31 = 2', + model: 'gpt-4o-2024-08-06', + text: { + format: zodTextFormat(MathResponse, 'math_response'), + }, + }); + + console.log(rsp.output_parsed); + console.log('answer: ', rsp.output_parsed?.final_answer); +} 
+
+main().catch(console.error);
diff --git a/examples/yarn.lock b/examples/yarn.lock
new file mode 100644
index 000000000..e69de29bb
diff --git a/scripts/bootstrap b/scripts/bootstrap
index 033156d3a..f107c3a24 100755
--- a/scripts/bootstrap
+++ b/scripts/bootstrap
@@ -4,7 +4,7 @@ set -e

 cd "$(dirname "$0")/.."

-if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then
+if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ] && [ "$SKIP_BREW" != "1" ]; then
   brew bundle check >/dev/null 2>&1 || {
     echo "==> Installing Homebrew dependencies…"
     brew bundle
diff --git a/src/core.ts b/src/core.ts
index 6578c0781..a41eaa3fa 100644
--- a/src/core.ts
+++ b/src/core.ts
@@ -62,8 +62,8 @@ async function defaultParseResponse<T>(props: APIResponseProps): Promise<T> {
   return _zodToJsonSchema(schema, {
@@ -74,6 +78,23 @@ export function zodResponseFormat<ZodInput extends z.ZodType>(
   );
 }
+export function zodTextFormat<ZodInput extends z.ZodType>(
+  zodObject: ZodInput,
+  name: string,
+  props?: Omit<ResponseFormatTextJSONSchemaConfig, 'schema' | 'type' | 'strict' | 'name'>,
+): AutoParseableTextFormat<zodInfer<ZodInput>> {
+  return makeParseableTextFormat(
+    {
+      type: 'json_schema',
+      ...props,
+      name,
+      strict: true,
+      schema: zodToJsonSchema(zodObject, { name }),
+    },
+    (content) => zodObject.parse(JSON.parse(content)),
+  );
+}
+
 /**
  * Creates a chat completion `function` tool that can be invoked
  * automatically by the chat completion `.runTools()` method or automatically
@@ -106,3 +127,28 @@ export function zodFunction<Parameters extends z.ZodType>(options: {
     },
   );
 }
+
+export function zodResponsesFunction<Parameters extends z.ZodType>(options: {
+  name: string;
+  parameters: Parameters;
+  function?: ((args: zodInfer<Parameters>) => unknown | Promise<unknown>) | undefined;
+  description?: string | undefined;
+}): AutoParseableResponseTool<{
+  arguments: Parameters;
+  name: string;
+  function: (args: zodInfer<Parameters>) => unknown;
+}> {
+  return makeParseableResponseTool(
+    {
+      type: 'function',
+      name: options.name,
+      parameters: zodToJsonSchema(options.parameters, { name: options.name }),
+      strict: true,
+      ...(options.description ?
{ description: options.description } : undefined), + }, + { + callback: options.function, + parser: (args) => options.parameters.parse(JSON.parse(args)), + }, + ); +} diff --git a/src/index.ts b/src/index.ts index debefce8c..c3abed2db 100644 --- a/src/index.ts +++ b/src/index.ts @@ -65,14 +65,34 @@ import { } from './resources/moderations'; import { Audio, AudioModel, AudioResponseFormat } from './resources/audio/audio'; import { Beta } from './resources/beta/beta'; -import { Chat, ChatModel } from './resources/chat/chat'; +import { Chat } from './resources/chat/chat'; import { FineTuning } from './resources/fine-tuning/fine-tuning'; +import { Responses } from './resources/responses/responses'; import { Upload, UploadCompleteParams, UploadCreateParams, Uploads as UploadsAPIUploads, } from './resources/uploads/uploads'; +import { + AutoFileChunkingStrategyParam, + FileChunkingStrategy, + FileChunkingStrategyParam, + OtherFileChunkingStrategyObject, + StaticFileChunkingStrategy, + StaticFileChunkingStrategyObject, + StaticFileChunkingStrategyObjectParam, + VectorStore, + VectorStoreCreateParams, + VectorStoreDeleted, + VectorStoreListParams, + VectorStoreSearchParams, + VectorStoreSearchResponse, + VectorStoreSearchResponsesPage, + VectorStoreUpdateParams, + VectorStores, + VectorStoresPage, +} from './resources/vector-stores/vector-stores'; import { ChatCompletion, ChatCompletionAssistantMessageParam, @@ -98,7 +118,6 @@ import { ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionPredictionContent, - ChatCompletionReasoningEffort, ChatCompletionRole, ChatCompletionStoreMessage, ChatCompletionStreamOptions, @@ -267,9 +286,11 @@ export class OpenAI extends Core.APIClient { moderations: API.Moderations = new API.Moderations(this); models: API.Models = new API.Models(this); fineTuning: API.FineTuning = new API.FineTuning(this); + vectorStores: API.VectorStores = new API.VectorStores(this); beta: API.Beta = new API.Beta(this); batches: API.Batches = new API.Batches(this); uploads: API.Uploads = new API.Uploads(this); + responses: API.Responses = new API.Responses(this); protected override defaultQuery(): Core.DefaultQuery | undefined { return this._options.defaultQuery; @@ -325,10 +346,14 @@ OpenAI.Moderations = Moderations; OpenAI.Models = Models; OpenAI.ModelsPage = ModelsPage; OpenAI.FineTuning = FineTuning; +OpenAI.VectorStores = VectorStores; +OpenAI.VectorStoresPage = VectorStoresPage; +OpenAI.VectorStoreSearchResponsesPage = VectorStoreSearchResponsesPage; OpenAI.Beta = Beta; OpenAI.Batches = Batches; OpenAI.BatchesPage = BatchesPage; OpenAI.Uploads = UploadsAPIUploads; +OpenAI.Responses = Responses; export declare namespace OpenAI { export type RequestOptions = Core.RequestOptions; @@ -350,7 +375,6 @@ export declare namespace OpenAI { export { Chat as Chat, - type ChatModel as ChatModel, type ChatCompletion as ChatCompletion, type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, type ChatCompletionAudio as ChatCompletionAudio, @@ -371,7 +395,6 @@ export declare namespace OpenAI { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, @@ -440,6 +463,26 @@ export 
declare namespace OpenAI {
   export { FineTuning as FineTuning };

+  export {
+    VectorStores as VectorStores,
+    type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam,
+    type FileChunkingStrategy as FileChunkingStrategy,
+    type FileChunkingStrategyParam as FileChunkingStrategyParam,
+    type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject,
+    type StaticFileChunkingStrategy as StaticFileChunkingStrategy,
+    type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject,
+    type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam,
+    type VectorStore as VectorStore,
+    type VectorStoreDeleted as VectorStoreDeleted,
+    type VectorStoreSearchResponse as VectorStoreSearchResponse,
+    VectorStoresPage as VectorStoresPage,
+    VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage,
+    type VectorStoreCreateParams as VectorStoreCreateParams,
+    type VectorStoreUpdateParams as VectorStoreUpdateParams,
+    type VectorStoreListParams as VectorStoreListParams,
+    type VectorStoreSearchParams as VectorStoreSearchParams,
+  };
+
   export { Beta as Beta };

   export {
@@ -459,10 +502,17 @@ export declare namespace OpenAI {
     type UploadCompleteParams as UploadCompleteParams,
   };

+  export { Responses as Responses };
+
+  export type ChatModel = API.ChatModel;
+  export type ComparisonFilter = API.ComparisonFilter;
+  export type CompoundFilter = API.CompoundFilter;
   export type ErrorObject = API.ErrorObject;
   export type FunctionDefinition = API.FunctionDefinition;
   export type FunctionParameters = API.FunctionParameters;
   export type Metadata = API.Metadata;
+  export type Reasoning = API.Reasoning;
+  export type ReasoningEffort = API.ReasoningEffort;
   export type ResponseFormatJSONObject = API.ResponseFormatJSONObject;
   export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema;
   export type ResponseFormatText = API.ResponseFormatText;
diff --git a/src/lib/ResponsesParser.ts b/src/lib/ResponsesParser.ts
new file mode 100644
index 000000000..780b779ff
--- /dev/null
+++ b/src/lib/ResponsesParser.ts
@@ -0,0 +1,262 @@
+import { OpenAIError } from '../error';
+import { type ChatCompletionTool } from '../resources';
+import {
+  type FunctionTool,
+  type ParsedContent,
+  type ParsedResponse,
+  type ParsedResponseFunctionToolCall,
+  type ParsedResponseOutputItem,
+  type Response,
+  type ResponseCreateParamsBase,
+  type ResponseCreateParamsNonStreaming,
+  type ResponseFunctionToolCall,
+  type Tool,
+} from '../resources/responses/responses';
+import { type AutoParseableTextFormat, isAutoParsableResponseFormat } from '../lib/parser';
+
+type ParseableToolsParams = Array<Tool> | ChatCompletionTool | null;
+
+export type ResponseCreateParamsWithTools = ResponseCreateParamsBase & {
+  tools?: ParseableToolsParams;
+};
+
+export type ExtractParsedContentFromParams<Params extends ResponseCreateParamsWithTools> =
+  NonNullable<Params['text']>['format'] extends AutoParseableTextFormat<infer P> ? P : null;
+
+export function maybeParseResponse<
+  Params extends ResponseCreateParamsBase | null,
+  ParsedT = Params extends null ? null : ExtractParsedContentFromParams<NonNullable<Params>>,
+>(response: Response, params: Params): ParsedResponse<ParsedT> {
+  if (!params || !hasAutoParseableInput(params)) {
+    return {
+      ...response,
+      output_parsed: null,
+      output: response.output.map((item) => {
+        if (item.type === 'function_call') {
+          return {
+            ...item,
+            parsed_arguments: null,
+          };
+        }
+
+        if (item.type === 'message') {
+          return {
+            ...item,
+            content: item.content.map((content) => ({
+              ...content,
+              parsed: null,
+            })),
+          };
+        } else {
+          return item;
+        }
+      }),
+    };
+  }
+
+  return parseResponse(response, params);
+}
+
+export function parseResponse<
+  Params extends ResponseCreateParamsBase,
+  ParsedT = ExtractParsedContentFromParams<Params>,
+>(response: Response, params: Params): ParsedResponse<ParsedT> {
+  const output: Array<ParsedResponseOutputItem<ParsedT>> = response.output.map(
+    (item): ParsedResponseOutputItem<ParsedT> => {
+      if (item.type === 'function_call') {
+        return {
+          ...item,
+          parsed_arguments: parseToolCall(params, item),
+        };
+      }
+      if (item.type === 'message') {
+        const content: Array<ParsedContent<ParsedT>> = item.content.map((content) => {
+          if (content.type === 'output_text') {
+            return {
+              ...content,
+              parsed: parseTextFormat(params, content.text),
+            };
+          }
+
+          return content;
+        });
+
+        return {
+          ...item,
+          content,
+        };
+      }
+
+      return item;
+    },
+  );
+
+  const parsed: Omit<ParsedResponse<ParsedT>, 'output_parsed'> = Object.assign({}, response, { output });
+  if (!Object.getOwnPropertyDescriptor(response, 'output_text')) {
+    addOutputText(parsed);
+  }
+
+  Object.defineProperty(parsed, 'output_parsed', {
+    enumerable: true,
+    get() {
+      for (const output of parsed.output) {
+        if (output.type !== 'message') {
+          continue;
+        }
+
+        for (const content of output.content) {
+          if (content.type === 'output_text' && content.parsed !== null) {
+            return content.parsed;
+          }
+        }
+      }
+
+      return null;
+    },
+  });
+
+  return parsed as ParsedResponse<ParsedT>;
+}
+
+function parseTextFormat<
+  Params extends ResponseCreateParamsBase,
+  ParsedT = ExtractParsedContentFromParams<Params>,
+>(params: Params, content: string): ParsedT | null {
+  if (params.text?.format?.type !== 'json_schema') {
+    return null;
+  }
+
+  if ('$parseRaw' in params.text?.format) {
+    const text_format = params.text?.format as unknown as AutoParseableTextFormat<ParsedT>;
+    return text_format.$parseRaw(content);
+  }
+
+  return JSON.parse(content);
+}
+
+export function hasAutoParseableInput(params: ResponseCreateParamsWithTools): boolean {
+  if (isAutoParsableResponseFormat(params.text?.format)) {
+    return true;
+  }
+
+  return false;
+}
+
+type ToolOptions = {
+  name: string;
+  arguments: any;
+  function?: ((args: any) => any) | undefined;
+};
+
+export type AutoParseableResponseTool<
+  OptionsT extends ToolOptions,
+  HasFunction = OptionsT['function'] extends Function ? true : false,
+> = FunctionTool & {
+  __arguments: OptionsT['arguments']; // type-level only
+  __name: OptionsT['name']; // type-level only
+
+  $brand: 'auto-parseable-tool';
+  $callback: ((args: OptionsT['arguments']) => any) | undefined;
+  $parseRaw(args: string): OptionsT['arguments'];
+};
+
+export function makeParseableResponseTool<OptionsT extends ToolOptions>(
+  tool: FunctionTool,
+  {
+    parser,
+    callback,
+  }: {
+    parser: (content: string) => OptionsT['arguments'];
+    callback: ((args: any) => any) | undefined;
+  },
+): AutoParseableResponseTool<OptionsT> {
+  const obj = { ...tool };
+
+  Object.defineProperties(obj, {
+    $brand: {
+      value: 'auto-parseable-tool',
+      enumerable: false,
+    },
+    $parseRaw: {
+      value: parser,
+      enumerable: false,
+    },
+    $callback: {
+      value: callback,
+      enumerable: false,
+    },
+  });
+
+  return obj as AutoParseableResponseTool<OptionsT>;
+}
+
+export function isAutoParsableTool(tool: any): tool is AutoParseableResponseTool<any> {
+  return tool?.['$brand'] === 'auto-parseable-tool';
+}
+
+function getInputToolByName(input_tools: Array<Tool>, name: string): FunctionTool | undefined {
+  return input_tools.find((tool) => tool.type === 'function' && tool.name === name) as
+    | FunctionTool
+    | undefined;
+}
+
+function parseToolCall<Params extends ResponseCreateParamsBase>(
+  params: Params,
+  toolCall: ResponseFunctionToolCall,
+): ParsedResponseFunctionToolCall {
+  const inputTool = getInputToolByName(params.tools ?? [], toolCall.name);
+
+  return {
+    ...toolCall,
+    parsed_arguments:
+      isAutoParsableTool(inputTool) ? inputTool.$parseRaw(toolCall.arguments)
+      : inputTool?.strict ? JSON.parse(toolCall.arguments)
+      : null,
+  };
+}
+
+export function shouldParseToolCall(
+  params: ResponseCreateParamsNonStreaming | null | undefined,
+  toolCall: ResponseFunctionToolCall,
+): boolean {
+  if (!params) {
+    return false;
+  }
+
+  const inputTool = getInputToolByName(params.tools ?? [], toolCall.name);
+  return isAutoParsableTool(inputTool) || inputTool?.strict || false;
+}
+
+export function validateInputTools(tools: ChatCompletionTool[] | undefined) {
+  for (const tool of tools ?? []) {
+    if (tool.type !== 'function') {
+      throw new OpenAIError(
+        `Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``,
+      );
+    }
+
+    if (tool.function.strict !== true) {
+      throw new OpenAIError(
+        `The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`,
+      );
+    }
+  }
+}
+
+export function addOutputText(rsp: Response): void {
+  const texts: string[] = [];
+  for (const output of rsp.output) {
+    if (output.type !== 'message') {
+      continue;
+    }
+
+    for (const content of output.content) {
+      if (content.type === 'output_text') {
+        texts.push(content.text);
+      }
+    }
+  }
+
+  rsp.output_text = texts.join('');
+}
diff --git a/src/lib/parser.ts b/src/lib/parser.ts
index a750375dc..d75d32a40 100644
--- a/src/lib/parser.ts
+++ b/src/lib/parser.ts
@@ -14,6 +14,7 @@ import {
 } from '../resources/beta/chat/completions';
 import { ResponseFormatJSONSchema } from '../resources/shared';
 import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from '../error';
+import { type ResponseFormatTextJSONSchemaConfig } from '../resources/responses/responses';

 type AnyChatCompletionCreateParams =
   | ChatCompletionCreateParams
@@ -51,6 +52,33 @@ export function makeParseableResponseFormat<ParsedT>(
   return obj as AutoParseableResponseFormat<ParsedT>;
 }

+export type AutoParseableTextFormat<ParsedT> = ResponseFormatTextJSONSchemaConfig & {
+  __output: ParsedT; // type-level only
+
+  $brand: 'auto-parseable-response-format';
+  $parseRaw(content: string): ParsedT;
+};
+
+export function makeParseableTextFormat<ParsedT>(
+  response_format: ResponseFormatTextJSONSchemaConfig,
+  parser: (content: string) => ParsedT,
+): AutoParseableTextFormat<ParsedT> {
+  const obj = { ...response_format };
+
+  Object.defineProperties(obj, {
+    $brand: {
+      value: 'auto-parseable-response-format',
+      enumerable: false,
+    },
+    $parseRaw: {
+      value: parser,
+      enumerable: false,
+    },
+  });
+
+  return obj as AutoParseableTextFormat<ParsedT>;
+}
+
 export function isAutoParsableResponseFormat<ParsedT>(
   response_format: any,
 ): response_format is AutoParseableResponseFormat<ParsedT> {
diff --git a/src/lib/responses/EventTypes.ts b/src/lib/responses/EventTypes.ts
new file mode 100644
index 000000000..fc1620988
--- /dev/null
+++ b/src/lib/responses/EventTypes.ts
@@ -0,0 +1,76 @@
+import {
+  ResponseAudioDeltaEvent,
+  ResponseAudioDoneEvent,
+  ResponseAudioTranscriptDeltaEvent,
+  ResponseAudioTranscriptDoneEvent,
+  ResponseCodeInterpreterCallCodeDeltaEvent,
+  ResponseCodeInterpreterCallCodeDoneEvent,
+  ResponseCodeInterpreterCallCompletedEvent,
+  ResponseCodeInterpreterCallInProgressEvent,
+  ResponseCodeInterpreterCallInterpretingEvent,
+  ResponseCompletedEvent,
+  ResponseContentPartAddedEvent,
+  ResponseContentPartDoneEvent,
+  ResponseCreatedEvent,
+  ResponseErrorEvent,
+  ResponseFailedEvent,
+  ResponseFileSearchCallCompletedEvent,
+  ResponseFileSearchCallInProgressEvent,
+  ResponseFileSearchCallSearchingEvent,
+  ResponseFunctionCallArgumentsDeltaEvent as RawResponseFunctionCallArgumentsDeltaEvent,
+  ResponseFunctionCallArgumentsDoneEvent,
+  ResponseInProgressEvent,
+  ResponseOutputItemAddedEvent,
+  ResponseOutputItemDoneEvent,
+  ResponseRefusalDeltaEvent,
+  ResponseRefusalDoneEvent,
+  ResponseTextAnnotationDeltaEvent,
+  ResponseTextDeltaEvent as RawResponseTextDeltaEvent,
+  ResponseTextDoneEvent,
+  ResponseIncompleteEvent,
+  ResponseWebSearchCallCompletedEvent,
+  ResponseWebSearchCallInProgressEvent,
+  ResponseWebSearchCallSearchingEvent,
+} from '../../resources/responses/responses';
+
+export type ResponseFunctionCallArgumentsDeltaEvent = RawResponseFunctionCallArgumentsDeltaEvent & {
+  snapshot: string;
+};
+
+export type ResponseTextDeltaEvent = RawResponseTextDeltaEvent & {
+  snapshot: string;
+};
+
+export type ParsedResponseStreamEvent =
+  | ResponseAudioDeltaEvent
+  | ResponseAudioDoneEvent
+  | ResponseAudioTranscriptDeltaEvent
+  | ResponseAudioTranscriptDoneEvent
+  | ResponseCodeInterpreterCallCodeDeltaEvent
+  | ResponseCodeInterpreterCallCodeDoneEvent
+  | ResponseCodeInterpreterCallCompletedEvent
+  | ResponseCodeInterpreterCallInProgressEvent
+  | ResponseCodeInterpreterCallInterpretingEvent
+  | ResponseCompletedEvent
+  | ResponseContentPartAddedEvent
+  | ResponseContentPartDoneEvent
+  | ResponseCreatedEvent
+  | ResponseErrorEvent
+  | ResponseFileSearchCallCompletedEvent
+  | ResponseFileSearchCallInProgressEvent
+  | ResponseFileSearchCallSearchingEvent
+  | ResponseFunctionCallArgumentsDeltaEvent
+  | ResponseFunctionCallArgumentsDoneEvent
+  | ResponseInProgressEvent
+  | ResponseFailedEvent
+  | ResponseIncompleteEvent
+  | ResponseOutputItemAddedEvent
+  | ResponseOutputItemDoneEvent
+  | ResponseRefusalDeltaEvent
+  | ResponseRefusalDoneEvent
+  | ResponseTextAnnotationDeltaEvent
+  | ResponseTextDeltaEvent
+  | ResponseTextDoneEvent
+  | ResponseWebSearchCallCompletedEvent
+  | ResponseWebSearchCallInProgressEvent
+  | ResponseWebSearchCallSearchingEvent;
diff --git a/src/lib/responses/ResponseStream.ts b/src/lib/responses/ResponseStream.ts
new file mode 100644
index 000000000..0d6cd47dd
--- /dev/null
+++ b/src/lib/responses/ResponseStream.ts
@@ -0,0 +1,298 @@
+import {
+  type ParsedResponse,
+  type Response,
+  type ResponseCreateParamsBase,
+  type ResponseCreateParamsStreaming,
+  type ResponseStreamEvent,
+} from 'openai/resources/responses/responses';
+import * as Core from '../../core';
+import { APIUserAbortError, OpenAIError } from '../../error';
+import OpenAI from '../../index';
+import { type BaseEvents, EventStream } from '../EventStream';
+import { type ResponseFunctionCallArgumentsDeltaEvent, type ResponseTextDeltaEvent } from './EventTypes';
+import { maybeParseResponse } from '../ResponsesParser';
+
+export type ResponseStreamParams = Omit<ResponseCreateParamsBase, 'stream'> & {
+  stream?: true;
+};
+
+type ResponseEvents = BaseEvents &
+  Omit<
+    {
+      [K in ResponseStreamEvent['type']]: (event: Extract<ResponseStreamEvent, { type: K }>) => void;
+    },
+    'response.output_text.delta' | 'response.function_call_arguments.delta'
+  > & {
+    event: (event: ResponseStreamEvent) => void;
+    'response.output_text.delta': (event: ResponseTextDeltaEvent) => void;
+    'response.function_call_arguments.delta': (event: ResponseFunctionCallArgumentsDeltaEvent) => void;
+  };
+
+export type ResponseStreamingParams = Omit<ResponseCreateParamsBase, 'stream'> & {
+  stream?: true;
+};
+
+export class ResponseStream<ParsedT = null>
+  extends EventStream<ResponseEvents>
+  implements AsyncIterable<ResponseStreamEvent>
+{
+  #params: ResponseStreamingParams | null;
+  #currentResponseSnapshot: Response | undefined;
+  #finalResponse: ParsedResponse<ParsedT> | undefined;
+
+  constructor(params: ResponseStreamingParams | null) {
+    super();
+    this.#params = params;
+  }
+
+  static createResponse<ParsedT>(
+    client: OpenAI,
+    params: ResponseStreamParams,
+    options?: Core.RequestOptions,
+  ): ResponseStream<ParsedT> {
+    const runner = new ResponseStream<ParsedT>(params as ResponseCreateParamsStreaming);
+    runner._run(() =>
+      runner._createResponse(client, params, {
+        ...options,
+        headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' },
+      }),
+    );
+    return runner;
+  }
+
+  #beginRequest() {
+    if (this.ended) return;
+    this.#currentResponseSnapshot = undefined;
+  }
+
+  #addEvent(this: ResponseStream<ParsedT>, event: ResponseStreamEvent) {
+    if (this.ended) return;
+
+    const response = this.#accumulateResponse(event);
+    this._emit('event', event);
+
+    switch (event.type) {
+      case 'response.output_text.delta': {
+        const output = response.output[event.output_index];
+        if (!output) {
+          throw new OpenAIError(`missing output at index ${event.output_index}`);
+        }
+        if (output.type === 'message') {
+          const content = output.content[event.content_index];
+          if (!content) {
+            throw new OpenAIError(`missing content at index ${event.content_index}`);
+          }
+          if (content.type !== 'output_text') {
+            throw new OpenAIError(`expected content to be 'output_text', got ${content.type}`);
+          }
+
+          this._emit('response.output_text.delta', {
+            ...event,
+            snapshot: content.text,
+          });
+        }
+        break;
+      }
+      case 'response.function_call_arguments.delta': {
+        const output = response.output[event.output_index];
+        if (!output) {
+          throw new OpenAIError(`missing output at index ${event.output_index}`);
+        }
+        if (output.type === 'function_call') {
+          this._emit('response.function_call_arguments.delta', {
+            ...event,
+            snapshot: output.arguments,
+          });
+        }
+        break;
+      }
+      default:
+        // @ts-ignore
+        this._emit(event.type, event);
+        break;
+    }
+  }
+
+  #endRequest(): ParsedResponse<ParsedT> {
+    if (this.ended) {
+      throw new OpenAIError(`stream has ended, this shouldn't happen`);
+    }
+    const snapshot = this.#currentResponseSnapshot;
+    if (!snapshot) {
+      throw new OpenAIError(`request ended without sending any events`);
+    }
+    this.#currentResponseSnapshot = undefined;
+    const parsedResponse = finalizeResponse<ParsedT>(snapshot, this.#params);
+    this.#finalResponse = parsedResponse;
+
+    return parsedResponse;
+  }
+
+  protected async _createResponse(
+    client: OpenAI,
+    params: ResponseStreamingParams,
+    options?: Core.RequestOptions,
+  ): Promise<ParsedResponse<ParsedT>> {
+    const signal = options?.signal;
+    if (signal) {
+      if (signal.aborted) this.controller.abort();
+      signal.addEventListener('abort', () => this.controller.abort());
+    }
+    this.#beginRequest();
+
+    const stream = await client.responses.create(
+      { ...params, stream: true },
+      { ...options, signal: this.controller.signal },
+    );
+    this._connected();
+    for await (const event of stream) {
+      this.#addEvent(event);
+    }
+    if (stream.controller.signal?.aborted) {
+      throw new APIUserAbortError();
+    }
+    return this.#endRequest();
+  }
+
+  #accumulateResponse(event: ResponseStreamEvent): Response {
+    let snapshot = this.#currentResponseSnapshot;
+    if (!snapshot) {
+      if (event.type !== 'response.created') {
+        throw new OpenAIError(
+          `When snapshot hasn't been set yet, expected 'response.created' event, got ${event.type}`,
+        );
+      }
+      snapshot = this.#currentResponseSnapshot = event.response;
+      return snapshot;
+    }
+
+    switch (event.type) {
+      case 'response.output_item.added': {
+        snapshot.output.push(event.item);
+        break;
+      }
+      case 'response.content_part.added': {
+        const output = snapshot.output[event.output_index];
+        if (!output) {
+          throw new OpenAIError(`missing output at index ${event.output_index}`);
+        }
+        if (output.type === 'message') {
+          output.content.push(event.part);
+        }
+        break;
+      }
+      case 'response.output_text.delta': {
+        const output = snapshot.output[event.output_index];
+        if (!output) {
+          throw new OpenAIError(`missing output at index ${event.output_index}`);
+        }
+        if (output.type === 'message') {
+          const content = output.content[event.content_index];
+          if (!content) {
+            throw new OpenAIError(`missing content at index ${event.content_index}`);
+          }
+          if (content.type !== 'output_text') {
+            throw new OpenAIError(`expected content to be 'output_text', got ${content.type}`);
+          }
+          content.text += event.delta;
+        }
+        break;
+      }
+      case 'response.function_call_arguments.delta': {
+        const output = snapshot.output[event.output_index];
+        if (!output) {
+          throw new OpenAIError(`missing output at index ${event.output_index}`);
+        }
+        if (output.type === 'function_call') {
+          output.arguments += event.delta;
+        }
+        break;
+      }
+      case 'response.completed': {
+        this.#currentResponseSnapshot = event.response;
+        break;
+      }
+    }
+
+    return snapshot;
+  }
+
+  [Symbol.asyncIterator](this: ResponseStream<ParsedT>): AsyncIterator<ResponseStreamEvent> {
+    const pushQueue: ResponseStreamEvent[] = [];
+    const readQueue: {
+      resolve: (event: ResponseStreamEvent | undefined) => void;
+      reject: (err: unknown) => void;
+    }[] = [];
+    let done = false;
+
+    this.on('event', (event) => {
+      const reader = readQueue.shift();
+      if (reader) {
+        reader.resolve(event);
+      } else {
+        pushQueue.push(event);
+      }
+    });
+
+    this.on('end', () => {
+      done = true;
+      for (const reader of readQueue) {
+        reader.resolve(undefined);
+      }
+      readQueue.length = 0;
+    });
+
+    this.on('abort', (err) => {
+      done = true;
+      for (const reader of readQueue) {
+        reader.reject(err);
+      }
+      readQueue.length = 0;
+    });
+
+    this.on('error', (err) => {
+      done = true;
+      for (const reader of readQueue) {
+        reader.reject(err);
+      }
+      readQueue.length = 0;
+    });
+
+    return {
+      next: async (): Promise<IteratorResult<ResponseStreamEvent>> => {
+        if (!pushQueue.length) {
+          if (done) {
+            return { value: undefined, done: true };
+          }
+          return new Promise<ResponseStreamEvent | undefined>((resolve, reject) =>
+            readQueue.push({ resolve, reject }),
+          ).then((event) => (event ? { value: event, done: false } : { value: undefined, done: true }));
+        }
+        const event = pushQueue.shift()!;
+        return { value: event, done: false };
+      },
+      return: async () => {
+        this.abort();
+        return { value: undefined, done: true };
+      },
+    };
+  }
+
+  /**
+   * @returns a promise that resolves with the final Response, or rejects
+   * if an error occurred or the stream ended prematurely without producing a Response.
+   */
+  async finalResponse(): Promise<ParsedResponse<ParsedT>> {
+    await this.done();
+    const response = this.#finalResponse;
+    if (!response) throw new OpenAIError('stream ended without producing a Response');
+    return response;
+  }
+}
+
+function finalizeResponse<ParsedT>(
+  snapshot: Response,
+  params: ResponseStreamingParams | null,
+): ParsedResponse<ParsedT> {
+  return maybeParseResponse(snapshot, params);
+}
diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts
index 919bf53b3..0668dcf54 100644
--- a/src/resources/beta/assistants.ts
+++ b/src/resources/beta/assistants.ts
@@ -4,10 +4,8 @@ import { APIResource } from '../../resource';
 import { isRequestOptions } from '../../core';
 import * as Core from '../../core';
 import * as Shared from '../shared';
-import * as ChatAPI from '../chat/chat';
 import * as MessagesAPI from './threads/messages';
 import * as ThreadsAPI from './threads/threads';
-import * as VectorStoresAPI from './vector-stores/vector-stores';
 import * as RunsAPI from './threads/runs/runs';
 import * as StepsAPI from './threads/runs/steps';
 import { CursorPage, type CursorPageParams } from '../../pagination';
@@ -1105,7 +1103,7 @@ export interface AssistantCreateParams {
    * [Model overview](https://platform.openai.com/docs/models) for descriptions of
    * them.
    */
-  model: (string & {}) | ChatAPI.ChatModel;
+  model: (string & {}) | Shared.ChatModel;

   /**
    * The description of the assistant. The maximum length is 512 characters.
@@ -1134,14 +1132,14 @@ export interface AssistantCreateParams {
   name?: string | null;

   /**
-   * **o1 and o3-mini models only**
+   * **o-series models only**
    *
    * Constrains effort on reasoning for
    * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
    * supported values are `low`, `medium`, and `high`.
Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: 'low' | 'medium' | 'high' | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * Specifies the format that the model must output. Compatible with @@ -1244,9 +1242,9 @@ export namespace AssistantCreateParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. */ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -1265,6 +1263,45 @@ export namespace AssistantCreateParams { */ metadata?: Shared.Metadata | null; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } @@ -1337,14 +1374,14 @@ export interface AssistantUpdateParams { name?: string | null; /** - * **o1 and o3-mini models only** + * **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: 'low' | 'medium' | 'high' | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * Specifies the format that the model must output. 
Compatible with diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts index df929b2f7..0b909de18 100644 --- a/src/resources/beta/beta.ts +++ b/src/resources/beta/beta.ts @@ -40,36 +40,16 @@ import { ThreadUpdateParams, Threads, } from './threads/threads'; -import * as VectorStoresAPI from './vector-stores/vector-stores'; -import { - AutoFileChunkingStrategyParam, - FileChunkingStrategy, - FileChunkingStrategyParam, - OtherFileChunkingStrategyObject, - StaticFileChunkingStrategy, - StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyObjectParam, - VectorStore, - VectorStoreCreateParams, - VectorStoreDeleted, - VectorStoreListParams, - VectorStoreUpdateParams, - VectorStores, - VectorStoresPage, -} from './vector-stores/vector-stores'; import { Chat } from './chat/chat'; export class Beta extends APIResource { realtime: RealtimeAPI.Realtime = new RealtimeAPI.Realtime(this._client); - vectorStores: VectorStoresAPI.VectorStores = new VectorStoresAPI.VectorStores(this._client); chat: ChatAPI.Chat = new ChatAPI.Chat(this._client); assistants: AssistantsAPI.Assistants = new AssistantsAPI.Assistants(this._client); threads: ThreadsAPI.Threads = new ThreadsAPI.Threads(this._client); } Beta.Realtime = Realtime; -Beta.VectorStores = VectorStores; -Beta.VectorStoresPage = VectorStoresPage; Beta.Assistants = Assistants; Beta.AssistantsPage = AssistantsPage; Beta.Threads = Threads; @@ -77,23 +57,6 @@ Beta.Threads = Threads; export declare namespace Beta { export { Realtime as Realtime }; - export { - VectorStores as VectorStores, - type AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam, - type FileChunkingStrategy as FileChunkingStrategy, - type FileChunkingStrategyParam as FileChunkingStrategyParam, - type OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject, - type StaticFileChunkingStrategy as StaticFileChunkingStrategy, - type StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, - type VectorStore as VectorStore, - type VectorStoreDeleted as VectorStoreDeleted, - VectorStoresPage as VectorStoresPage, - type VectorStoreCreateParams as VectorStoreCreateParams, - type VectorStoreUpdateParams as VectorStoreUpdateParams, - type VectorStoreListParams as VectorStoreListParams, - }; - export { Chat }; export { diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts index babca0016..b9cef17cb 100644 --- a/src/resources/beta/index.ts +++ b/src/resources/beta/index.ts @@ -37,19 +37,3 @@ export { type ThreadCreateAndRunPollParams, type ThreadCreateAndRunStreamParams, } from './threads/index'; -export { - VectorStoresPage, - VectorStores, - type AutoFileChunkingStrategyParam, - type FileChunkingStrategy, - type FileChunkingStrategyParam, - type OtherFileChunkingStrategyObject, - type StaticFileChunkingStrategy, - type StaticFileChunkingStrategyObject, - type StaticFileChunkingStrategyObjectParam, - type VectorStore, - type VectorStoreDeleted, - type VectorStoreCreateParams, - type VectorStoreUpdateParams, - type VectorStoreListParams, -} from './vector-stores/index'; diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts index 8ab94cc99..15bfb4204 100644 --- a/src/resources/beta/threads/runs/runs.ts +++ b/src/resources/beta/threads/runs/runs.ts @@ -10,7 +10,6 @@ import { RunSubmitToolOutputsParamsStream } from '../../../../lib/AssistantStrea import * as RunsAPI from './runs'; import * as Shared from 
'../../../shared'; import * as AssistantsAPI from '../../assistants'; -import * as ChatAPI from '../../../chat/chat'; import * as MessagesAPI from '../messages'; import * as ThreadsAPI from '../threads'; import * as StepsAPI from './steps'; @@ -722,7 +721,7 @@ export interface RunCreateParamsBase { * associated with the assistant. If not, the model associated with the assistant * will be used. */ - model?: (string & {}) | ChatAPI.ChatModel | null; + model?: (string & {}) | Shared.ChatModel | null; /** * Body param: Whether to enable @@ -732,14 +731,14 @@ export interface RunCreateParamsBase { parallel_tool_calls?: boolean; /** - * Body param: **o1 and o3-mini models only** + * Body param: **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: 'low' | 'medium' | 'high' | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * Body param: Specifies the format that the model must output. Compatible with diff --git a/src/resources/beta/threads/threads.ts b/src/resources/beta/threads/threads.ts index 3f69c6e60..8075ba0ac 100644 --- a/src/resources/beta/threads/threads.ts +++ b/src/resources/beta/threads/threads.ts @@ -8,7 +8,6 @@ import * as Core from '../../../core'; import * as ThreadsAPI from './threads'; import * as Shared from '../../shared'; import * as AssistantsAPI from '../assistants'; -import * as ChatAPI from '../../chat/chat'; import * as MessagesAPI from './messages'; import { Annotation, @@ -45,7 +44,6 @@ import { TextDelta, TextDeltaBlock, } from './messages'; -import * as VectorStoresAPI from '../vector-stores/vector-stores'; import * as RunsAPI from './runs/runs'; import { RequiredActionFunctionToolCall, @@ -441,9 +439,9 @@ export namespace ThreadCreateParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. */ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -462,6 +460,45 @@ export namespace ThreadCreateParams { */ metadata?: Shared.Metadata | null; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } @@ -573,7 +610,7 @@ export interface ThreadCreateAndRunParamsBase { * model associated with the assistant. If not, the model associated with the * assistant will be used. 
*/ - model?: (string & {}) | ChatAPI.ChatModel | null; + model?: (string & {}) | Shared.ChatModel | null; /** * Whether to enable @@ -800,9 +837,9 @@ export namespace ThreadCreateAndRunParams { export interface VectorStore { /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` - * strategy. Only applicable if `file_ids` is non-empty. + * strategy. */ - chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam; + chunking_strategy?: VectorStore.Auto | VectorStore.Static; /** * A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to @@ -821,6 +858,45 @@ export namespace ThreadCreateAndRunParams { */ metadata?: Shared.Metadata | null; } + + export namespace VectorStore { + /** + * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of + * `800` and `chunk_overlap_tokens` of `400`. + */ + export interface Auto { + /** + * Always `auto`. + */ + type: 'auto'; + } + + export interface Static { + static: Static.Static; + + /** + * Always `static`. + */ + type: 'static'; + } + + export namespace Static { + export interface Static { + /** + * The number of tokens that overlap between chunks. The default value is `400`. + * + * Note that the overlap must not exceed half of `max_chunk_size_tokens`. + */ + chunk_overlap_tokens: number; + + /** + * The maximum number of tokens in each chunk. The default value is `800`. The + * minimum value is `100` and the maximum value is `4096`. + */ + max_chunk_size_tokens: number; + } + } + } } } } diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts index 627b4fc23..9dbc636d8 100644 --- a/src/resources/chat/chat.ts +++ b/src/resources/chat/chat.ts @@ -1,6 +1,7 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
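Editor's note: the hunks above inline `Auto`/`Static` chunking-strategy types in place of the shared `FileChunkingStrategyParam` reference for thread-scoped vector stores. A minimal sketch of what a caller passes, assuming an already-uploaded file (`file-abc123` is a placeholder ID); the token sizes simply echo the documented defaults and bounds:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const thread = await client.beta.threads.create({
  tool_resources: {
    file_search: {
      vector_stores: [
        {
          file_ids: ['file-abc123'], // placeholder file ID
          chunking_strategy: {
            type: 'static',
            // overlap must not exceed half of max_chunk_size_tokens
            static: { max_chunk_size_tokens: 800, chunk_overlap_tokens: 400 },
          },
        },
      ],
    },
  },
});
```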
import { APIResource } from '../../resource'; +import * as Shared from '../shared'; import * as CompletionsAPI from './completions/completions'; import { ChatCompletion, @@ -52,48 +53,7 @@ export class Chat extends APIResource { completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this._client); } -export type ChatModel = - | 'o3-mini' - | 'o3-mini-2025-01-31' - | 'o1' - | 'o1-2024-12-17' - | 'o1-preview' - | 'o1-preview-2024-09-12' - | 'o1-mini' - | 'o1-mini-2024-09-12' - | 'gpt-4.5-preview' - | 'gpt-4.5-preview-2025-02-27' - | 'gpt-4o' - | 'gpt-4o-2024-11-20' - | 'gpt-4o-2024-08-06' - | 'gpt-4o-2024-05-13' - | 'gpt-4o-audio-preview' - | 'gpt-4o-audio-preview-2024-10-01' - | 'gpt-4o-audio-preview-2024-12-17' - | 'gpt-4o-mini-audio-preview' - | 'gpt-4o-mini-audio-preview-2024-12-17' - | 'chatgpt-4o-latest' - | 'gpt-4o-mini' - | 'gpt-4o-mini-2024-07-18' - | 'gpt-4-turbo' - | 'gpt-4-turbo-2024-04-09' - | 'gpt-4-0125-preview' - | 'gpt-4-turbo-preview' - | 'gpt-4-1106-preview' - | 'gpt-4-vision-preview' - | 'gpt-4' - | 'gpt-4-0314' - | 'gpt-4-0613' - | 'gpt-4-32k' - | 'gpt-4-32k-0314' - | 'gpt-4-32k-0613' - | 'gpt-3.5-turbo' - | 'gpt-3.5-turbo-16k' - | 'gpt-3.5-turbo-0301' - | 'gpt-3.5-turbo-0613' - | 'gpt-3.5-turbo-1106' - | 'gpt-3.5-turbo-0125' - | 'gpt-3.5-turbo-16k-0613'; +export type ChatModel = Shared.ChatModel; Chat.Completions = Completions; Chat.ChatCompletionsPage = ChatCompletionsPage; @@ -123,7 +83,6 @@ export declare namespace Chat { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, @@ -134,6 +93,7 @@ export declare namespace Chat { type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type CompletionCreateParams as CompletionCreateParams, diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts index 3af4a3a1d..7b1c353e2 100644 --- a/src/resources/chat/completions/completions.ts +++ b/src/resources/chat/completions/completions.ts @@ -7,7 +7,6 @@ import * as Core from '../../../core'; import * as CompletionsCompletionsAPI from './completions'; import * as CompletionsAPI from '../../completions'; import * as Shared from '../../shared'; -import * as ChatAPI from '../chat'; import * as MessagesAPI from './messages'; import { MessageListParams, Messages } from './messages'; import { CursorPage, type CursorPageParams } from '../../../pagination'; @@ -17,6 +16,13 @@ export class Completions extends APIResource { messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client); /** + * **Starting a new project?** We recommend trying + * [Responses](https://platform.openai.com/docs/api-reference/responses) to take + * advantage of the latest OpenAI platform features. 
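Editor's note: `ChatModel` becomes a plain alias of the shared union, so the literal model list lives in one place. Assuming deep imports from `openai/resources/shared` resolve the way other resource modules do, both the old and new import paths should keep type-checking; a quick sketch:

```ts
import OpenAI from 'openai';
// assumption: deep import path, mirroring the SDK's other resource modules
import type { ChatModel } from 'openai/resources/shared';

const model: ChatModel = 'gpt-4o';

const client = new OpenAI();
const completion = await client.chat.completions.create({
  model,
  messages: [{ role: 'user', content: 'Hello' }],
});
console.log(completion.choices[0]?.message.content);
```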
Compare + * [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses). + * + * --- + * * Creates a model response for the given chat conversation. Learn more in the * [text generation](https://platform.openai.com/docs/guides/text-generation), * [vision](https://platform.openai.com/docs/guides/vision), and @@ -50,7 +56,7 @@ export class Completions extends APIResource { } /** - * Get a stored chat completion. Only chat completions that have been created with + * Get a stored chat completion. Only Chat Completions that have been created with * the `store` parameter set to `true` will be returned. */ retrieve(completionId: string, options?: Core.RequestOptions): Core.APIPromise { @@ -58,7 +64,7 @@ export class Completions extends APIResource { } /** - * Modify a stored chat completion. Only chat completions that have been created + * Modify a stored chat completion. Only Chat Completions that have been created * with the `store` parameter set to `true` can be modified. Currently, the only * supported modification is to update the `metadata` field. */ @@ -71,7 +77,7 @@ export class Completions extends APIResource { } /** - * List stored chat completions. Only chat completions that have been stored with + * List stored Chat Completions. Only Chat Completions that have been stored with * the `store` parameter set to `true` will be returned. */ list( @@ -90,7 +96,7 @@ export class Completions extends APIResource { } /** - * Delete a stored chat completion. Only chat completions that have been created + * Delete a stored chat completion. Only Chat Completions that have been created * with the `store` parameter set to `true` can be deleted. */ del(completionId: string, options?: Core.RequestOptions): Core.APIPromise { @@ -316,16 +322,16 @@ export interface ChatCompletionAudioParam { format: 'wav' | 'mp3' | 'flac' | 'opus' | 'pcm16'; /** - * The voice the model uses to respond. Supported voices are `ash`, `ballad`, - * `coral`, `sage`, and `verse` (also supported but not recommended are `alloy`, - * `echo`, and `shimmer`; these voices are less expressive). + * The voice the model uses to respond. Supported voices are `alloy`, `ash`, + * `ballad`, `coral`, `echo`, `sage`, and `shimmer`. */ voice: 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse'; } /** - * Represents a streamed chunk of a chat completion response returned by model, + * Represents a streamed chunk of a chat completion response returned by the model, * based on the provided input. + * [Learn more](https://platform.openai.com/docs/guides/streaming-responses). */ export interface ChatCompletionChunk { /** @@ -512,7 +518,43 @@ export namespace ChatCompletionChunk { export type ChatCompletionContentPart = | ChatCompletionContentPartText | ChatCompletionContentPartImage - | ChatCompletionContentPartInputAudio; + | ChatCompletionContentPartInputAudio + | ChatCompletionContentPart.File; + +export namespace ChatCompletionContentPart { + /** + * Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text + * generation. + */ + export interface File { + file: File.File; + + /** + * The type of the content part. Always `file`. + */ + type: 'file'; + } + + export namespace File { + export interface File { + /** + * The base64 encoded file data, used when passing the file to the model as a + * string. + */ + file_data?: string; + + /** + * The ID of an uploaded file to use as input. 
+ */ + file_id?: string; + + /** + * The name of the file, used when passing the file to the model as a string. + */ + file_name?: string; + } + } +} /** * Learn about [image inputs](https://platform.openai.com/docs/guides/vision). @@ -685,6 +727,12 @@ export interface ChatCompletionMessage { */ role: 'assistant'; + /** + * Annotations for the message, when applicable, as when using the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + annotations?: Array; + /** * If the audio output modality is requested, this object contains data about the * audio response from the model. @@ -705,6 +753,48 @@ export interface ChatCompletionMessage { } export namespace ChatCompletionMessage { + /** + * A URL citation when using web search. + */ + export interface Annotation { + /** + * The type of the URL citation. Always `url_citation`. + */ + type: 'url_citation'; + + /** + * A URL citation when using web search. + */ + url_citation: Annotation.URLCitation; + } + + export namespace Annotation { + /** + * A URL citation when using web search. + */ + export interface URLCitation { + /** + * The index of the last character of the URL citation in the message. + */ + end_index: number; + + /** + * The index of the first character of the URL citation in the message. + */ + start_index: number; + + /** + * The title of the web resource. + */ + title: string; + + /** + * The URL of the web resource. + */ + url: string; + } + } + /** * @deprecated Deprecated and replaced by `tool_calls`. The name and arguments of a * function that should be called, as generated by the model. @@ -818,16 +908,6 @@ export interface ChatCompletionPredictionContent { type: 'content'; } -/** - * **o1 and o3-mini models only** - * - * Constrains effort on reasoning for - * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - * result in faster responses and fewer tokens used on reasoning in a response. - */ -export type ChatCompletionReasoningEffort = 'low' | 'medium' | 'high' | null; - /** * The role of the author of a message */ @@ -998,6 +1078,8 @@ export interface ChatCompletionUserMessageParam { */ export type CreateChatCompletionRequestMessage = ChatCompletionMessageParam; +export type ChatCompletionReasoningEffort = Shared.ReasoningEffort | null; + export type ChatCompletionCreateParams = | ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming; @@ -1014,11 +1096,13 @@ export interface ChatCompletionCreateParamsBase { messages: Array; /** - * ID of the model to use. See the - * [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility) - * table for details on which models work with the Chat API. + * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * wide range of models with different capabilities, performance characteristics, + * and price points. Refer to the + * [model guide](https://platform.openai.com/docs/models) to browse and compare + * available models. */ - model: (string & {}) | ChatAPI.ChatModel; + model: (string & {}) | Shared.ChatModel; /** * Parameters for audio output. Required when audio output is requested with @@ -1107,8 +1191,8 @@ export interface ChatCompletionCreateParamsBase { metadata?: Shared.Metadata | null; /** - * Output types that you would like the model to generate for this request. 
Most - * models are capable of generating text, which is the default: + * Output types that you would like the model to generate. Most models are capable + * of generating text, which is the default: * * `["text"]` * * The `gpt-4o-audio-preview` model can also be used to * [generate audio](https://platform.openai.com/docs/guides/audio). To request that * this model generate both text and audio responses, you can use: * * `["text", "audio"]` */ - modalities?: Array | null; + modalities?: Array<'text' | 'audio'> | null; /** * How many chat completion choices to generate for each input message. Note that @@ -1148,14 +1232,14 @@ export interface ChatCompletionCreateParamsBase { presence_penalty?: number | null; /** - * **o1 and o3-mini models only** + * **o-series models only** * * Constrains effort on reasoning for * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can * result in faster responses and fewer tokens used on reasoning in a response. */ - reasoning_effort?: ChatCompletionReasoningEffort | null; + reasoning_effort?: Shared.ReasoningEffort | null; /** * An object specifying the format that the model must output. @@ -1165,21 +1249,14 @@ export interface ChatCompletionCreateParamsBase { * in the * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). * - * Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the - * message the model generates is valid JSON. - * - * **Important:** when using JSON mode, you **must** also instruct the model to - * produce JSON yourself via a system or user message. Without this, the model may - * generate an unending stream of whitespace until the generation reaches the token - * limit, resulting in a long-running and seemingly "stuck" request. Also note that - * the message content may be partially cut off if `finish_reason="length"`, which - * indicates the generation exceeded `max_tokens` or the conversation exceeded the - * max context length. + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. */ response_format?: | Shared.ResponseFormatText - | Shared.ResponseFormatJSONObject - | Shared.ResponseFormatJSONSchema; + | Shared.ResponseFormatJSONSchema + | Shared.ResponseFormatJSONObject; /** * This feature is in Beta. If specified, our system will make a best effort to @@ -1198,15 +1275,19 @@ export interface ChatCompletionCreateParamsBase { * utilize scale tier credits until they are exhausted. * - If set to 'auto', and the Project is not Scale tier enabled, the request will * be processed using the default service tier with a lower uptime SLA and no - * latency guarantee. + * latency guarantee. * - If set to 'default', the request will be processed using the default service - * tier with a lower uptime SLA and no latency guarantee. + * tier with a lower uptime SLA and no latency guarantee. * - When not set, the default behavior is 'auto'. + * + * When this parameter is set, the response body will include the `service_tier` + * utilized. */ service_tier?: 'auto' | 'default' | null; /** - * Up to 4 sequences where the API will stop generating further tokens. + * Up to 4 sequences where the API will stop generating further tokens. The + * returned text will not contain the stop sequence.
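Editor's note: since the rewritten `response_format` docs above steer callers from the older JSON mode toward `json_schema`, here is a hedged sketch of the structured-output shape; the schema and its name are made up for illustration:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const completion = await client.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'List three prime numbers as JSON.' }],
  response_format: {
    type: 'json_schema',
    json_schema: {
      name: 'primes', // hypothetical schema name
      strict: true,
      schema: {
        type: 'object',
        properties: { primes: { type: 'array', items: { type: 'integer' } } },
        required: ['primes'],
        additionalProperties: false,
      },
    },
  },
});
console.log(completion.choices[0]?.message.content);
```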
*/ stop?: string | null | Array; @@ -1218,12 +1299,14 @@ export interface ChatCompletionCreateParamsBase { store?: boolean | null; /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. */ stream?: boolean | null; @@ -1282,6 +1365,13 @@ export interface ChatCompletionCreateParamsBase { * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). */ user?: string; + + /** + * This tool searches the web for relevant results to use in a response. Learn more + * about the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + web_search_options?: ChatCompletionCreateParams.WebSearchOptions; } export namespace ChatCompletionCreateParams { @@ -1313,6 +1403,70 @@ export namespace ChatCompletionCreateParams { parameters?: Shared.FunctionParameters; } + /** + * This tool searches the web for relevant results to use in a response. Learn more + * about the + * [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). + */ + export interface WebSearchOptions { + /** + * High level guidance for the amount of context window space to use for the + * search. One of `low`, `medium`, or `high`. `medium` is the default. + */ + search_context_size?: 'low' | 'medium' | 'high'; + + /** + * Approximate location parameters for the search. + */ + user_location?: WebSearchOptions.UserLocation | null; + } + + export namespace WebSearchOptions { + /** + * Approximate location parameters for the search. + */ + export interface UserLocation { + /** + * Approximate location parameters for the search. + */ + approximate: UserLocation.Approximate; + + /** + * The type of location approximation. Always `approximate`. + */ + type: 'approximate'; + } + + export namespace UserLocation { + /** + * Approximate location parameters for the search. + */ + export interface Approximate { + /** + * Free text input for the city of the user, e.g. `San Francisco`. + */ + city?: string; + + /** + * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + * the user, e.g. `US`. + */ + country?: string; + + /** + * Free text input for the region of the user, e.g. `California`. + */ + region?: string; + + /** + * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + * user, e.g. `America/Los_Angeles`. 
+ */ + timezone?: string; + } + } + } + export type ChatCompletionCreateParamsNonStreaming = CompletionsCompletionsAPI.ChatCompletionCreateParamsNonStreaming; export type ChatCompletionCreateParamsStreaming = @@ -1326,12 +1480,14 @@ export type CompletionCreateParams = ChatCompletionCreateParams; export interface ChatCompletionCreateParamsNonStreaming extends ChatCompletionCreateParamsBase { /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. */ stream?: false | null; } @@ -1343,12 +1499,14 @@ export type CompletionCreateParamsNonStreaming = ChatCompletionCreateParamsNonSt export interface ChatCompletionCreateParamsStreaming extends ChatCompletionCreateParamsBase { /** - * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be - * sent as data-only - * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) - * as they become available, with the stream terminated by a `data: [DONE]` - * message. - * [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). + * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming) + * for more information, along with the + * [streaming responses](https://platform.openai.com/docs/guides/streaming-responses) + * guide for more information on how to handle the streaming events. */ stream: true; } @@ -1377,19 +1535,19 @@ export type CompletionUpdateParams = ChatCompletionUpdateParams; export interface ChatCompletionListParams extends CursorPageParams { /** - * A list of metadata keys to filter the chat completions by. Example: + * A list of metadata keys to filter the Chat Completions by. Example: * * `metadata[key1]=value1&metadata[key2]=value2` */ metadata?: Shared.Metadata | null; /** - * The model used to generate the chat completions. + * The model used to generate the Chat Completions. */ model?: string; /** - * Sort order for chat completions by timestamp. Use `asc` for ascending order or + * Sort order for Chat Completions by timestamp. Use `asc` for ascending order or * `desc` for descending order. Defaults to `asc`. 
*/ order?: 'asc' | 'desc'; @@ -1425,7 +1583,6 @@ export declare namespace Completions { type ChatCompletionModality as ChatCompletionModality, type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent as ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, type ChatCompletionRole as ChatCompletionRole, type ChatCompletionStoreMessage as ChatCompletionStoreMessage, type ChatCompletionStreamOptions as ChatCompletionStreamOptions, @@ -1436,6 +1593,7 @@ export declare namespace Completions { type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam, type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam, type CreateChatCompletionRequestMessage as CreateChatCompletionRequestMessage, + type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort, ChatCompletionsPage as ChatCompletionsPage, type ChatCompletionCreateParams as ChatCompletionCreateParams, type CompletionCreateParams as CompletionCreateParams, diff --git a/src/resources/chat/completions/index.ts b/src/resources/chat/completions/index.ts index 3691f41d8..994d6f880 100644 --- a/src/resources/chat/completions/index.ts +++ b/src/resources/chat/completions/index.ts @@ -24,7 +24,6 @@ export { type ChatCompletionModality, type ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort, type ChatCompletionRole, type ChatCompletionStoreMessage, type ChatCompletionStreamOptions, diff --git a/src/resources/chat/completions/messages.ts b/src/resources/chat/completions/messages.ts index fc1cc5d94..519a33aff 100644 --- a/src/resources/chat/completions/messages.ts +++ b/src/resources/chat/completions/messages.ts @@ -9,7 +9,7 @@ import { type CursorPageParams } from '../../../pagination'; export class Messages extends APIResource { /** - * Get the messages in a stored chat completion. Only chat completions that have + * Get the messages in a stored chat completion. Only Chat Completions that have * been created with the `store` parameter set to `true` will be returned. */ list( diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts index a9b5b46fb..62ca758e0 100644 --- a/src/resources/chat/index.ts +++ b/src/resources/chat/index.ts @@ -1,6 +1,6 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -export { Chat, type ChatModel } from './chat'; +export { Chat } from './chat'; export { ChatCompletionStoreMessagesPage, ChatCompletionsPage, @@ -25,7 +25,6 @@ export { type ChatCompletionModality, type ChatCompletionNamedToolChoice, type ChatCompletionPredictionContent, - type ChatCompletionReasoningEffort, type ChatCompletionRole, type ChatCompletionStoreMessage, type ChatCompletionStreamOptions, diff --git a/src/resources/files.ts b/src/resources/files.ts index f5f23dcad..723ac4cde 100644 --- a/src/resources/files.ts +++ b/src/resources/files.ts @@ -186,16 +186,12 @@ export interface FileObject { } /** - * The intended purpose of the uploaded file. - * - * Use "assistants" for - * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - * [Message](https://platform.openai.com/docs/api-reference/messages) files, - * "vision" for Assistants image file inputs, "batch" for - * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + * The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the + * Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + * fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + * Flexible file type for any purpose - `evals`: Used for eval data sets */ -export type FilePurpose = 'assistants' | 'batch' | 'fine-tune' | 'vision'; +export type FilePurpose = 'assistants' | 'batch' | 'fine-tune' | 'vision' | 'user_data' | 'evals'; export interface FileCreateParams { /** @@ -204,14 +200,10 @@ export interface FileCreateParams { file: Core.Uploadable; /** - * The intended purpose of the uploaded file. - * - * Use "assistants" for - * [Assistants](https://platform.openai.com/docs/api-reference/assistants) and - * [Message](https://platform.openai.com/docs/api-reference/messages) files, - * "vision" for Assistants image file inputs, "batch" for - * [Batch API](https://platform.openai.com/docs/guides/batch), and "fine-tune" for - * [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning). + * The intended purpose of the uploaded file. One of: - `assistants`: Used in the + * Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for + * fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: + * Flexible file type for any purpose - `evals`: Used for eval data sets */ purpose: FilePurpose; } diff --git a/src/resources/index.ts b/src/resources/index.ts index ad0302357..04c2c887b 100644 --- a/src/resources/index.ts +++ b/src/resources/index.ts @@ -60,4 +60,24 @@ export { type ModerationCreateResponse, type ModerationCreateParams, } from './moderations'; +export { Responses } from './responses/responses'; export { Uploads, type Upload, type UploadCreateParams, type UploadCompleteParams } from './uploads/uploads'; +export { + VectorStoresPage, + VectorStoreSearchResponsesPage, + VectorStores, + type AutoFileChunkingStrategyParam, + type FileChunkingStrategy, + type FileChunkingStrategyParam, + type OtherFileChunkingStrategyObject, + type StaticFileChunkingStrategy, + type StaticFileChunkingStrategyObject, + type StaticFileChunkingStrategyObjectParam, + type VectorStore, + type VectorStoreDeleted, + type VectorStoreSearchResponse, + type VectorStoreCreateParams, + type VectorStoreUpdateParams, + type VectorStoreListParams, + type VectorStoreSearchParams, +} from './vector-stores/vector-stores'; diff --git a/src/resources/responses/index.ts b/src/resources/responses/index.ts new file mode 100644 index 000000000..84f761a93 --- /dev/null +++ b/src/resources/responses/index.ts @@ -0,0 +1,9 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +export { + ResponseItemListDataPage, + InputItems, + type ResponseItemList, + type InputItemListParams, +} from './input-items'; +export { Responses } from './responses'; diff --git a/src/resources/responses/input-items.ts b/src/resources/responses/input-items.ts new file mode 100644 index 000000000..9704be89a --- /dev/null +++ b/src/resources/responses/input-items.ts @@ -0,0 +1,276 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import * as Core from '../../core'; +import * as ResponsesAPI from './responses'; +import { CursorPage, type CursorPageParams } from '../../pagination'; + +export class InputItems extends APIResource { + /** + * Returns a list of input items for a given response. 
+ */ + list( + responseId: string, + query?: InputItemListParams, + options?: Core.RequestOptions, + ): Core.PagePromise< + ResponseItemListDataPage, + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + >; + list( + responseId: string, + options?: Core.RequestOptions, + ): Core.PagePromise< + ResponseItemListDataPage, + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + >; + list( + responseId: string, + query: InputItemListParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.PagePromise< + ResponseItemListDataPage, + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + > { + if (isRequestOptions(query)) { + return this.list(responseId, {}, query); + } + return this._client.getAPIList(`/responses/${responseId}/input_items`, ResponseItemListDataPage, { + query, + ...options, + }); + } +} + +export class ResponseItemListDataPage extends CursorPage< + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput +> {} + +/** + * A list of Response items. + */ +export interface ResponseItemList { + /** + * A list of items used to generate this response. + */ + data: Array< + | ResponseItemList.Message + | ResponsesAPI.ResponseOutputMessage + | ResponsesAPI.ResponseFileSearchToolCall + | ResponsesAPI.ResponseComputerToolCall + | ResponseItemList.ComputerCallOutput + | ResponsesAPI.ResponseFunctionWebSearch + | ResponsesAPI.ResponseFunctionToolCall + | ResponseItemList.FunctionCallOutput + >; + + /** + * The ID of the first item in the list. + */ + first_id: string; + + /** + * Whether there are more items available. + */ + has_more: boolean; + + /** + * The ID of the last item in the list. + */ + last_id: string; + + /** + * The type of object returned, must be `list`. + */ + object: 'list'; +} + +export namespace ResponseItemList { + export interface Message { + /** + * The unique ID of the message input. + */ + id: string; + + /** + * A list of one or many input items to the model, containing different content + * types. + */ + content: ResponsesAPI.ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `system`, or `developer`. + */ + role: 'user' | 'system' | 'developer'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the message input. Always set to `message`.
*/ + type?: 'message'; + } + + export interface ComputerCallOutput { + /** + * The unique ID of the computer call tool output. + */ + id: string; + + /** + * The ID of the computer tool call that produced the output. + */ + call_id: string; + + /** + * A computer screenshot image used with the computer use tool. + */ + output: ComputerCallOutput.Output; + + /** + * The type of the computer tool call output. Always `computer_call_output`. + */ + type: 'computer_call_output'; + + /** + * The safety checks reported by the API that have been acknowledged by the + * developer. + */ + acknowledged_safety_checks?: Array; + + /** + * The status of the item. One of `in_progress`, `completed`, or + * `incomplete`. Populated when input items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } + + export namespace ComputerCallOutput { + /** + * A computer screenshot image used with the computer use tool. + */ + export interface Output { + /** + * Specifies the event type. For a computer screenshot, this property is always set + * to `computer_screenshot`. + */ + type: 'computer_screenshot'; + + /** + * The identifier of an uploaded file that contains the screenshot. + */ + file_id?: string; + + /** + * The URL of the screenshot image. + */ + image_url?: string; + } + + /** + * A pending safety check for the computer call. + */ + export interface AcknowledgedSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } + } + + export interface FunctionCallOutput { + /** + * The unique ID of the function call tool output. + */ + id: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * A JSON string of the output of the function tool call. + */ + output: string; + + /** + * The type of the function tool call output. Always `function_call_output`. + */ + type: 'function_call_output'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } +} + +export interface InputItemListParams extends CursorPageParams { + /** + * An item ID to list items before, used in pagination. + */ + before?: string; + + /** + * The order to return the input items in. Default is `asc`. + * + * - `asc`: Return the input items in ascending order. + * - `desc`: Return the input items in descending order. + */ + order?: 'asc' | 'desc'; +} + +InputItems.ResponseItemListDataPage = ResponseItemListDataPage; + +export declare namespace InputItems { + export { + type ResponseItemList as ResponseItemList, + ResponseItemListDataPage as ResponseItemListDataPage, + type InputItemListParams as InputItemListParams, + }; +} diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts new file mode 100644 index 000000000..2ad146873 --- /dev/null +++ b/src/resources/responses/responses.ts @@ -0,0 +1,2761 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
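Editor's note: the `InputItems` resource above pages through `/responses/{response_id}/input_items`, and auto-pagination hides the cursor handling; `resp_abc123` is a placeholder response ID:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

for await (const item of client.responses.inputItems.list('resp_abc123', { order: 'asc' })) {
  console.log(item.type);
}
```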
+ +import { + type ExtractParsedContentFromParams, + parseResponse, + type ResponseCreateParamsWithTools, + addOutputText, +} from '../../lib/ResponsesParser'; +import * as Core from '../../core'; +import { APIPromise, isRequestOptions } from '../../core'; +import { APIResource } from '../../resource'; +import { Stream } from '../../streaming'; +import * as Shared from '../shared'; +import * as InputItemsAPI from './input-items'; +import { InputItemListParams, InputItems, ResponseItemList, ResponseItemListDataPage } from './input-items'; +import * as ResponsesAPI from './responses'; +import { ResponseStream, ResponseStreamParams } from '../../lib/responses/ResponseStream'; + +export interface ParsedResponseOutputText extends ResponseOutputText { + parsed: ParsedT | null; +} + +export type ParsedContent = ParsedResponseOutputText | ResponseOutputRefusal; + +export interface ParsedResponseOutputMessage extends ResponseOutputMessage { + content: ParsedContent[]; +} + +export interface ParsedResponseFunctionToolCall extends ResponseFunctionToolCall { + parsed_arguments: any; +} + +export type ParsedResponseOutputItem = + | ParsedResponseOutputMessage + | ParsedResponseFunctionToolCall + | ResponseFileSearchToolCall + | ResponseFunctionWebSearch + | ResponseComputerToolCall + | ResponseOutputItem.Reasoning; + +export interface ParsedResponse extends Response { + output: Array>; + + output_parsed: ParsedT | null; +} + +export type ResponseParseParams = ResponseCreateParamsNonStreaming; +export class Responses extends APIResource { + inputItems: InputItemsAPI.InputItems = new InputItemsAPI.InputItems(this._client); + + /** + * Creates a model response. Provide + * [text](https://platform.openai.com/docs/guides/text) or + * [image](https://platform.openai.com/docs/guides/images) inputs to generate + * [text](https://platform.openai.com/docs/guides/text) or + * [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have + * the model call your own + * [custom code](https://platform.openai.com/docs/guides/function-calling) or use + * built-in [tools](https://platform.openai.com/docs/guides/tools) like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search) to use + * your own data as input for the model's response. + */ + create(body: ResponseCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise; + create( + body: ResponseCreateParamsStreaming, + options?: Core.RequestOptions, + ): APIPromise>; + create( + body: ResponseCreateParamsBase, + options?: Core.RequestOptions, + ): APIPromise | Response>; + create( + body: ResponseCreateParams, + options?: Core.RequestOptions, + ): APIPromise | APIPromise> { + return ( + this._client.post('/responses', { body, ...options, stream: body.stream ?? false }) as + | APIPromise + | APIPromise> + )._thenUnwrap((rsp) => { + if ('type' in rsp && rsp.type === 'response') { + addOutputText(rsp as Response); + } + + return rsp; + }) as APIPromise | APIPromise>; + } + + /** + * Retrieves a model response with the given ID. 
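Editor's note: the `create` overloads above switch the return type on `stream`, mirroring chat completions. A hedged sketch of consuming the raw event stream; the `response.output_text.delta` event type belongs to the Responses streaming surface defined further down in this file:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const stream = await client.responses.create({
  model: 'gpt-4o',
  input: 'Write a haiku about code review.',
  stream: true,
});

for await (const event of stream) {
  if (event.type === 'response.output_text.delta') {
    process.stdout.write(event.delta);
  }
}
```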
+ */ + retrieve( + responseId: string, + query?: ResponseRetrieveParams, + options?: Core.RequestOptions, + ): Core.APIPromise; + retrieve(responseId: string, options?: Core.RequestOptions): Core.APIPromise; + retrieve( + responseId: string, + query: ResponseRetrieveParams | Core.RequestOptions = {}, + options?: Core.RequestOptions, + ): Core.APIPromise { + if (isRequestOptions(query)) { + return this.retrieve(responseId, {}, query); + } + return this._client.get(`/responses/${responseId}`, { query, ...options }); + } + + /** + * Deletes a model response with the given ID. + */ + del(responseId: string, options?: Core.RequestOptions): Core.APIPromise { + return this._client.delete(`/responses/${responseId}`, { + ...options, + headers: { Accept: '*/*', ...options?.headers }, + }); + } + + parse>( + body: Params, + options?: Core.RequestOptions, + ): Core.APIPromise> { + return this._client.responses + .create(body, options) + ._thenUnwrap((response) => parseResponse(response as Response, body)); + } + + /** + * Creates a model response stream + */ + stream>( + body: Params, + options?: Core.RequestOptions, + ): ResponseStream { + return ResponseStream.createResponse(this._client, body, options); + } +} + +/** + * A tool that controls a virtual computer. Learn more about the + * [computer tool](https://platform.openai.com/docs/guides/tools-computer-use). + */ +export interface ComputerTool { + /** + * The height of the computer display. + */ + display_height: number; + + /** + * The width of the computer display. + */ + display_width: number; + + /** + * The type of computer environment to control. + */ + environment: 'mac' | 'windows' | 'ubuntu' | 'browser'; + + /** + * The type of the computer use tool. Always `computer_use_preview`. + */ + type: 'computer-preview'; +} + +/** + * A message input to the model with a role indicating instruction following + * hierarchy. Instructions given with the `developer` or `system` role take + * precedence over instructions given with the `user` role. Messages with the + * `assistant` role are presumed to have been generated by the model in previous + * interactions. + */ +export interface EasyInputMessage { + /** + * Text, image, or audio input to the model, used to generate a response. Can also + * contain previous assistant responses. + */ + content: string | ResponseInputMessageContentList; + + /** + * The role of the message input. One of `user`, `assistant`, `system`, or + * `developer`. + */ + role: 'user' | 'assistant' | 'system' | 'developer'; + + /** + * The type of the message input. Always `message`. + */ + type?: 'message'; +} + +/** + * A tool that searches for relevant content from uploaded files. Learn more about + * the + * [file search tool](https://platform.openai.com/docs/guides/tools-file-search). + */ +export interface FileSearchTool { + /** + * The type of the file search tool. Always `file_search`. + */ + type: 'file_search'; + + /** + * The IDs of the vector stores to search. + */ + vector_store_ids: Array; + + /** + * A filter to apply based on file attributes. + */ + filters?: Shared.ComparisonFilter | Shared.CompoundFilter; + + /** + * The maximum number of results to return. This number should be between 1 and 50 + * inclusive. + */ + max_num_results?: number; + + /** + * Ranking options for search. + */ + ranking_options?: FileSearchTool.RankingOptions; +} + +export namespace FileSearchTool { + /** + * Ranking options for search. + */ + export interface RankingOptions { + /** + * The ranker to use for the file search.
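Editor's note: a sketch of attaching the `FileSearchTool` defined above to a response; `vs_abc123` is a placeholder vector store ID:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const response = await client.responses.create({
  model: 'gpt-4o',
  input: 'What does the handbook say about travel expenses?',
  tools: [
    {
      type: 'file_search',
      vector_store_ids: ['vs_abc123'], // placeholder vector store ID
      max_num_results: 8,
    },
  ],
});
console.log(response.output_text);
```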
+ */ + ranker?: 'auto' | 'default-2024-11-15'; + + /** + * The score threshold for the file search, a number between 0 and 1. Numbers + * closer to 1 will attempt to return only the most relevant results, but may + * return fewer results. + */ + score_threshold?: number; + } +} + +/** + * Defines a function in your own code the model can choose to call. Learn more + * about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ +export interface FunctionTool { + /** + * The name of the function to call. + */ + name: string; + + /** + * A JSON schema object describing the parameters of the function. + */ + parameters: Record; + + /** + * Whether to enforce strict parameter validation. Default `true`. + */ + strict: boolean; + + /** + * The type of the function tool. Always `function`. + */ + type: 'function'; + + /** + * A description of the function. Used by the model to determine whether or not to + * call the function. + */ + description?: string | null; +} + +export interface Response { + /** + * Unique identifier for this Response. + */ + id: string; + + /** + * Unix timestamp (in seconds) of when this Response was created. + */ + created_at: number; + + output_text: string; + + /** + * An error object returned when the model fails to generate a Response. + */ + error: ResponseError | null; + + /** + * Details about why the response is incomplete. + */ + incomplete_details: Response.IncompleteDetails | null; + + /** + * Inserts a system (or developer) message as the first item in the model's + * context. + * + * When used along with `previous_response_id`, the instructions from a previous + * response will not be carried over to the next response. This makes it simple + * to swap out system (or developer) messages in new responses. + */ + instructions: string | null; + + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. + * + * Keys are strings with a maximum length of 64 characters. Values are strings with + * a maximum length of 512 characters. + */ + metadata: Shared.Metadata | null; + + /** + * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a + * wide range of models with different capabilities, performance characteristics, + * and price points. Refer to the + * [model guide](https://platform.openai.com/docs/models) to browse and compare + * available models. + */ + model: (string & {}) | Shared.ChatModel; + + /** + * The object type of this resource - always set to `response`. + */ + object: 'response'; + + /** + * An array of content items generated by the model. + * + * - The length and order of items in the `output` array is dependent on the + * model's response. + * - Rather than accessing the first item in the `output` array and assuming it's + * an `assistant` message with the content generated by the model, you might + * consider using the `output_text` property where supported in SDKs. + */ + output: Array; + + /** + * Whether to allow the model to run tool calls in parallel. + */ + parallel_tool_calls: boolean; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both.
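Editor's note: unlike chat completions, the `FunctionTool` above is flat (no nested `function` object). A sketch with a made-up `get_weather` function:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const response = await client.responses.create({
  model: 'gpt-4o',
  input: 'What is the weather like in Paris?',
  tools: [
    {
      type: 'function',
      name: 'get_weather', // hypothetical function
      description: 'Look up the current weather for a city.',
      strict: true,
      parameters: {
        type: 'object',
        properties: { city: { type: 'string' } },
        required: ['city'],
        additionalProperties: false,
      },
    },
  ],
});
```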
+ */ + temperature: number | null; + + /** + * How the model should select which tool (or tools) to use when generating a + * response. See the `tools` parameter to see how to specify which tools the model + * can call. + */ + tool_choice: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction; + + /** + * An array of tools the model may call while generating a response. You can + * specify which tool to use by setting the `tool_choice` parameter. + * + * The two categories of tools you can provide the model are: + * + * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + * capabilities, like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search). + * Learn more about + * [built-in tools](https://platform.openai.com/docs/guides/tools). + * - **Function calls (custom tools)**: Functions that are defined by you, enabling + * the model to call your own code. Learn more about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ + tools: Array; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p: number | null; + + /** + * An upper bound for the number of tokens that can be generated for a response, + * including visible output tokens and + * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning). + */ + max_output_tokens?: number | null; + + /** + * The unique ID of the previous response to the model. Use this to create + * multi-turn conversations. Learn more about + * [conversation state](https://platform.openai.com/docs/guides/conversation-state). + */ + previous_response_id?: string | null; + + /** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ + reasoning?: Shared.Reasoning | null; + + /** + * The status of the response generation. One of `completed`, `failed`, + * `in_progress`, or `incomplete`. + */ + status?: ResponseStatus; + + /** + * Configuration options for a text response from the model. Can be plain text or + * structured JSON data. Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ + text?: ResponseTextConfig; + + /** + * The truncation strategy to use for the model response. + * + * - `auto`: If the context of this response and previous ones exceeds the model's + * context window size, the model will truncate the response to fit the context + * window by dropping input items in the middle of the conversation. + * - `disabled` (default): If a model response will exceed the context window size + * for a model, the request will fail with a 400 error. + */ + truncation?: 'auto' | 'disabled' | null; + + /** + * Represents token usage details including input tokens, output tokens, a + * breakdown of output tokens, and the total tokens used. + */ + usage?: ResponseUsage; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor + * and detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
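Editor's note: the `previous_response_id` field above is the conversation-state mechanism; chaining two calls is enough to see it in action:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const first = await client.responses.create({
  model: 'gpt-4o',
  input: 'Pick a random city and name it.',
});

// The second request inherits the conversation, not the instructions.
const followUp = await client.responses.create({
  model: 'gpt-4o',
  previous_response_id: first.id,
  input: 'Which country is that city in?',
});
console.log(followUp.output_text);
```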
+ */ + user?: string; +} + +export namespace Response { + /** + * Details about why the response is incomplete. + */ + export interface IncompleteDetails { + /** + * The reason why the response is incomplete. + */ + reason?: 'max_output_tokens' | 'content_filter'; + } +} + +/** + * Emitted when there is a partial audio response. + */ +export interface ResponseAudioDeltaEvent { + /** + * A chunk of Base64 encoded response audio bytes. + */ + delta: string; + + /** + * The type of the event. Always `response.audio.delta`. + */ + type: 'response.audio.delta'; +} + +/** + * Emitted when the audio response is complete. + */ +export interface ResponseAudioDoneEvent { + /** + * The type of the event. Always `response.audio.done`. + */ + type: 'response.audio.done'; +} + +/** + * Emitted when there is a partial transcript of audio. + */ +export interface ResponseAudioTranscriptDeltaEvent { + /** + * The partial transcript of the audio response. + */ + delta: string; + + /** + * The type of the event. Always `response.audio.transcript.delta`. + */ + type: 'response.audio.transcript.delta'; +} + +/** + * Emitted when the full audio transcript is completed. + */ +export interface ResponseAudioTranscriptDoneEvent { + /** + * The type of the event. Always `response.audio.transcript.done`. + */ + type: 'response.audio.transcript.done'; +} + +/** + * Emitted when a partial code snippet is added by the code interpreter. + */ +export interface ResponseCodeInterpreterCallCodeDeltaEvent { + /** + * The partial code snippet added by the code interpreter. + */ + delta: string; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.code.delta`. + */ + type: 'response.code_interpreter_call.code.delta'; +} + +/** + * Emitted when code snippet output is finalized by the code interpreter. + */ +export interface ResponseCodeInterpreterCallCodeDoneEvent { + /** + * The final code snippet output by the code interpreter. + */ + code: string; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.code.done`. + */ + type: 'response.code_interpreter_call.code.done'; +} + +/** + * Emitted when the code interpreter call is completed. + */ +export interface ResponseCodeInterpreterCallCompletedEvent { + /** + * A tool call to run code. + */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.completed`. + */ + type: 'response.code_interpreter_call.completed'; +} + +/** + * Emitted when a code interpreter call is in progress. + */ +export interface ResponseCodeInterpreterCallInProgressEvent { + /** + * A tool call to run code. + */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.in_progress`. + */ + type: 'response.code_interpreter_call.in_progress'; +} + +/** + * Emitted when the code interpreter is actively interpreting the code snippet. + */ +export interface ResponseCodeInterpreterCallInterpretingEvent { + /** + * A tool call to run code. 
+ */ + code_interpreter_call: ResponseCodeInterpreterToolCall; + + /** + * The index of the output item that the code interpreter call is in progress. + */ + output_index: number; + + /** + * The type of the event. Always `response.code_interpreter_call.interpreting`. + */ + type: 'response.code_interpreter_call.interpreting'; +} + +/** + * A tool call to run code. + */ +export interface ResponseCodeInterpreterToolCall { + /** + * The unique ID of the code interpreter tool call. + */ + id: string; + + /** + * The code to run. + */ + code: string; + + /** + * The results of the code interpreter tool call. + */ + results: Array; + + /** + * The status of the code interpreter tool call. + */ + status: 'in_progress' | 'interpreting' | 'completed'; + + /** + * The type of the code interpreter tool call. Always `code_interpreter_call`. + */ + type: 'code_interpreter_call'; +} + +export namespace ResponseCodeInterpreterToolCall { + /** + * The output of a code interpreter tool call that is text. + */ + export interface Logs { + /** + * The logs of the code interpreter tool call. + */ + logs: string; + + /** + * The type of the code interpreter text output. Always `logs`. + */ + type: 'logs'; + } + + /** + * The output of a code interpreter tool call that is a file. + */ + export interface Files { + files: Array; + + /** + * The type of the code interpreter file output. Always `files`. + */ + type: 'files'; + } + + export namespace Files { + export interface File { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The MIME type of the file. + */ + mime_type: string; + } + } +} + +/** + * Emitted when the model response is complete. + */ +export interface ResponseCompletedEvent { + /** + * Properties of the completed response. + */ + response: Response; + + /** + * The type of the event. Always `response.completed`. + */ + type: 'response.completed'; +} + +/** + * A tool call to a computer use tool. See the + * [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) + * for more information. + */ +export interface ResponseComputerToolCall { + /** + * The unique ID of the computer call. + */ + id: string; + + /** + * A click action. + */ + action: + | ResponseComputerToolCall.Click + | ResponseComputerToolCall.DoubleClick + | ResponseComputerToolCall.Drag + | ResponseComputerToolCall.Keypress + | ResponseComputerToolCall.Move + | ResponseComputerToolCall.Screenshot + | ResponseComputerToolCall.Scroll + | ResponseComputerToolCall.Type + | ResponseComputerToolCall.Wait; + + /** + * An identifier used when responding to the tool call with output. + */ + call_id: string; + + /** + * The pending safety checks for the computer call. + */ + pending_safety_checks: Array; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status: 'in_progress' | 'completed' | 'incomplete'; + + /** + * The type of the computer call. Always `computer_call`. + */ + type: 'computer_call'; +} + +export namespace ResponseComputerToolCall { + /** + * A click action. + */ + export interface Click { + /** + * Indicates which mouse button was pressed during the click. One of `left`, + * `right`, `wheel`, `back`, or `forward`. + */ + button: 'left' | 'right' | 'wheel' | 'back' | 'forward'; + + /** + * Specifies the event type. For a click action, this property is always set to + * `click`. + */ + type: 'click'; + + /** + * The x-coordinate where the click occurred. 
+ */ + x: number; + + /** + * The y-coordinate where the click occurred. + */ + y: number; + } + + /** + * A double click action. + */ + export interface DoubleClick { + /** + * Specifies the event type. For a double click action, this property is always set + * to `double_click`. + */ + type: 'double_click'; + + /** + * The x-coordinate where the double click occurred. + */ + x: number; + + /** + * The y-coordinate where the double click occurred. + */ + y: number; + } + + /** + * A drag action. + */ + export interface Drag { + /** + * An array of coordinates representing the path of the drag action. Coordinates + * will appear as an array of objects, eg + * + * ``` + * [ + * { x: 100, y: 200 }, + * { x: 200, y: 300 } + * ] + * ``` + */ + path: Array; + + /** + * Specifies the event type. For a drag action, this property is always set to + * `drag`. + */ + type: 'drag'; + } + + export namespace Drag { + /** + * A series of x/y coordinate pairs in the drag path. + */ + export interface Path { + /** + * The x-coordinate. + */ + x: number; + + /** + * The y-coordinate. + */ + y: number; + } + } + + /** + * A collection of keypresses the model would like to perform. + */ + export interface Keypress { + /** + * The combination of keys the model is requesting to be pressed. This is an array + * of strings, each representing a key. + */ + keys: Array; + + /** + * Specifies the event type. For a keypress action, this property is always set to + * `keypress`. + */ + type: 'keypress'; + } + + /** + * A mouse move action. + */ + export interface Move { + /** + * Specifies the event type. For a move action, this property is always set to + * `move`. + */ + type: 'move'; + + /** + * The x-coordinate to move to. + */ + x: number; + + /** + * The y-coordinate to move to. + */ + y: number; + } + + /** + * A screenshot action. + */ + export interface Screenshot { + /** + * Specifies the event type. For a screenshot action, this property is always set + * to `screenshot`. + */ + type: 'screenshot'; + } + + /** + * A scroll action. + */ + export interface Scroll { + /** + * The horizontal scroll distance. + */ + scroll_x: number; + + /** + * The vertical scroll distance. + */ + scroll_y: number; + + /** + * Specifies the event type. For a scroll action, this property is always set to + * `scroll`. + */ + type: 'scroll'; + + /** + * The x-coordinate where the scroll occurred. + */ + x: number; + + /** + * The y-coordinate where the scroll occurred. + */ + y: number; + } + + /** + * An action to type in text. + */ + export interface Type { + /** + * The text to type. + */ + text: string; + + /** + * Specifies the event type. For a type action, this property is always set to + * `type`. + */ + type: 'type'; + } + + /** + * A wait action. + */ + export interface Wait { + /** + * Specifies the event type. For a wait action, this property is always set to + * `wait`. + */ + type: 'wait'; + } + + /** + * A pending safety check for the computer call. + */ + export interface PendingSafetyCheck { + /** + * The ID of the pending safety check. + */ + id: string; + + /** + * The type of the pending safety check. + */ + code: string; + + /** + * Details about the pending safety check. + */ + message: string; + } +} + +/** + * Multi-modal input and output contents. + */ +export type ResponseContent = + | ResponseInputText + | ResponseInputImage + | ResponseInputFile + | ResponseOutputText + | ResponseOutputRefusal; + +/** + * Emitted when a new content part is added. 
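+ *
+ * A hypothetical example of this event as received over the stream (field values
+ * are illustrative only):
+ *
+ * ```
+ * {
+ *   type: 'response.content_part.added',
+ *   item_id: 'msg_123',
+ *   output_index: 0,
+ *   content_index: 0,
+ *   part: { type: 'output_text', text: '', annotations: [] },
+ * }
+ * ```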
+ */ +export interface ResponseContentPartAddedEvent { + /** + * The index of the content part that was added. + */ + content_index: number; + + /** + * The ID of the output item that the content part was added to. + */ + item_id: string; + + /** + * The index of the output item that the content part was added to. + */ + output_index: number; + + /** + * The content part that was added. + */ + part: ResponseOutputText | ResponseOutputRefusal; + + /** + * The type of the event. Always `response.content_part.added`. + */ + type: 'response.content_part.added'; +} + +/** + * Emitted when a content part is done. + */ +export interface ResponseContentPartDoneEvent { + /** + * The index of the content part that is done. + */ + content_index: number; + + /** + * The ID of the output item that the content part was added to. + */ + item_id: string; + + /** + * The index of the output item that the content part was added to. + */ + output_index: number; + + /** + * The content part that is done. + */ + part: ResponseOutputText | ResponseOutputRefusal; + + /** + * The type of the event. Always `response.content_part.done`. + */ + type: 'response.content_part.done'; +} + +/** + * An event that is emitted when a response is created. + */ +export interface ResponseCreatedEvent { + /** + * The response that was created. + */ + response: Response; + + /** + * The type of the event. Always `response.created`. + */ + type: 'response.created'; +} + +/** + * An error object returned when the model fails to generate a Response. + */ +export interface ResponseError { + /** + * The error code for the response. + */ + code: + | 'server_error' + | 'rate_limit_exceeded' + | 'invalid_prompt' + | 'vector_store_timeout' + | 'invalid_image' + | 'invalid_image_format' + | 'invalid_base64_image' + | 'invalid_image_url' + | 'image_too_large' + | 'image_too_small' + | 'image_parse_error' + | 'image_content_policy_violation' + | 'invalid_image_mode' + | 'image_file_too_large' + | 'unsupported_image_media_type' + | 'empty_image_file' + | 'failed_to_download_image' + | 'image_file_not_found'; + + /** + * A human-readable description of the error. + */ + message: string; +} + +/** + * Emitted when an error occurs. + */ +export interface ResponseErrorEvent { + /** + * The error code. + */ + code: string | null; + + /** + * The error message. + */ + message: string; + + /** + * The error parameter. + */ + param: string | null; + + /** + * The type of the event. Always `error`. + */ + type: 'error'; +} + +/** + * An event that is emitted when a response fails. + */ +export interface ResponseFailedEvent { + /** + * The response that failed. + */ + response: Response; + + /** + * The type of the event. Always `response.failed`. + */ + type: 'response.failed'; +} + +/** + * Emitted when a file search call is completed (results found). + */ +export interface ResponseFileSearchCallCompletedEvent { + /** + * The ID of the output item that the file search call is initiated. + */ + item_id: string; + + /** + * The index of the output item that the file search call is initiated. + */ + output_index: number; + + /** + * The type of the event. Always `response.file_search_call.completed`. + */ + type: 'response.file_search_call.completed'; +} + +/** + * Emitted when a file search call is initiated. + */ +export interface ResponseFileSearchCallInProgressEvent { + /** + * The ID of the output item that the file search call is initiated. + */ + item_id: string; + + /** + * The index of the output item that the file search call is initiated. 
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.file_search_call.in_progress`.
+   */
+  type: 'response.file_search_call.in_progress';
+}
+
+/**
+ * Emitted when a file search is currently searching.
+ */
+export interface ResponseFileSearchCallSearchingEvent {
+  /**
+   * The ID of the output item that the file search call is initiated on.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item that the file search call is searching.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.file_search_call.searching`.
+   */
+  type: 'response.file_search_call.searching';
+}
+
+/**
+ * The results of a file search tool call. See the
+ * [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
+ * for more information.
+ */
+export interface ResponseFileSearchToolCall {
+  /**
+   * The unique ID of the file search tool call.
+   */
+  id: string;
+
+  /**
+   * The queries used to search for files.
+   */
+  queries: Array<string>;
+
+  /**
+   * The status of the file search tool call. One of `in_progress`, `searching`,
+   * `completed`, `incomplete`, or `failed`.
+   */
+  status: 'in_progress' | 'searching' | 'completed' | 'incomplete' | 'failed';
+
+  /**
+   * The type of the file search tool call. Always `file_search_call`.
+   */
+  type: 'file_search_call';
+
+  /**
+   * The results of the file search tool call.
+   */
+  results?: Array<ResponseFileSearchToolCall.Result> | null;
+}
+
+export namespace ResponseFileSearchToolCall {
+  export interface Result {
+    /**
+     * Set of 16 key-value pairs that can be attached to an object. This can be useful
+     * for storing additional information about the object in a structured format, and
+     * querying for objects via API or the dashboard. Keys are strings with a maximum
+     * length of 64 characters. Values are strings with a maximum length of 512
+     * characters, booleans, or numbers.
+     */
+    attributes?: Record<string, string | number | boolean> | null;
+
+    /**
+     * The unique ID of the file.
+     */
+    file_id?: string;
+
+    /**
+     * The name of the file.
+     */
+    filename?: string;
+
+    /**
+     * The relevance score of the file - a value between 0 and 1.
+     */
+    score?: number;
+
+    /**
+     * The text that was retrieved from the file.
+     */
+    text?: string;
+  }
+}
+
+/**
+ * An object specifying the format that the model must output.
+ *
+ * Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
+ * ensures the model will match your supplied JSON schema. Learn more in the
+ * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
+ *
+ * The default format is `{ "type": "text" }` with no additional options.
+ *
+ * **Not recommended for gpt-4o and newer models:**
+ *
+ * Setting to `{ "type": "json_object" }` enables the older JSON mode, which
+ * ensures the message the model generates is valid JSON. Using `json_schema` is
+ * preferred for models that support it.
+ */
+export type ResponseFormatTextConfig =
+  | Shared.ResponseFormatText
+  | ResponseFormatTextJSONSchemaConfig
+  | Shared.ResponseFormatJSONObject;
+
+/**
+ * JSON Schema response format. Used to generate structured JSON responses. Learn
+ * more about
+ * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
+ */
+export interface ResponseFormatTextJSONSchemaConfig {
+  /**
+   * The schema for the response format, described as a JSON Schema object. Learn how
+   * to build JSON schemas [here](https://json-schema.org/).
+   */
+  schema: Record<string, unknown>;
+
+  /**
+   * The type of response format being defined. Always `json_schema`.
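+   *
+   * For example, a complete `json_schema` text format might look like the
+   * following (a hypothetical sketch; the `name` and `schema` values are
+   * illustrative only):
+   *
+   * ```
+   * {
+   *   type: 'json_schema',
+   *   name: 'answer',
+   *   schema: {
+   *     type: 'object',
+   *     properties: { answer: { type: 'string' } },
+   *     required: ['answer'],
+   *   },
+   *   strict: true,
+   * }
+   * ```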
+ */ + type: 'json_schema'; + + /** + * A description of what the response format is for, used by the model to determine + * how to respond in the format. + */ + description?: string; + + /** + * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores + * and dashes, with a maximum length of 64. + */ + name?: string; + + /** + * Whether to enable strict schema adherence when generating the output. If set to + * true, the model will always follow the exact schema defined in the `schema` + * field. Only a subset of JSON Schema is supported when `strict` is `true`. To + * learn more, read the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + */ + strict?: boolean | null; +} + +/** + * Emitted when there is a partial function-call arguments delta. + */ +export interface ResponseFunctionCallArgumentsDeltaEvent { + /** + * The function-call arguments delta that is added. + */ + delta: string; + + /** + * The ID of the output item that the function-call arguments delta is added to. + */ + item_id: string; + + /** + * The index of the output item that the function-call arguments delta is added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.function_call_arguments.delta`. + */ + type: 'response.function_call_arguments.delta'; +} + +/** + * Emitted when function-call arguments are finalized. + */ +export interface ResponseFunctionCallArgumentsDoneEvent { + /** + * The function-call arguments. + */ + arguments: string; + + /** + * The ID of the item. + */ + item_id: string; + + /** + * The index of the output item. + */ + output_index: number; + + type: 'response.function_call_arguments.done'; +} + +/** + * A tool call to run a function. See the + * [function calling guide](https://platform.openai.com/docs/guides/function-calling) + * for more information. + */ +export interface ResponseFunctionToolCall { + /** + * The unique ID of the function tool call. + */ + id: string; + + /** + * A JSON string of the arguments to pass to the function. + */ + arguments: string; + + /** + * The unique ID of the function tool call generated by the model. + */ + call_id: string; + + /** + * The name of the function to run. + */ + name: string; + + /** + * The type of the function tool call. Always `function_call`. + */ + type: 'function_call'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; +} + +/** + * The results of a web search tool call. See the + * [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for + * more information. + */ +export interface ResponseFunctionWebSearch { + /** + * The unique ID of the web search tool call. + */ + id: string; + + /** + * The status of the web search tool call. + */ + status: 'in_progress' | 'searching' | 'completed' | 'failed'; + + /** + * The type of the web search tool call. Always `web_search_call`. + */ + type: 'web_search_call'; +} + +/** + * Emitted when the response is in progress. + */ +export interface ResponseInProgressEvent { + /** + * The response that is in progress. + */ + response: Response; + + /** + * The type of the event. Always `response.in_progress`. + */ + type: 'response.in_progress'; +} + +/** + * Specify additional output data to include in the model response. 
Currently
+ * supported values are:
+ *
+ * - `file_search_call.results`: Include the search results of the file search tool
+ *   call.
+ * - `message.input_image.image_url`: Include image URLs from the input message.
+ * - `computer_call_output.output.image_url`: Include image URLs from the computer
+ *   call output.
+ */
+export type ResponseIncludable =
+  | 'file_search_call.results'
+  | 'message.input_image.image_url'
+  | 'computer_call_output.output.image_url';
+
+/**
+ * An event that is emitted when a response finishes as incomplete.
+ */
+export interface ResponseIncompleteEvent {
+  /**
+   * The response that was incomplete.
+   */
+  response: Response;
+
+  /**
+   * The type of the event. Always `response.incomplete`.
+   */
+  type: 'response.incomplete';
+}
+
+/**
+ * A list of one or many input items to the model, containing different content
+ * types.
+ */
+export type ResponseInput = Array<ResponseInputItem>;
+
+/**
+ * An audio input to the model.
+ */
+export interface ResponseInputAudio {
+  /**
+   * Base64-encoded audio data.
+   */
+  data: string;
+
+  /**
+   * The format of the audio data. Currently supported formats are `mp3` and `wav`.
+   */
+  format: 'mp3' | 'wav';
+
+  /**
+   * The type of the input item. Always `input_audio`.
+   */
+  type: 'input_audio';
+}
+
+/**
+ * A text, image, or file input to the model.
+ */
+export type ResponseInputContent = ResponseInputText | ResponseInputImage | ResponseInputFile;
+
+/**
+ * A file input to the model.
+ */
+export interface ResponseInputFile {
+  /**
+   * The type of the input item. Always `input_file`.
+   */
+  type: 'input_file';
+
+  /**
+   * The content of the file to be sent to the model.
+   */
+  file_data?: string;
+
+  /**
+   * The ID of the file to be sent to the model.
+   */
+  file_id?: string;
+
+  /**
+   * The name of the file to be sent to the model.
+   */
+  filename?: string;
+}
+
+/**
+ * An image input to the model. Learn about
+ * [image inputs](https://platform.openai.com/docs/guides/vision).
+ */
+export interface ResponseInputImage {
+  /**
+   * The detail level of the image to be sent to the model. One of `high`, `low`, or
+   * `auto`. Defaults to `auto`.
+   */
+  detail: 'high' | 'low' | 'auto';
+
+  /**
+   * The type of the input item. Always `input_image`.
+   */
+  type: 'input_image';
+
+  /**
+   * The ID of the file to be sent to the model.
+   */
+  file_id?: string | null;
+
+  /**
+   * The URL of the image to be sent to the model. A fully qualified URL or base64
+   * encoded image in a data URL.
+   */
+  image_url?: string | null;
+}
+
+/**
+ * A single item in the model's input context: a message, a tool call or its
+ * output, a reasoning item, or a reference to a previous item. Messages carry a
+ * role indicating instruction following hierarchy: instructions given with the
+ * `developer` or `system` role take precedence over instructions given with the
+ * `user` role, and messages with the `assistant` role are presumed to have been
+ * generated by the model in previous interactions.
+ */
+export type ResponseInputItem =
+  | EasyInputMessage
+  | ResponseInputItem.Message
+  | ResponseOutputMessage
+  | ResponseFileSearchToolCall
+  | ResponseComputerToolCall
+  | ResponseInputItem.ComputerCallOutput
+  | ResponseFunctionWebSearch
+  | ResponseFunctionToolCall
+  | ResponseInputItem.FunctionCallOutput
+  | ResponseInputItem.Reasoning
+  | ResponseInputItem.ItemReference;
+
+export namespace ResponseInputItem {
+  /**
+   * A message input to the model with a role indicating instruction following
+   * hierarchy. Instructions given with the `developer` or `system` role take
+   * precedence over instructions given with the `user` role.
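+   *
+   * For example (a hypothetical message input; the text is illustrative only):
+   *
+   * ```
+   * {
+   *   type: 'message',
+   *   role: 'developer',
+   *   content: [{ type: 'input_text', text: 'Answer in French.' }],
+   * }
+   * ```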
+   */
+  export interface Message {
+    /**
+     * A list of one or many input items to the model, containing different content
+     * types.
+     */
+    content: ResponsesAPI.ResponseInputMessageContentList;
+
+    /**
+     * The role of the message input. One of `user`, `system`, or `developer`.
+     */
+    role: 'user' | 'system' | 'developer';
+
+    /**
+     * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+     * Populated when items are returned via API.
+     */
+    status?: 'in_progress' | 'completed' | 'incomplete';
+
+    /**
+     * The type of the message input. Always set to `message`.
+     */
+    type?: 'message';
+  }
+
+  /**
+   * The output of a computer tool call.
+   */
+  export interface ComputerCallOutput {
+    /**
+     * The ID of the computer tool call that produced the output.
+     */
+    call_id: string;
+
+    /**
+     * A computer screenshot image used with the computer use tool.
+     */
+    output: ComputerCallOutput.Output;
+
+    /**
+     * The type of the computer tool call output. Always `computer_call_output`.
+     */
+    type: 'computer_call_output';
+
+    /**
+     * The ID of the computer tool call output.
+     */
+    id?: string;
+
+    /**
+     * The safety checks reported by the API that have been acknowledged by the
+     * developer.
+     */
+    acknowledged_safety_checks?: Array<ComputerCallOutput.AcknowledgedSafetyCheck>;
+
+    /**
+     * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+     * Populated when items are returned via API.
+     */
+    status?: 'in_progress' | 'completed' | 'incomplete';
+  }
+
+  export namespace ComputerCallOutput {
+    /**
+     * A computer screenshot image used with the computer use tool.
+     */
+    export interface Output {
+      /**
+       * Specifies the event type. For a computer screenshot, this property is always set
+       * to `computer_screenshot`.
+       */
+      type: 'computer_screenshot';
+
+      /**
+       * The identifier of an uploaded file that contains the screenshot.
+       */
+      file_id?: string;
+
+      /**
+       * The URL of the screenshot image.
+       */
+      image_url?: string;
+    }
+
+    /**
+     * A safety check for the computer call that has been acknowledged by the
+     * developer.
+     */
+    export interface AcknowledgedSafetyCheck {
+      /**
+       * The ID of the safety check.
+       */
+      id: string;
+
+      /**
+       * The type of the safety check.
+       */
+      code: string;
+
+      /**
+       * Details about the safety check.
+       */
+      message: string;
+    }
+  }
+
+  /**
+   * The output of a function tool call.
+   */
+  export interface FunctionCallOutput {
+    /**
+     * The unique ID of the function tool call generated by the model.
+     */
+    call_id: string;
+
+    /**
+     * A JSON string of the output of the function tool call.
+     */
+    output: string;
+
+    /**
+     * The type of the function tool call output. Always `function_call_output`.
+     */
+    type: 'function_call_output';
+
+    /**
+     * The unique ID of the function tool call output. Populated when this item is
+     * returned via API.
+     */
+    id?: string;
+
+    /**
+     * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+     * Populated when items are returned via API.
+     */
+    status?: 'in_progress' | 'completed' | 'incomplete';
+  }
+
+  /**
+   * A description of the chain of thought used by a reasoning model while generating
+   * a response.
+   */
+  export interface Reasoning {
+    /**
+     * The unique identifier of the reasoning content.
+     */
+    id: string;
+
+    /**
+     * Reasoning text contents.
+     */
+    content: Array<Reasoning.Content>;
+
+    /**
+     * The type of the object. Always `reasoning`.
+     */
+    type: 'reasoning';
+
+    /**
+     * The status of the item. One of `in_progress`, `completed`, or `incomplete`.
+     * Populated when items are returned via API.
+ */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } + + export namespace Reasoning { + export interface Content { + /** + * A short summary of the reasoning used by the model when generating the response. + */ + text: string; + + /** + * The type of the object. Always `text`. + */ + type: 'reasoning_summary'; + } + } + + /** + * An internal identifier for an item to reference. + */ + export interface ItemReference { + /** + * The ID of the item to reference. + */ + id: string; + + /** + * The type of item to reference. Always `item_reference`. + */ + type: 'item_reference'; + } +} + +/** + * A list of one or many input items to the model, containing different content + * types. + */ +export type ResponseInputMessageContentList = Array; + +/** + * A text input to the model. + */ +export interface ResponseInputText { + /** + * The text input to the model. + */ + text: string; + + /** + * The type of the input item. Always `input_text`. + */ + type: 'input_text'; +} + +/** + * An audio output from the model. + */ +export interface ResponseOutputAudio { + /** + * Base64-encoded audio data from the model. + */ + data: string; + + /** + * The transcript of the audio data from the model. + */ + transcript: string; + + /** + * The type of the output audio. Always `output_audio`. + */ + type: 'output_audio'; +} + +/** + * An output message from the model. + */ +export type ResponseOutputItem = + | ResponseOutputMessage + | ResponseFileSearchToolCall + | ResponseFunctionToolCall + | ResponseFunctionWebSearch + | ResponseComputerToolCall + | ResponseOutputItem.Reasoning; + +export namespace ResponseOutputItem { + /** + * A description of the chain of thought used by a reasoning model while generating + * a response. + */ + export interface Reasoning { + /** + * The unique identifier of the reasoning content. + */ + id: string; + + /** + * Reasoning text contents. + */ + content: Array; + + /** + * The type of the object. Always `reasoning`. + */ + type: 'reasoning'; + + /** + * The status of the item. One of `in_progress`, `completed`, or `incomplete`. + * Populated when items are returned via API. + */ + status?: 'in_progress' | 'completed' | 'incomplete'; + } + + export namespace Reasoning { + export interface Content { + /** + * A short summary of the reasoning used by the model when generating the response. + */ + text: string; + + /** + * The type of the object. Always `text`. + */ + type: 'reasoning_summary'; + } + } +} + +/** + * Emitted when a new output item is added. + */ +export interface ResponseOutputItemAddedEvent { + /** + * The output item that was added. + */ + item: ResponseOutputItem; + + /** + * The index of the output item that was added. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_item.added`. + */ + type: 'response.output_item.added'; +} + +/** + * Emitted when an output item is marked done. + */ +export interface ResponseOutputItemDoneEvent { + /** + * The output item that was marked done. + */ + item: ResponseOutputItem; + + /** + * The index of the output item that was marked done. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_item.done`. + */ + type: 'response.output_item.done'; +} + +/** + * An output message from the model. + */ +export interface ResponseOutputMessage { + /** + * The unique ID of the output message. + */ + id: string; + + /** + * The content of the output message. + */ + content: Array; + + /** + * The role of the output message. Always `assistant`. 
+   */
+  role: 'assistant';
+
+  /**
+   * The status of the message. One of `in_progress`, `completed`, or
+   * `incomplete`. Populated when items are returned via API.
+   */
+  status: 'in_progress' | 'completed' | 'incomplete';
+
+  /**
+   * The type of the output message. Always `message`.
+   */
+  type: 'message';
+}
+
+/**
+ * A refusal from the model.
+ */
+export interface ResponseOutputRefusal {
+  /**
+   * The refusal explanation from the model.
+   */
+  refusal: string;
+
+  /**
+   * The type of the refusal. Always `refusal`.
+   */
+  type: 'refusal';
+}
+
+/**
+ * A text output from the model.
+ */
+export interface ResponseOutputText {
+  /**
+   * The annotations of the text output.
+   */
+  annotations: Array<
+    ResponseOutputText.FileCitation | ResponseOutputText.URLCitation | ResponseOutputText.FilePath
+  >;
+
+  /**
+   * The text output from the model.
+   */
+  text: string;
+
+  /**
+   * The type of the output text. Always `output_text`.
+   */
+  type: 'output_text';
+}
+
+export namespace ResponseOutputText {
+  /**
+   * A citation to a file.
+   */
+  export interface FileCitation {
+    /**
+     * The ID of the file.
+     */
+    file_id: string;
+
+    /**
+     * The index of the file in the list of files.
+     */
+    index: number;
+
+    /**
+     * The type of the file citation. Always `file_citation`.
+     */
+    type: 'file_citation';
+  }
+
+  /**
+   * A citation for a web resource used to generate a model response.
+   */
+  export interface URLCitation {
+    /**
+     * The index of the last character of the URL citation in the message.
+     */
+    end_index: number;
+
+    /**
+     * The index of the first character of the URL citation in the message.
+     */
+    start_index: number;
+
+    /**
+     * The title of the web resource.
+     */
+    title: string;
+
+    /**
+     * The type of the URL citation. Always `url_citation`.
+     */
+    type: 'url_citation';
+
+    /**
+     * The URL of the web resource.
+     */
+    url: string;
+  }
+
+  /**
+   * A path to a file.
+   */
+  export interface FilePath {
+    /**
+     * The ID of the file.
+     */
+    file_id: string;
+
+    /**
+     * The index of the file in the list of files.
+     */
+    index: number;
+
+    /**
+     * The type of the file path. Always `file_path`.
+     */
+    type: 'file_path';
+  }
+}
+
+/**
+ * Emitted when there is partial refusal text.
+ */
+export interface ResponseRefusalDeltaEvent {
+  /**
+   * The index of the content part that the refusal text is added to.
+   */
+  content_index: number;
+
+  /**
+   * The refusal text that is added.
+   */
+  delta: string;
+
+  /**
+   * The ID of the output item that the refusal text is added to.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item that the refusal text is added to.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.refusal.delta`.
+   */
+  type: 'response.refusal.delta';
+}
+
+/**
+ * Emitted when refusal text is finalized.
+ */
+export interface ResponseRefusalDoneEvent {
+  /**
+   * The index of the content part in which the refusal text is finalized.
+   */
+  content_index: number;
+
+  /**
+   * The ID of the output item in which the refusal text is finalized.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item in which the refusal text is finalized.
+   */
+  output_index: number;
+
+  /**
+   * The refusal text that is finalized.
+   */
+  refusal: string;
+
+  /**
+   * The type of the event. Always `response.refusal.done`.
+   */
+  type: 'response.refusal.done';
+}
+
+/**
+ * The status of the response generation. One of `completed`, `failed`,
+ * `in_progress`, or `incomplete`.
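+ *
+ * For example (a hypothetical check; this assumes the parent `Response` object
+ * exposes this value as `status` alongside an `incomplete_details` field):
+ *
+ * ```
+ * if (response.status === 'incomplete') {
+ *   console.log('Stopped early:', response.incomplete_details?.reason);
+ * }
+ * ```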
+ */ +export type ResponseStatus = 'completed' | 'failed' | 'in_progress' | 'incomplete'; + +/** + * Emitted when there is a partial audio response. + */ +export type ResponseStreamEvent = + | ResponseAudioDeltaEvent + | ResponseAudioDoneEvent + | ResponseAudioTranscriptDeltaEvent + | ResponseAudioTranscriptDoneEvent + | ResponseCodeInterpreterCallCodeDeltaEvent + | ResponseCodeInterpreterCallCodeDoneEvent + | ResponseCodeInterpreterCallCompletedEvent + | ResponseCodeInterpreterCallInProgressEvent + | ResponseCodeInterpreterCallInterpretingEvent + | ResponseCompletedEvent + | ResponseContentPartAddedEvent + | ResponseContentPartDoneEvent + | ResponseCreatedEvent + | ResponseErrorEvent + | ResponseFileSearchCallCompletedEvent + | ResponseFileSearchCallInProgressEvent + | ResponseFileSearchCallSearchingEvent + | ResponseFunctionCallArgumentsDeltaEvent + | ResponseFunctionCallArgumentsDoneEvent + | ResponseInProgressEvent + | ResponseFailedEvent + | ResponseIncompleteEvent + | ResponseOutputItemAddedEvent + | ResponseOutputItemDoneEvent + | ResponseRefusalDeltaEvent + | ResponseRefusalDoneEvent + | ResponseTextAnnotationDeltaEvent + | ResponseTextDeltaEvent + | ResponseTextDoneEvent + | ResponseWebSearchCallCompletedEvent + | ResponseWebSearchCallInProgressEvent + | ResponseWebSearchCallSearchingEvent; + +/** + * Emitted when a text annotation is added. + */ +export interface ResponseTextAnnotationDeltaEvent { + /** + * A citation to a file. + */ + annotation: + | ResponseTextAnnotationDeltaEvent.FileCitation + | ResponseTextAnnotationDeltaEvent.URLCitation + | ResponseTextAnnotationDeltaEvent.FilePath; + + /** + * The index of the annotation that was added. + */ + annotation_index: number; + + /** + * The index of the content part that the text annotation was added to. + */ + content_index: number; + + /** + * The ID of the output item that the text annotation was added to. + */ + item_id: string; + + /** + * The index of the output item that the text annotation was added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_text.annotation.added`. + */ + type: 'response.output_text.annotation.added'; +} + +export namespace ResponseTextAnnotationDeltaEvent { + /** + * A citation to a file. + */ + export interface FileCitation { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. + */ + index: number; + + /** + * The type of the file citation. Always `file_citation`. + */ + type: 'file_citation'; + } + + /** + * A citation for a web resource used to generate a model response. + */ + export interface URLCitation { + /** + * The index of the last character of the URL citation in the message. + */ + end_index: number; + + /** + * The index of the first character of the URL citation in the message. + */ + start_index: number; + + /** + * The title of the web resource. + */ + title: string; + + /** + * The type of the URL citation. Always `url_citation`. + */ + type: 'url_citation'; + + /** + * The URL of the web resource. + */ + url: string; + } + + /** + * A path to a file. + */ + export interface FilePath { + /** + * The ID of the file. + */ + file_id: string; + + /** + * The index of the file in the list of files. + */ + index: number; + + /** + * The type of the file path. Always `file_path`. + */ + type: 'file_path'; + } +} + +/** + * Configuration options for a text response from the model. Can be plain text or + * structured JSON data. 
Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ +export interface ResponseTextConfig { + /** + * An object specifying the format that the model must output. + * + * Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + * ensures the model will match your supplied JSON schema. Learn more in the + * [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + * + * The default format is `{ "type": "text" }` with no additional options. + * + * **Not recommended for gpt-4o and newer models:** + * + * Setting to `{ "type": "json_object" }` enables the older JSON mode, which + * ensures the message the model generates is valid JSON. Using `json_schema` is + * preferred for models that support it. + */ + format?: ResponseFormatTextConfig; +} + +/** + * Emitted when there is an additional text delta. + */ +export interface ResponseTextDeltaEvent { + /** + * The index of the content part that the text delta was added to. + */ + content_index: number; + + /** + * The text delta that was added. + */ + delta: string; + + /** + * The ID of the output item that the text delta was added to. + */ + item_id: string; + + /** + * The index of the output item that the text delta was added to. + */ + output_index: number; + + /** + * The type of the event. Always `response.output_text.delta`. + */ + type: 'response.output_text.delta'; +} + +/** + * Emitted when text content is finalized. + */ +export interface ResponseTextDoneEvent { + /** + * The index of the content part that the text content is finalized. + */ + content_index: number; + + /** + * The ID of the output item that the text content is finalized. + */ + item_id: string; + + /** + * The index of the output item that the text content is finalized. + */ + output_index: number; + + /** + * The text content that is finalized. + */ + text: string; + + /** + * The type of the event. Always `response.output_text.done`. + */ + type: 'response.output_text.done'; +} + +/** + * Represents token usage details including input tokens, output tokens, a + * breakdown of output tokens, and the total tokens used. + */ +export interface ResponseUsage { + /** + * The number of input tokens. + */ + input_tokens: number; + + /** + * The number of output tokens. + */ + output_tokens: number; + + /** + * A detailed breakdown of the output tokens. + */ + output_tokens_details: ResponseUsage.OutputTokensDetails; + + /** + * The total number of tokens used. + */ + total_tokens: number; +} + +export namespace ResponseUsage { + /** + * A detailed breakdown of the output tokens. + */ + export interface OutputTokensDetails { + /** + * The number of reasoning tokens. + */ + reasoning_tokens: number; + } +} + +/** + * Emitted when a web search call is completed. + */ +export interface ResponseWebSearchCallCompletedEvent { + /** + * Unique ID for the output item associated with the web search call. + */ + item_id: string; + + /** + * The index of the output item that the web search call is associated with. + */ + output_index: number; + + /** + * The type of the event. Always `response.web_search_call.completed`. + */ + type: 'response.web_search_call.completed'; +} + +/** + * Emitted when a web search call is initiated. + */ +export interface ResponseWebSearchCallInProgressEvent { + /** + * Unique ID for the output item associated with the web search call. 
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item that the web search call is associated with.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.web_search_call.in_progress`.
+   */
+  type: 'response.web_search_call.in_progress';
+}
+
+/**
+ * Emitted when a web search call is executing.
+ */
+export interface ResponseWebSearchCallSearchingEvent {
+  /**
+   * Unique ID for the output item associated with the web search call.
+   */
+  item_id: string;
+
+  /**
+   * The index of the output item that the web search call is associated with.
+   */
+  output_index: number;
+
+  /**
+   * The type of the event. Always `response.web_search_call.searching`.
+   */
+  type: 'response.web_search_call.searching';
+}
+
+/**
+ * A tool the model may use while generating a response: file search, a function
+ * defined by you, computer use, or web search. Learn more about
+ * [built-in tools](https://platform.openai.com/docs/guides/tools).
+ */
+export type Tool = FileSearchTool | FunctionTool | ComputerTool | WebSearchTool;
+
+/**
+ * Use this option to force the model to call a specific function.
+ */
+export interface ToolChoiceFunction {
+  /**
+   * The name of the function to call.
+   */
+  name: string;
+
+  /**
+   * For function calling, the type is always `function`.
+   */
+  type: 'function';
+}
+
+/**
+ * Controls which (if any) tool is called by the model.
+ *
+ * `none` means the model will not call any tool and instead generates a message.
+ *
+ * `auto` means the model can pick between generating a message or calling one or
+ * more tools.
+ *
+ * `required` means the model must call one or more tools.
+ */
+export type ToolChoiceOptions = 'none' | 'auto' | 'required';
+
+/**
+ * Indicates that the model should use a built-in tool to generate a response.
+ * [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
+ */
+export interface ToolChoiceTypes {
+  /**
+   * The type of hosted tool the model should use. Learn more about
+   * [built-in tools](https://platform.openai.com/docs/guides/tools).
+   *
+   * Allowed values are:
+   *
+   * - `file_search`
+   * - `web_search_preview`
+   * - `web_search_preview_2025_03_11`
+   * - `computer_use_preview`
+   */
+  type: 'file_search' | 'web_search_preview' | 'computer_use_preview' | 'web_search_preview_2025_03_11';
+}
+
+/**
+ * This tool searches the web for relevant results to use in a response. Learn more
+ * about the
+ * [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
+ */
+export interface WebSearchTool {
+  /**
+   * The type of the web search tool. One of:
+   *
+   * - `web_search_preview`
+   * - `web_search_preview_2025_03_11`
+   */
+  type: 'web_search_preview' | 'web_search_preview_2025_03_11';
+
+  /**
+   * High level guidance for the amount of context window space to use for the
+   * search. One of `low`, `medium`, or `high`. `medium` is the default.
+   */
+  search_context_size?: 'low' | 'medium' | 'high';
+
+  /**
+   * The user's location.
+   */
+  user_location?: WebSearchTool.UserLocation | null;
+}
+
+export namespace WebSearchTool {
+  export interface UserLocation {
+    /**
+     * The type of location approximation. Always `approximate`.
+     */
+    type: 'approximate';
+
+    /**
+     * Free text input for the city of the user, e.g. `San Francisco`.
+     */
+    city?: string;
+
+    /**
+     * The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
+     * the user, e.g. `US`.
+     */
+    country?: string;
+
+    /**
+     * Free text input for the region of the user, e.g. `California`.
+     */
+    region?: string;
+
+    /**
+     * The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
+     * user, e.g.
`America/Los_Angeles`.
+     */
+    timezone?: string;
+  }
+}
+
+export type ResponseCreateParams = ResponseCreateParamsNonStreaming | ResponseCreateParamsStreaming;
+
+export interface ResponseCreateParamsBase {
+  /**
+   * Text, image, or file inputs to the model, used to generate a response.
+   *
+   * Learn more:
+   *
+   * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+   * - [Image inputs](https://platform.openai.com/docs/guides/images)
+   * - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+   * - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+   * - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+   */
+  input: string | ResponseInput;
+
+  /**
+   * Model ID used to generate the response, like `gpt-4o` or `o1`. OpenAI offers a
+   * wide range of models with different capabilities, performance characteristics,
+   * and price points. Refer to the
+   * [model guide](https://platform.openai.com/docs/models) to browse and compare
+   * available models.
+   */
+  model: (string & {}) | Shared.ChatModel;
+
+  /**
+   * Specify additional output data to include in the model response. Currently
+   * supported values are:
+   *
+   * - `file_search_call.results`: Include the search results of the file search tool
+   *   call.
+   * - `message.input_image.image_url`: Include image URLs from the input message.
+   * - `computer_call_output.output.image_url`: Include image URLs from the computer
+   *   call output.
+   */
+  include?: Array<ResponseIncludable> | null;
+
+  /**
+   * Inserts a system (or developer) message as the first item in the model's
+   * context.
+   *
+   * When used along with `previous_response_id`, the instructions from a previous
+   * response will not be carried over to the next response. This makes it simple
+   * to swap out system (or developer) messages in new responses.
+   */
+  instructions?: string | null;
+
+  /**
+   * An upper bound for the number of tokens that can be generated for a response,
+   * including visible output tokens and
+   * [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+   */
+  max_output_tokens?: number | null;
+
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful
+   * for storing additional information about the object in a structured format, and
+   * querying for objects via API or the dashboard.
+   *
+   * Keys are strings with a maximum length of 64 characters. Values are strings with
+   * a maximum length of 512 characters.
+   */
+  metadata?: Shared.Metadata | null;
+
+  /**
+   * Whether to allow the model to run tool calls in parallel.
+   */
+  parallel_tool_calls?: boolean | null;
+
+  /**
+   * The unique ID of the previous response to the model. Use this to create
+   * multi-turn conversations. Learn more about
+   * [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+   */
+  previous_response_id?: string | null;
+
+  /**
+   * **o-series models only**
+   *
+   * Configuration options for
+   * [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+   */
+  reasoning?: Shared.Reasoning | null;
+
+  /**
+   * Whether to store the generated model response for later retrieval via API.
+   */
+  store?: boolean | null;
+
+  /**
+   * If set to true, the model response data will be streamed to the client as it is
+   * generated using
+   * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
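+   *
+   * For example (a hypothetical sketch of consuming the event stream; the event
+   * and field names are taken from the `ResponseStreamEvent` union above):
+   *
+   * ```
+   * const stream = await client.responses.create({ model: 'gpt-4o', input: 'Hi', stream: true });
+   * for await (const event of stream) {
+   *   if (event.type === 'response.output_text.delta') process.stdout.write(event.delta);
+   * }
+   * ```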
+ * See the + * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) + * for more information. + */ + stream?: boolean | null; + + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will + * make the output more random, while lower values like 0.2 will make it more + * focused and deterministic. We generally recommend altering this or `top_p` but + * not both. + */ + temperature?: number | null; + + /** + * Configuration options for a text response from the model. Can be plain text or + * structured JSON data. Learn more: + * + * - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + * - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + */ + text?: ResponseTextConfig; + + /** + * How the model should select which tool (or tools) to use when generating a + * response. See the `tools` parameter to see how to specify which tools the model + * can call. + */ + tool_choice?: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction; + + /** + * An array of tools the model may call while generating a response. You can + * specify which tool to use by setting the `tool_choice` parameter. + * + * The two categories of tools you can provide the model are: + * + * - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + * capabilities, like + * [web search](https://platform.openai.com/docs/guides/tools-web-search) or + * [file search](https://platform.openai.com/docs/guides/tools-file-search). + * Learn more about + * [built-in tools](https://platform.openai.com/docs/guides/tools). + * - **Function calls (custom tools)**: Functions that are defined by you, enabling + * the model to call your own code. Learn more about + * [function calling](https://platform.openai.com/docs/guides/function-calling). + */ + tools?: Array; + + /** + * An alternative to sampling with temperature, called nucleus sampling, where the + * model considers the results of the tokens with top_p probability mass. So 0.1 + * means only the tokens comprising the top 10% probability mass are considered. + * + * We generally recommend altering this or `temperature` but not both. + */ + top_p?: number | null; + + /** + * The truncation strategy to use for the model response. + * + * - `auto`: If the context of this response and previous ones exceeds the model's + * context window size, the model will truncate the response to fit the context + * window by dropping input items in the middle of the conversation. + * - `disabled` (default): If a model response will exceed the context window size + * for a model, the request will fail with a 400 error. + */ + truncation?: 'auto' | 'disabled' | null; + + /** + * A unique identifier representing your end-user, which can help OpenAI to monitor + * and detect abuse. + * [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + */ + user?: string; +} + +export namespace ResponseCreateParams { + export type ResponseCreateParamsNonStreaming = ResponsesAPI.ResponseCreateParamsNonStreaming; + export type ResponseCreateParamsStreaming = ResponsesAPI.ResponseCreateParamsStreaming; +} + +export interface ResponseCreateParamsNonStreaming extends ResponseCreateParamsBase { + /** + * If set to true, the model response data will be streamed to the client as it is + * generated using + * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format). 
+   * See the
+   * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+   * for more information.
+   */
+  stream?: false | null;
+}
+
+export interface ResponseCreateParamsStreaming extends ResponseCreateParamsBase {
+  /**
+   * If set to true, the model response data will be streamed to the client as it is
+   * generated using
+   * [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+   * See the
+   * [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
+   * for more information.
+   */
+  stream: true;
+}
+
+export interface ResponseRetrieveParams {
+  /**
+   * Additional fields to include in the response. See the `include` parameter for
+   * Response creation above for more information.
+   */
+  include?: Array<ResponseIncludable>;
+}
+
+Responses.InputItems = InputItems;
+Responses.ResponseItemListDataPage = ResponseItemListDataPage;
+
+export declare namespace Responses {
+  export {
+    InputItems as InputItems,
+    type ResponseItemList as ResponseItemList,
+    ResponseItemListDataPage as ResponseItemListDataPage,
+    type InputItemListParams as InputItemListParams,
+  };
+}
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
index 3bb11582f..86b2d2dee 100644
--- a/src/resources/shared.ts
+++ b/src/resources/shared.ts
@@ -1,5 +1,96 @@
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
+export type ChatModel =
+  | 'o3-mini'
+  | 'o3-mini-2025-01-31'
+  | 'o1'
+  | 'o1-2024-12-17'
+  | 'o1-preview'
+  | 'o1-preview-2024-09-12'
+  | 'o1-mini'
+  | 'o1-mini-2024-09-12'
+  | 'computer-use-preview'
+  | 'computer-use-preview-2025-02-04'
+  | 'computer-use-preview-2025-03-11'
+  | 'gpt-4.5-preview'
+  | 'gpt-4.5-preview-2025-02-27'
+  | 'gpt-4o'
+  | 'gpt-4o-2024-11-20'
+  | 'gpt-4o-2024-08-06'
+  | 'gpt-4o-2024-05-13'
+  | 'gpt-4o-audio-preview'
+  | 'gpt-4o-audio-preview-2024-10-01'
+  | 'gpt-4o-audio-preview-2024-12-17'
+  | 'gpt-4o-mini-audio-preview'
+  | 'gpt-4o-mini-audio-preview-2024-12-17'
+  | 'chatgpt-4o-latest'
+  | 'gpt-4o-mini'
+  | 'gpt-4o-mini-2024-07-18'
+  | 'gpt-4-turbo'
+  | 'gpt-4-turbo-2024-04-09'
+  | 'gpt-4-0125-preview'
+  | 'gpt-4-turbo-preview'
+  | 'gpt-4-1106-preview'
+  | 'gpt-4-vision-preview'
+  | 'gpt-4'
+  | 'gpt-4-0314'
+  | 'gpt-4-0613'
+  | 'gpt-4-32k'
+  | 'gpt-4-32k-0314'
+  | 'gpt-4-32k-0613'
+  | 'gpt-3.5-turbo'
+  | 'gpt-3.5-turbo-16k'
+  | 'gpt-3.5-turbo-0301'
+  | 'gpt-3.5-turbo-0613'
+  | 'gpt-3.5-turbo-1106'
+  | 'gpt-3.5-turbo-0125'
+  | 'gpt-3.5-turbo-16k-0613';
+
+/**
+ * A filter used to compare a specified attribute key to a given value using a
+ * defined comparison operation.
+ */
+export interface ComparisonFilter {
+  /**
+   * The key to compare against the value.
+   */
+  key: string;
+
+  /**
+   * Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
+   *
+   * - `eq`: equals
+   * - `ne`: not equal
+   * - `gt`: greater than
+   * - `gte`: greater than or equal
+   * - `lt`: less than
+   * - `lte`: less than or equal
+   */
+  type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte';
+
+  /**
+   * The value to compare against the attribute key; supports string, number, or
+   * boolean types.
+   */
+  value: string | number | boolean;
+}
+
+/**
+ * Combine multiple filters using `and` or `or`.
+ */
+export interface CompoundFilter {
+  /**
+   * Array of filters to combine. Items can be `ComparisonFilter` or
+   * `CompoundFilter`.
+   */
+  filters: Array<ComparisonFilter | CompoundFilter>;
+
+  /**
+   * Type of operation: `and` or `or`.
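+   *
+   * For example, a hypothetical compound filter matching English documents from
+   * 2024 or later (the attribute keys are illustrative only):
+   *
+   * ```
+   * {
+   *   type: 'and',
+   *   filters: [
+   *     { key: 'language', type: 'eq', value: 'en' },
+   *     { key: 'year', type: 'gte', value: 2024 },
+   *   ],
+   * }
+   * ```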
+ */ + type: 'and' | 'or'; +} + export interface ErrorObject { code: string | null; @@ -65,23 +156,76 @@ export type FunctionParameters = Record; */ export type Metadata = Record; +/** + * **o-series models only** + * + * Configuration options for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). + */ +export interface Reasoning { + /** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ + effort: ReasoningEffort | null; + + /** + * **o-series models only** + * + * A summary of the reasoning performed by the model. This can be useful for + * debugging and understanding the model's reasoning process. One of `concise` or + * `detailed`. + */ + generate_summary?: 'concise' | 'detailed' | null; +} + +/** + * **o-series models only** + * + * Constrains effort on reasoning for + * [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + * result in faster responses and fewer tokens used on reasoning in a response. + */ +export type ReasoningEffort = 'low' | 'medium' | 'high' | null; + +/** + * JSON object response format. An older method of generating JSON responses. Using + * `json_schema` is recommended for models that support it. Note that the model + * will not generate JSON without a system or user message instructing it to do so. + */ export interface ResponseFormatJSONObject { /** - * The type of response format being defined: `json_object` + * The type of response format being defined. Always `json_object`. */ type: 'json_object'; } +/** + * JSON Schema response format. Used to generate structured JSON responses. Learn + * more about + * [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs). + */ export interface ResponseFormatJSONSchema { + /** + * Structured Outputs configuration options, including a JSON Schema. + */ json_schema: ResponseFormatJSONSchema.JSONSchema; /** - * The type of response format being defined: `json_schema` + * The type of response format being defined. Always `json_schema`. */ type: 'json_schema'; } export namespace ResponseFormatJSONSchema { + /** + * Structured Outputs configuration options, including a JSON Schema. + */ export interface JSONSchema { /** * The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores @@ -96,7 +240,8 @@ export namespace ResponseFormatJSONSchema { description?: string; /** - * The schema for the response format, described as a JSON Schema object. + * The schema for the response format, described as a JSON Schema object. Learn how + * to build JSON schemas [here](https://json-schema.org/). */ schema?: Record; @@ -111,9 +256,12 @@ export namespace ResponseFormatJSONSchema { } } +/** + * Default response format. Used to generate text responses. + */ export interface ResponseFormatText { /** - * The type of response format being defined: `text` + * The type of response format being defined. Always `text`. 
*/ type: 'text'; } diff --git a/src/resources/uploads/uploads.ts b/src/resources/uploads/uploads.ts index f977e18f6..9e046b48d 100644 --- a/src/resources/uploads/uploads.ts +++ b/src/resources/uploads/uploads.ts @@ -22,10 +22,9 @@ export class Uploads extends APIResource { * contains all the parts you uploaded. This File is usable in the rest of our * platform as a regular File object. * - * For certain `purpose`s, the correct `mime_type` must be specified. Please refer - * to documentation for the supported MIME types for your use case: - * - * - [Assistants](https://platform.openai.com/docs/assistants/tools/file-search#supported-files) + * For certain `purpose` values, the correct `mime_type` must be specified. Please + * refer to documentation for the + * [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files). * * For guidance on the proper filename extensions for each purpose, please follow * the documentation on diff --git a/src/resources/beta/vector-stores/file-batches.ts b/src/resources/vector-stores/file-batches.ts similarity index 92% rename from src/resources/beta/vector-stores/file-batches.ts rename to src/resources/vector-stores/file-batches.ts index 2c47cb9c2..9be1d81a3 100644 --- a/src/resources/beta/vector-stores/file-batches.ts +++ b/src/resources/vector-stores/file-batches.ts @@ -1,15 +1,15 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -import { APIResource } from '../../../resource'; -import { isRequestOptions } from '../../../core'; -import { sleep } from '../../../core'; -import { Uploadable } from '../../../core'; -import { allSettledWithThrow } from '../../../lib/Util'; -import * as Core from '../../../core'; +import { APIResource } from '../../resource'; +import { isRequestOptions } from '../../core'; +import { sleep } from '../../core'; +import { Uploadable } from '../../core'; +import { allSettledWithThrow } from '../../lib/Util'; +import * as Core from '../../core'; import * as FilesAPI from './files'; import { VectorStoreFilesPage } from './files'; import * as VectorStoresAPI from './vector-stores'; -import { type CursorPageParams } from '../../../pagination'; +import { type CursorPageParams } from '../../pagination'; export class FileBatches extends APIResource { /** @@ -265,6 +265,15 @@ export interface FileBatchCreateParams { */ file_ids: Array; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. diff --git a/src/resources/beta/vector-stores/files.ts b/src/resources/vector-stores/files.ts similarity index 74% rename from src/resources/beta/vector-stores/files.ts rename to src/resources/vector-stores/files.ts index 1fda9a99b..28caf9781 100644 --- a/src/resources/beta/vector-stores/files.ts +++ b/src/resources/vector-stores/files.ts @@ -1,10 +1,10 @@ // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-import { APIResource } from '../../../resource'; -import { sleep, Uploadable, isRequestOptions } from '../../../core'; -import * as Core from '../../../core'; +import { APIResource } from '../../resource'; +import { sleep, Uploadable, isRequestOptions } from '../../core'; +import * as Core from '../../core'; import * as VectorStoresAPI from './vector-stores'; -import { CursorPage, type CursorPageParams } from '../../../pagination'; +import { CursorPage, type CursorPageParams, Page } from '../../pagination'; export class Files extends APIResource { /** @@ -38,6 +38,22 @@ export class Files extends APIResource { }); } + /** + * Update attributes on a vector store file. + */ + update( + vectorStoreId: string, + fileId: string, + body: FileUpdateParams, + options?: Core.RequestOptions, + ): Core.APIPromise { + return this._client.post(`/vector_stores/${vectorStoreId}/files/${fileId}`, { + body, + ...options, + headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers }, + }); + } + /** * Returns a list of vector store files. */ @@ -167,10 +183,30 @@ export class Files extends APIResource { const fileInfo = await this.upload(vectorStoreId, file, options); return await this.poll(vectorStoreId, fileInfo.id, options); } + + /** + * Retrieve the parsed contents of a vector store file. + */ + content( + vectorStoreId: string, + fileId: string, + options?: Core.RequestOptions, + ): Core.PagePromise { + return this._client.getAPIList( + `/vector_stores/${vectorStoreId}/files/${fileId}/content`, + FileContentResponsesPage, + { ...options, headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers } }, + ); + } } export class VectorStoreFilesPage extends CursorPage {} +/** + * Note: no pagination actually occurs yet, this is for forwards-compatibility. + */ +export class FileContentResponsesPage extends Page {} + /** * A list of files attached to a vector store. */ @@ -217,6 +253,15 @@ export interface VectorStoreFile { */ vector_store_id: string; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + /** * The strategy used to chunk the file. */ @@ -249,6 +294,18 @@ export interface VectorStoreFileDeleted { object: 'vector_store.file.deleted'; } +export interface FileContentResponse { + /** + * The text content + */ + text?: string; + + /** + * The content type (currently only `"text"`) + */ + type?: string; +} + export interface FileCreateParams { /** * A [File](https://platform.openai.com/docs/api-reference/files) ID that the @@ -257,6 +314,15 @@ export interface FileCreateParams { */ file_id: string; + /** + * Set of 16 key-value pairs that can be attached to an object. This can be useful + * for storing additional information about the object in a structured format, and + * querying for objects via API or the dashboard. Keys are strings with a maximum + * length of 64 characters. Values are strings with a maximum length of 512 + * characters, booleans, or numbers. + */ + attributes?: Record | null; + /** * The chunking strategy used to chunk the file(s). If not set, will use the `auto` * strategy. Only applicable if `file_ids` is non-empty. 
@@ -264,6 +330,17 @@ export interface FileCreateParams {
   chunking_strategy?: VectorStoresAPI.FileChunkingStrategyParam;
 }
 
+export interface FileUpdateParams {
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful
+   * for storing additional information about the object in a structured format, and
+   * querying for objects via API or the dashboard. Keys are strings with a maximum
+   * length of 64 characters. Values are strings with a maximum length of 512
+   * characters, booleans, or numbers.
+   */
+  attributes: Record<string, string | boolean | number> | null;
+}
+
 export interface FileListParams extends CursorPageParams {
   /**
    * A cursor for use in pagination. `before` is an object ID that defines your place
@@ -286,13 +363,17 @@
 }
 
 Files.VectorStoreFilesPage = VectorStoreFilesPage;
+Files.FileContentResponsesPage = FileContentResponsesPage;
 
 export declare namespace Files {
   export {
     type VectorStoreFile as VectorStoreFile,
     type VectorStoreFileDeleted as VectorStoreFileDeleted,
+    type FileContentResponse as FileContentResponse,
     VectorStoreFilesPage as VectorStoreFilesPage,
+    FileContentResponsesPage as FileContentResponsesPage,
     type FileCreateParams as FileCreateParams,
+    type FileUpdateParams as FileUpdateParams,
     type FileListParams as FileListParams,
   };
 }
diff --git a/src/resources/beta/vector-stores/index.ts b/src/resources/vector-stores/index.ts
similarity index 82%
rename from src/resources/beta/vector-stores/index.ts
rename to src/resources/vector-stores/index.ts
index d587bd160..9cbcbc0b2 100644
--- a/src/resources/beta/vector-stores/index.ts
+++ b/src/resources/vector-stores/index.ts
@@ -8,14 +8,18 @@ export {
 } from './file-batches';
 export {
   VectorStoreFilesPage,
+  FileContentResponsesPage,
   Files,
   type VectorStoreFile,
   type VectorStoreFileDeleted,
+  type FileContentResponse,
   type FileCreateParams,
+  type FileUpdateParams,
   type FileListParams,
 } from './files';
 export {
   VectorStoresPage,
+  VectorStoreSearchResponsesPage,
   VectorStores,
   type AutoFileChunkingStrategyParam,
   type FileChunkingStrategy,
@@ -26,7 +30,9 @@ export {
   type StaticFileChunkingStrategyObjectParam,
   type VectorStore,
   type VectorStoreDeleted,
+  type VectorStoreSearchResponse,
   type VectorStoreCreateParams,
   type VectorStoreUpdateParams,
   type VectorStoreListParams,
+  type VectorStoreSearchParams,
 } from './vector-stores';
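Taken together, the file-level additions in the files.ts diff above give each vector store file mutable metadata and readable parsed content. A minimal sketch with placeholder IDs and attribute values:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// Replace the attributes attached to one vector store file
// (placeholder vector store and file IDs).
await client.vectorStores.files.update('vs_abc123', 'file-abc123', {
  attributes: { category: 'contract', year: 2024 },
});

// Page through the parsed text chunks stored for that file.
for await (const chunk of client.vectorStores.files.content('vs_abc123', 'file-abc123')) {
  console.log(chunk.type, chunk.text);
}
```

Note that `FileContentResponsesPage` does not actually paginate yet, per its own doc comment, so the loop currently walks a single page.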
diff --git a/src/resources/beta/vector-stores/vector-stores.ts b/src/resources/vector-stores/vector-stores.ts
similarity index 77%
rename from src/resources/beta/vector-stores/vector-stores.ts
rename to src/resources/vector-stores/vector-stores.ts
index 8438b79da..7d61e7fd6 100644
--- a/src/resources/beta/vector-stores/vector-stores.ts
+++ b/src/resources/vector-stores/vector-stores.ts
@@ -1,9 +1,9 @@
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-import { APIResource } from '../../../resource';
-import { isRequestOptions } from '../../../core';
-import * as Core from '../../../core';
-import * as Shared from '../../shared';
+import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
+import * as Shared from '../shared';
 import * as FileBatchesAPI from './file-batches';
 import {
   FileBatchCreateParams,
@@ -13,14 +13,17 @@
 } from './file-batches';
 import * as FilesAPI from './files';
 import {
+  FileContentResponse,
+  FileContentResponsesPage,
   FileCreateParams,
   FileListParams,
+  FileUpdateParams,
   Files,
   VectorStoreFile,
   VectorStoreFileDeleted,
   VectorStoreFilesPage,
 } from './files';
-import { CursorPage, type CursorPageParams } from '../../../pagination';
+import { CursorPage, type CursorPageParams, Page } from '../../pagination';
 
 export class VectorStores extends APIResource {
   files: FilesAPI.Files = new FilesAPI.Files(this._client);
@@ -93,10 +96,32 @@
       headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
     });
   }
+
+  /**
+   * Search a vector store for relevant chunks based on a query and file attributes
+   * filter.
+   */
+  search(
+    vectorStoreId: string,
+    body: VectorStoreSearchParams,
+    options?: Core.RequestOptions,
+  ): Core.PagePromise<VectorStoreSearchResponsesPage, VectorStoreSearchResponse> {
+    return this._client.getAPIList(`/vector_stores/${vectorStoreId}/search`, VectorStoreSearchResponsesPage, {
+      body,
+      method: 'post',
+      ...options,
+      headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
+    });
+  }
 }
 
 export class VectorStoresPage extends CursorPage<VectorStore> {}
 
+/**
+ * Note: no pagination actually occurs yet, this is for forwards-compatibility.
+ */
+export class VectorStoreSearchResponsesPage extends Page<VectorStoreSearchResponse> {}
+
 /**
  * The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
  * `800` and `chunk_overlap_tokens` of `400`.
@@ -155,6 +180,9 @@ export interface StaticFileChunkingStrategyObject {
   type: 'static';
 }
 
+/**
+ * Customize your own chunking strategy by setting chunk size and chunk overlap.
+ */
 export interface StaticFileChunkingStrategyObjectParam {
   static: StaticFileChunkingStrategy;
 
@@ -282,6 +310,51 @@ export interface VectorStoreDeleted {
   object: 'vector_store.deleted';
 }
 
+export interface VectorStoreSearchResponse {
+  /**
+   * Set of 16 key-value pairs that can be attached to an object. This can be useful
+   * for storing additional information about the object in a structured format, and
+   * querying for objects via API or the dashboard. Keys are strings with a maximum
+   * length of 64 characters. Values are strings with a maximum length of 512
+   * characters, booleans, or numbers.
+   */
+  attributes: Record<string, string | boolean | number> | null;
+
+  /**
+   * Content chunks from the file.
+   */
+  content: Array<VectorStoreSearchResponse.Content>;
+
+  /**
+   * The ID of the vector store file.
+   */
+  file_id: string;
+
+  /**
+   * The name of the vector store file.
+   */
+  filename: string;
+
+  /**
+   * The similarity score for the result.
+   */
+  score: number;
+}
+
+export namespace VectorStoreSearchResponse {
+  export interface Content {
+    /**
+     * The text content returned from search.
+     */
+    text: string;
+
+    /**
+     * The type of content.
+     */
+    type: 'text';
+  }
+}
+
 export interface VectorStoreCreateParams {
   /**
    * The chunking strategy used to chunk the file(s). If not set, will use the `auto`
@@ -391,9 +464,50 @@ export interface VectorStoreListParams extends CursorPageParams {
   order?: 'asc' | 'desc';
 }
 
+export interface VectorStoreSearchParams {
+  /**
+   * A query string for a search
+   */
+  query: string | Array<string>;
+
+  /**
+   * A filter to apply based on file attributes.
+   */
+  filters?: Shared.ComparisonFilter | Shared.CompoundFilter;
+
+  /**
+   * The maximum number of results to return. This number should be between 1 and 50
+   * inclusive.
+   */
+  max_num_results?: number;
+
+  /**
+   * Ranking options for search.
+   */
+  ranking_options?: VectorStoreSearchParams.RankingOptions;
+
+  /**
+   * Whether to rewrite the natural language query for vector search.
+   */
+  rewrite_query?: boolean;
+}
+
+export namespace VectorStoreSearchParams {
+  /**
+   * Ranking options for search.
+   */
+  export interface RankingOptions {
+    ranker?: 'auto' | 'default-2024-11-15';
+
+    score_threshold?: number;
+  }
+}
+
 VectorStores.VectorStoresPage = VectorStoresPage;
+VectorStores.VectorStoreSearchResponsesPage = VectorStoreSearchResponsesPage;
 VectorStores.Files = Files;
 VectorStores.VectorStoreFilesPage = VectorStoreFilesPage;
+VectorStores.FileContentResponsesPage = FileContentResponsesPage;
 VectorStores.FileBatches = FileBatches;
 
 export declare namespace VectorStores {
@@ -407,18 +521,24 @@
     type StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam,
     type VectorStore as VectorStore,
     type VectorStoreDeleted as VectorStoreDeleted,
+    type VectorStoreSearchResponse as VectorStoreSearchResponse,
     VectorStoresPage as VectorStoresPage,
+    VectorStoreSearchResponsesPage as VectorStoreSearchResponsesPage,
     type VectorStoreCreateParams as VectorStoreCreateParams,
     type VectorStoreUpdateParams as VectorStoreUpdateParams,
     type VectorStoreListParams as VectorStoreListParams,
+    type VectorStoreSearchParams as VectorStoreSearchParams,
   };
 
   export {
     Files as Files,
     type VectorStoreFile as VectorStoreFile,
     type VectorStoreFileDeleted as VectorStoreFileDeleted,
+    type FileContentResponse as FileContentResponse,
     VectorStoreFilesPage as VectorStoreFilesPage,
+    FileContentResponsesPage as FileContentResponsesPage,
     type FileCreateParams as FileCreateParams,
+    type FileUpdateParams as FileUpdateParams,
     type FileListParams as FileListParams,
   };
diff --git a/src/streaming.ts b/src/streaming.ts
index 52266154c..25b960314 100644
--- a/src/streaming.ts
+++ b/src/streaming.ts
@@ -41,7 +41,7 @@ export class Stream<Item> implements AsyncIterable<Item> {
         continue;
       }
 
-      if (sse.event === null) {
+      if (sse.event === null || sse.event.startsWith('response.')) {
         let data;
 
         try {
diff --git a/tests/api-resources/chat/completions/completions.test.ts b/tests/api-resources/chat/completions/completions.test.ts
index acdd631db..eddf252b1 100644
--- a/tests/api-resources/chat/completions/completions.test.ts
+++ b/tests/api-resources/chat/completions/completions.test.ts
@@ -43,9 +43,9 @@ describe('resource completions', () => {
       presence_penalty: -2,
       reasoning_effort: 'low',
       response_format: { type: 'text' },
-      seed: 0,
+      seed: -9007199254740991,
       service_tier: 'auto',
-      stop: 'string',
+      stop: '\n',
       store: true,
       stream: false,
       stream_options: { include_usage: true },
@@ -60,6 +60,13 @@
       top_logprobs: 0,
       top_p: 1,
       user: 'user-1234',
+      web_search_options: {
+        search_context_size: 'low',
+        user_location: {
+          approximate: { city: 'city', country: 'country', region: 'region', timezone: 'timezone' },
+          type: 'approximate',
+        },
+      },
     });
   });
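Before moving on to the generated tests, here is a minimal sketch of the search endpoint defined by `VectorStores.search` and `VectorStoreSearchParams` above. The vector store ID, query, and attribute filter are placeholders; the `filters` shape comes from `Shared.ComparisonFilter`.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// POST /v1/vector_stores/{vector_store_id}/search — returns a page of
// VectorStoreSearchResponse results, each carrying a similarity score.
const page = await client.vectorStores.search('vs_abc123', {
  query: 'What is our refund window?',
  filters: { key: 'category', type: 'eq', value: 'contract' },
  max_num_results: 5,
  rewrite_query: true,
});

for (const result of page.data) {
  console.log(result.filename, result.score, result.content[0]?.text);
}
```

As with file content, the search response page is a plain `Page` for now (no cursors), so all requested results arrive in one batch.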
diff --git a/tests/api-resources/responses/input-items.test.ts b/tests/api-resources/responses/input-items.test.ts new file mode 100644 index 000000000..51b86f1b3 --- /dev/null +++ b/tests/api-resources/responses/input-items.test.ts @@ -0,0 +1,40 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010', +}); + +describe('resource inputItems', () => { + test('list', async () => { + const responsePromise = client.responses.inputItems.list('response_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.inputItems.list('response_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.responses.inputItems.list( + 'response_id', + { after: 'after', before: 'before', limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/beta/vector-stores/files.test.ts b/tests/api-resources/responses/responses.test.ts similarity index 58% rename from tests/api-resources/beta/vector-stores/files.test.ts rename to tests/api-resources/responses/responses.test.ts index 7c14d4de3..e10722738 100644 --- a/tests/api-resources/beta/vector-stores/files.test.ts +++ b/tests/api-resources/responses/responses.test.ts @@ -8,9 +8,9 @@ const client = new OpenAI({ baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', }); -describe('resource files', () => { +describe('resource responses', () => { test('create: only required params', async () => { - const responsePromise = client.beta.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); + const responsePromise = client.responses.create({ input: 'string', model: 'gpt-4o' }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,14 +21,38 @@ describe('resource files', () => { }); test('create: required and optional params', async () => { - const response = await client.beta.vectorStores.files.create('vs_abc123', { - file_id: 'file_id', - chunking_strategy: { type: 'auto' }, + const response = await client.responses.create({ + input: 'string', + model: 'gpt-4o', + include: ['file_search_call.results'], + instructions: 'instructions', + max_output_tokens: 0, + metadata: { foo: 'string' }, + parallel_tool_calls: true, + previous_response_id: 'previous_response_id', + reasoning: { effort: 'low', generate_summary: 'concise' }, + store: true, + stream: false, + temperature: 1, + text: { format: { type: 'text' } }, + tool_choice: 'none', + tools: [ + { + type: 'file_search', + vector_store_ids: ['string'], + filters: { key: 'key', type: 'eq', value: 'string' }, + max_num_results: 0, + ranking_options: { ranker: 'auto', score_threshold: 0 }, + }, + ], + top_p: 1, + truncation: 'auto', + user: 'user-1234', }); }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123'); + const responsePromise = client.responses.retrieve('resp_677efb5139a88190b512bc3fef8e535d'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -41,43 +65,25 @@ describe('resource files', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.retrieve('vs_abc123', 'file-abc123', { + client.responses.retrieve('resp_677efb5139a88190b512bc3fef8e535d', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); - test('list', async () => { - const responsePromise = client.beta.vectorStores.files.list('vector_store_id'); - const rawResponse = await responsePromise.asResponse(); - expect(rawResponse).toBeInstanceOf(Response); - const response = await responsePromise; - expect(response).not.toBeInstanceOf(Response); - const dataAndResponse = await responsePromise.withResponse(); - expect(dataAndResponse.data).toBe(response); - expect(dataAndResponse.response).toBe(rawResponse); - }); - - test('list: request options instead of params are passed correctly', async () => { - // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error - await expect( - client.beta.vectorStores.files.list('vector_store_id', { path: '/_stainless_unknown_path' }), - ).rejects.toThrow(OpenAI.NotFoundError); - }); - - test('list: request options and params are passed correctly', async () => { + test('retrieve: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.list( - 'vector_store_id', 
- { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, + client.responses.retrieve( + 'resp_677efb5139a88190b512bc3fef8e535d', + { include: ['file_search_call.results'] }, { path: '/_stainless_unknown_path' }, ), ).rejects.toThrow(OpenAI.NotFoundError); }); test('del', async () => { - const responsePromise = client.beta.vectorStores.files.del('vector_store_id', 'file_id'); + const responsePromise = client.responses.del('resp_677efb5139a88190b512bc3fef8e535d'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -90,7 +96,7 @@ describe('resource files', () => { test('del: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.files.del('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + client.responses.del('resp_677efb5139a88190b512bc3fef8e535d', { path: '/_stainless_unknown_path' }), ).rejects.toThrow(OpenAI.NotFoundError); }); }); diff --git a/tests/api-resources/beta/vector-stores/file-batches.test.ts b/tests/api-resources/vector-stores/file-batches.test.ts similarity index 81% rename from tests/api-resources/beta/vector-stores/file-batches.test.ts rename to tests/api-resources/vector-stores/file-batches.test.ts index b714049b4..c0447a838 100644 --- a/tests/api-resources/beta/vector-stores/file-batches.test.ts +++ b/tests/api-resources/vector-stores/file-batches.test.ts @@ -10,9 +10,7 @@ const client = new OpenAI({ describe('resource fileBatches', () => { test('create: only required params', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.create('vs_abc123', { - file_ids: ['string'], - }); + const responsePromise = client.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'] }); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -23,14 +21,15 @@ describe('resource fileBatches', () => { }); test('create: required and optional params', async () => { - const response = await client.beta.vectorStores.fileBatches.create('vs_abc123', { + const response = await client.vectorStores.fileBatches.create('vs_abc123', { file_ids: ['string'], + attributes: { foo: 'string' }, chunking_strategy: { type: 'auto' }, }); }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123'); + const responsePromise = client.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -43,14 +42,14 @@ describe('resource fileBatches', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123', { + client.vectorStores.fileBatches.retrieve('vs_abc123', 'vsfb_abc123', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('cancel', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); + const responsePromise = 
client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -63,14 +62,14 @@ describe('resource fileBatches', () => { test('cancel: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { + client.vectorStores.fileBatches.cancel('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); }); test('listFiles', async () => { - const responsePromise = client.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id'); + const responsePromise = client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -83,7 +82,7 @@ describe('resource fileBatches', () => { test('listFiles: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', { + client.vectorStores.fileBatches.listFiles('vector_store_id', 'batch_id', { path: '/_stainless_unknown_path', }), ).rejects.toThrow(OpenAI.NotFoundError); @@ -92,7 +91,7 @@ describe('resource fileBatches', () => { test('listFiles: request options and params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.fileBatches.listFiles( + client.vectorStores.fileBatches.listFiles( 'vector_store_id', 'batch_id', { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, diff --git a/tests/api-resources/vector-stores/files.test.ts b/tests/api-resources/vector-stores/files.test.ts new file mode 100644 index 000000000..86a8f9bb4 --- /dev/null +++ b/tests/api-resources/vector-stores/files.test.ts @@ -0,0 +1,132 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +import OpenAI from 'openai'; +import { Response } from 'node-fetch'; + +const client = new OpenAI({ + apiKey: 'My API Key', + baseURL: process.env['TEST_API_BASE_URL'] ?? 
'http://127.0.0.1:4010', +}); + +describe('resource files', () => { + test('create: only required params', async () => { + const responsePromise = client.vectorStores.files.create('vs_abc123', { file_id: 'file_id' }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('create: required and optional params', async () => { + const response = await client.vectorStores.files.create('vs_abc123', { + file_id: 'file_id', + attributes: { foo: 'string' }, + chunking_strategy: { type: 'auto' }, + }); + }); + + test('retrieve', async () => { + const responsePromise = client.vectorStores.files.retrieve('vs_abc123', 'file-abc123'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('retrieve: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.retrieve('vs_abc123', 'file-abc123', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('update: only required params', async () => { + const responsePromise = client.vectorStores.files.update('vs_abc123', 'file-abc123', { + attributes: { foo: 'string' }, + }); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('update: required and optional params', async () => { + const response = await client.vectorStores.files.update('vs_abc123', 'file-abc123', { + attributes: { foo: 'string' }, + }); + }); + + test('list', async () => { + const responsePromise = client.vectorStores.files.list('vector_store_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('list: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.list('vector_store_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('list: request options and params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.list( + 
'vector_store_id', + { after: 'after', before: 'before', filter: 'in_progress', limit: 0, order: 'asc' }, + { path: '/_stainless_unknown_path' }, + ), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('del', async () => { + const responsePromise = client.vectorStores.files.del('vector_store_id', 'file_id'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('del: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.del('vector_store_id', 'file_id', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); + + test('content', async () => { + const responsePromise = client.vectorStores.files.content('vs_abc123', 'file-abc123'); + const rawResponse = await responsePromise.asResponse(); + expect(rawResponse).toBeInstanceOf(Response); + const response = await responsePromise; + expect(response).not.toBeInstanceOf(Response); + const dataAndResponse = await responsePromise.withResponse(); + expect(dataAndResponse.data).toBe(response); + expect(dataAndResponse.response).toBe(rawResponse); + }); + + test('content: request options instead of params are passed correctly', async () => { + // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error + await expect( + client.vectorStores.files.content('vs_abc123', 'file-abc123', { path: '/_stainless_unknown_path' }), + ).rejects.toThrow(OpenAI.NotFoundError); + }); +}); diff --git a/tests/api-resources/beta/vector-stores/vector-stores.test.ts b/tests/api-resources/vector-stores/vector-stores.test.ts similarity index 71% rename from tests/api-resources/beta/vector-stores/vector-stores.test.ts rename to tests/api-resources/vector-stores/vector-stores.test.ts index 806098de8..465904a00 100644 --- a/tests/api-resources/beta/vector-stores/vector-stores.test.ts +++ b/tests/api-resources/vector-stores/vector-stores.test.ts @@ -10,7 +10,7 @@ const client = new OpenAI({ describe('resource vectorStores', () => { test('create', async () => { - const responsePromise = client.beta.vectorStores.create({}); + const responsePromise = client.vectorStores.create({}); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -21,7 +21,7 @@ describe('resource vectorStores', () => { }); test('retrieve', async () => { - const responsePromise = client.beta.vectorStores.retrieve('vector_store_id'); + const responsePromise = client.vectorStores.retrieve('vector_store_id'); const rawResponse = await responsePromise.asResponse(); expect(rawResponse).toBeInstanceOf(Response); const response = await responsePromise; @@ -34,12 +34,12 @@ describe('resource vectorStores', () => { test('retrieve: request options instead of params are passed correctly', async () => { // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error await expect( - client.beta.vectorStores.retrieve('vector_store_id', { path: '/_stainless_unknown_path' }), + 
client.vectorStores.retrieve('vector_store_id', { path: '/_stainless_unknown_path' }),
     ).rejects.toThrow(OpenAI.NotFoundError);
   });
 
   test('update', async () => {
-    const responsePromise = client.beta.vectorStores.update('vector_store_id', {});
+    const responsePromise = client.vectorStores.update('vector_store_id', {});
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -50,7 +50,7 @@
   });
 
   test('list', async () => {
-    const responsePromise = client.beta.vectorStores.list();
+    const responsePromise = client.vectorStores.list();
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -62,7 +62,7 @@
 
   test('list: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
-    await expect(client.beta.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+    await expect(client.vectorStores.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
       OpenAI.NotFoundError,
     );
   });
@@ -70,7 +70,7 @@
   test('list: request options and params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      client.beta.vectorStores.list(
+      client.vectorStores.list(
         { after: 'after', before: 'before', limit: 0, order: 'asc' },
         { path: '/_stainless_unknown_path' },
       ),
@@ -78,7 +78,7 @@
   });
 
   test('del', async () => {
-    const responsePromise = client.beta.vectorStores.del('vector_store_id');
+    const responsePromise = client.vectorStores.del('vector_store_id');
     const rawResponse = await responsePromise.asResponse();
     expect(rawResponse).toBeInstanceOf(Response);
     const response = await responsePromise;
@@ -91,7 +91,28 @@
   test('del: request options instead of params are passed correctly', async () => {
     // ensure the request options are being passed correctly by passing an invalid HTTP method in order to cause an error
     await expect(
-      client.beta.vectorStores.del('vector_store_id', { path: '/_stainless_unknown_path' }),
+      client.vectorStores.del('vector_store_id', { path: '/_stainless_unknown_path' }),
     ).rejects.toThrow(OpenAI.NotFoundError);
   });
+
+  test('search: only required params', async () => {
+    const responsePromise = client.vectorStores.search('vs_abc123', { query: 'string' });
+    const rawResponse = await responsePromise.asResponse();
+    expect(rawResponse).toBeInstanceOf(Response);
+    const response = await responsePromise;
+    expect(response).not.toBeInstanceOf(Response);
+    const dataAndResponse = await responsePromise.withResponse();
+    expect(dataAndResponse.data).toBe(response);
+    expect(dataAndResponse.response).toBe(rawResponse);
+  });
+
+  test('search: required and optional params', async () => {
+    const response = await client.vectorStores.search('vs_abc123', {
+      query: 'string',
+      filters: { key: 'key', type: 'eq', value: 'string' },
+      max_num_results: 1,
+      ranking_options: { ranker: 'auto', score_threshold: 0 },
+      rewrite_query: true,
+    });
+  });
 });
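Stepping back from the generated tests: the Responses methods exercised above compose into a simple lifecycle. A minimal sketch, with a placeholder prompt; `create`, `inputItems.list`, and `del` are the methods covered by the new test files.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// Create a response, then inspect and clean up the stored object.
const response = await client.responses.create({
  model: 'gpt-4o',
  input: 'Summarize the Responses API in one sentence.',
});
console.log(response.output_text);

// List the input items recorded for that response...
const items = await client.responses.inputItems.list(response.id);
console.log(items.data.length);

// ...and delete the stored response once it is no longer needed.
await client.responses.del(response.id);
```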
From 6a4b1d1954d86eda060aaa1683bfebc2d527066b Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 11 Mar 2025 16:32:03 +0000
Subject: [PATCH 2/2] release: 4.87.0

---
 .release-please-manifest.json | 2 +-
 CHANGELOG.md                  | 8 ++++++++
 jsr.json                      | 2 +-
 package.json                  | 2 +-
 src/version.ts                | 2 +-
 5 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index a889d24b4..e8984a56c 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "4.86.2"
+  ".": "4.87.0"
 }
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 38d54fdc1..2ec7edb2b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
 # Changelog
 
+## 4.87.0 (2025-03-11)
+
+Full Changelog: [v4.86.2...v4.87.0](https://github.com/openai/openai-node/compare/v4.86.2...v4.87.0)
+
+### Features
+
+* **api:** add /v1/responses and built-in tools ([119b584](https://github.com/openai/openai-node/commit/119b5843a18b8014167c8d2031d75c08dbf400a3))
+
 ## 4.86.2 (2025-03-05)
 
 Full Changelog: [v4.86.1...v4.86.2](https://github.com/openai/openai-node/compare/v4.86.1...v4.86.2)
diff --git a/jsr.json b/jsr.json
index 1c0948aaa..4ac1601e7 100644
--- a/jsr.json
+++ b/jsr.json
@@ -1,6 +1,6 @@
 {
   "name": "@openai/openai",
-  "version": "4.86.2",
+  "version": "4.87.0",
   "exports": {
     ".": "./index.ts",
     "./helpers/zod": "./helpers/zod.ts",
diff --git a/package.json b/package.json
index 78afb8946..7cf4a385d 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "openai",
-  "version": "4.86.2",
+  "version": "4.87.0",
   "description": "The official TypeScript library for the OpenAI API",
   "author": "OpenAI <support@openai.com>",
   "types": "dist/index.d.ts",
diff --git a/src/version.ts b/src/version.ts
index c43a3c320..2b1fd6541 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '4.86.2'; // x-release-please-version
+export const VERSION = '4.87.0'; // x-release-please-version