diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index c99ba658e..2777efed5 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "5.12.0"
+ ".": "5.12.1"
}
diff --git a/.stats.yml b/.stats.yml
index f86fa668b..9c1b4e4c5 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d6a16b25b969c3e5382e7d413de15bf83d5f7534d5c3ecce64d3a7e847418f9e.yml
-openapi_spec_hash: 0c0bcf4aee9ca2a948dd14b890dfe728
-config_hash: aeff9289bd7f8c8482e4d738c3c2fde1
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f5c45f4ae5c2075cbc603d6910bba3da31c23714c209fbd3fd82a94f634a126b.yml
+openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba
+config_hash: 9a64321968e21ed72f5c0e02164ea00d
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 17d2baca5..f5ee00f3c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,18 @@
# Changelog
+## 5.12.1 (2025-08-07)
+
+Full Changelog: [v5.12.0...v5.12.1](https://github.com/openai/openai-node/compare/v5.12.0...v5.12.1)
+
+### Features
+
+* **api:** adds GPT-5 and new API features: platform.openai.com/docs/guides/gpt-5 ([59acd85](https://github.com/openai/openai-node/commit/59acd85a3e0d4db7f7eca9a287b23ec00191fa68))
+
+
+### Chores
+
+* **internal:** move publish config ([b3d02f6](https://github.com/openai/openai-node/commit/b3d02f6faab5aa8e62998446485e43ebc802d68e))
+
## 5.12.0 (2025-08-05)
Full Changelog: [v5.11.0...v5.12.0](https://github.com/openai/openai-node/compare/v5.11.0...v5.12.0)
diff --git a/api.md b/api.md
index 65d7c7631..c3c6d1c73 100644
--- a/api.md
+++ b/api.md
@@ -6,6 +6,7 @@ Types:
- ChatModel
- ComparisonFilter
- CompoundFilter
+- CustomToolInputFormat
- ErrorObject
- FunctionDefinition
- FunctionParameters
@@ -15,6 +16,8 @@ Types:
- ResponseFormatJSONObject
- ResponseFormatJSONSchema
- ResponseFormatText
+- ResponseFormatTextGrammar
+- ResponseFormatTextPython
- ResponsesModel
# Completions
@@ -40,6 +43,7 @@ Types:
Types:
- ChatCompletion
+- ChatCompletionAllowedToolChoice
- ChatCompletionAssistantMessageParam
- ChatCompletionAudio
- ChatCompletionAudioParam
@@ -49,15 +53,20 @@ Types:
- ChatCompletionContentPartInputAudio
- ChatCompletionContentPartRefusal
- ChatCompletionContentPartText
+- ChatCompletionCustomTool
- ChatCompletionDeleted
- ChatCompletionDeveloperMessageParam
- ChatCompletionFunctionCallOption
- ChatCompletionFunctionMessageParam
+- ChatCompletionFunctionTool
- ChatCompletionMessage
+- ChatCompletionMessageCustomToolCall
+- ChatCompletionMessageFunctionToolCall
- ChatCompletionMessageParam
- ChatCompletionMessageToolCall
- ChatCompletionModality
- ChatCompletionNamedToolChoice
+- ChatCompletionNamedToolChoiceCustom
- ChatCompletionPredictionContent
- ChatCompletionRole
- ChatCompletionStoreMessage
@@ -68,6 +77,7 @@ Types:
- ChatCompletionToolChoiceOption
- ChatCompletionToolMessageParam
- ChatCompletionUserMessageParam
+- ChatCompletionAllowedTools
- ChatCompletionReasoningEffort
Methods:
@@ -638,6 +648,7 @@ Methods:
Types:
- ComputerTool
+- CustomTool
- EasyInputMessage
- FileSearchTool
- FunctionTool
@@ -660,6 +671,10 @@ Types:
- ResponseContentPartAddedEvent
- ResponseContentPartDoneEvent
- ResponseCreatedEvent
+- ResponseCustomToolCall
+- ResponseCustomToolCallInputDeltaEvent
+- ResponseCustomToolCallInputDoneEvent
+- ResponseCustomToolCallOutput
- ResponseError
- ResponseErrorEvent
- ResponseFailedEvent
@@ -729,6 +744,8 @@ Types:
- ResponseWebSearchCallInProgressEvent
- ResponseWebSearchCallSearchingEvent
- Tool
+- ToolChoiceAllowed
+- ToolChoiceCustom
- ToolChoiceFunction
- ToolChoiceMcp
- ToolChoiceOptions
diff --git a/bin/publish-npm b/bin/publish-npm
index fa2243d24..45e8aa808 100644
--- a/bin/publish-npm
+++ b/bin/publish-npm
@@ -58,4 +58,4 @@ else
fi
# Publish with the appropriate tag
-yarn publish --access public --tag "$TAG"
+yarn publish --tag "$TAG"
diff --git a/examples/tool-calls-stream.ts b/examples/tool-calls-stream.ts
index 93f16a245..7cb3ffbbc 100755
--- a/examples/tool-calls-stream.ts
+++ b/examples/tool-calls-stream.ts
@@ -76,8 +76,9 @@ const tools: OpenAI.Chat.Completions.ChatCompletionTool[] = [
},
];
-async function callTool(tool_call: OpenAI.Chat.Completions.ChatCompletionMessageToolCall): Promise<any> {
- if (tool_call.type !== 'function') throw new Error('Unexpected tool_call type:' + tool_call.type);
+async function callTool(
+ tool_call: OpenAI.Chat.Completions.ChatCompletionMessageFunctionToolCall,
+): Promise<any> {
const args = JSON.parse(tool_call.function.arguments);
switch (tool_call.function.name) {
case 'list':
@@ -143,6 +144,9 @@ async function main() {
// If there are tool calls, we generate a new message with the role 'tool' for each tool call.
for (const toolCall of message.tool_calls) {
+ if (toolCall.type !== 'function') {
+ throw new Error(`Unexpected tool call type: ${toolCall.type}`);
+ }
const result = await callTool(toolCall);
const newMessage = {
tool_call_id: toolCall.id,
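For context, a hedged sketch of the narrowing pattern this example now relies on: `message.tool_calls` is a union of function and custom tool calls, so callers check `type` before touching `.function`. The model name and tool definition below are illustrative placeholders, not part of this diff.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function handleToolCalls() {
  const completion = await client.chat.completions.create({
    model: 'gpt-4o', // placeholder model
    messages: [{ role: 'user', content: 'List the books in the database.' }],
    tools: [
      {
        type: 'function',
        function: {
          name: 'list',
          description: 'Lists books, optionally filtered by genre',
          parameters: { type: 'object', properties: { genre: { type: 'string' } } },
        },
      },
    ],
  });

  for (const toolCall of completion.choices[0]?.message.tool_calls ?? []) {
    if (toolCall.type !== 'function') {
      // Custom tool calls carry free-form input rather than JSON arguments.
      console.log('custom tool call:', toolCall.custom.name, toolCall.custom.input);
      continue;
    }
    // Narrowed to ChatCompletionMessageFunctionToolCall here.
    const args = JSON.parse(toolCall.function.arguments);
    console.log('function call:', toolCall.function.name, args);
  }
}

handleToolCalls();
```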
diff --git a/jsr.json b/jsr.json
index bf3ba1f5a..7a808ad9d 100644
--- a/jsr.json
+++ b/jsr.json
@@ -1,6 +1,6 @@
{
"name": "@openai/openai",
- "version": "5.12.0",
+ "version": "5.12.1",
"exports": {
".": "./index.ts",
"./helpers/zod": "./helpers/zod.ts",
diff --git a/jsr.json.orig b/jsr.json.orig
index c7b99a6f6..3e7c40d5f 100644
--- a/jsr.json.orig
+++ b/jsr.json.orig
@@ -1,6 +1,5 @@
{
"name": "@openai/openai",
-<<<<<<< HEAD
"version": "4.87.4",
"exports": {
".": "./index.ts",
@@ -10,13 +9,6 @@
"imports": {
"zod": "npm:zod@3"
},
-||||||| parent of 0603bcac (chore(internal): version bump (#1393))
- "version": "4.87.3",
- "exports": "./index.ts",
-=======
- "version": "4.87.4",
- "exports": "./index.ts",
->>>>>>> 0603bcac (chore(internal): version bump (#1393))
"publish": {
"exclude": [
"!."
diff --git a/package.json b/package.json
index d5081ea1d..9fc0ea6b8 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "openai",
- "version": "5.12.0",
+ "version": "5.12.1",
"description": "The official TypeScript library for the OpenAI API",
"author": "OpenAI ",
"types": "dist/index.d.ts",
@@ -13,6 +13,9 @@
"**/*"
],
"private": false,
+ "publishConfig": {
+ "access": "public"
+ },
"scripts": {
"test": "./scripts/test",
"build": "./scripts/build",
diff --git a/src/client.ts b/src/client.ts
index 81102c552..217ba5117 100644
--- a/src/client.ts
+++ b/src/client.ts
@@ -141,6 +141,8 @@ import {
} from './resources/vector-stores/vector-stores';
import {
ChatCompletion,
+ ChatCompletionAllowedToolChoice,
+ ChatCompletionAllowedTools,
ChatCompletionAssistantMessageParam,
ChatCompletionAudio,
ChatCompletionAudioParam,
@@ -153,16 +155,21 @@ import {
ChatCompletionCreateParams,
ChatCompletionCreateParamsNonStreaming,
ChatCompletionCreateParamsStreaming,
+ ChatCompletionCustomTool,
ChatCompletionDeleted,
ChatCompletionDeveloperMessageParam,
ChatCompletionFunctionCallOption,
ChatCompletionFunctionMessageParam,
+ ChatCompletionFunctionTool,
ChatCompletionListParams,
ChatCompletionMessage,
+ ChatCompletionMessageCustomToolCall,
+ ChatCompletionMessageFunctionToolCall,
ChatCompletionMessageParam,
ChatCompletionMessageToolCall,
ChatCompletionModality,
ChatCompletionNamedToolChoice,
+ ChatCompletionNamedToolChoiceCustom,
ChatCompletionPredictionContent,
ChatCompletionReasoningEffort,
ChatCompletionRole,
@@ -990,6 +997,7 @@ export declare namespace OpenAI {
export {
Chat as Chat,
type ChatCompletion as ChatCompletion,
+ type ChatCompletionAllowedToolChoice as ChatCompletionAllowedToolChoice,
type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam,
type ChatCompletionAudio as ChatCompletionAudio,
type ChatCompletionAudioParam as ChatCompletionAudioParam,
@@ -999,15 +1007,20 @@ export declare namespace OpenAI {
type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio,
type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal,
type ChatCompletionContentPartText as ChatCompletionContentPartText,
+ type ChatCompletionCustomTool as ChatCompletionCustomTool,
type ChatCompletionDeleted as ChatCompletionDeleted,
type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam,
type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption,
type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam,
+ type ChatCompletionFunctionTool as ChatCompletionFunctionTool,
type ChatCompletionMessage as ChatCompletionMessage,
+ type ChatCompletionMessageCustomToolCall as ChatCompletionMessageCustomToolCall,
+ type ChatCompletionMessageFunctionToolCall as ChatCompletionMessageFunctionToolCall,
type ChatCompletionMessageParam as ChatCompletionMessageParam,
type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall,
type ChatCompletionModality as ChatCompletionModality,
type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice,
+ type ChatCompletionNamedToolChoiceCustom as ChatCompletionNamedToolChoiceCustom,
type ChatCompletionPredictionContent as ChatCompletionPredictionContent,
type ChatCompletionRole as ChatCompletionRole,
type ChatCompletionStoreMessage as ChatCompletionStoreMessage,
@@ -1018,6 +1031,7 @@ export declare namespace OpenAI {
type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption,
type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam,
type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam,
+ type ChatCompletionAllowedTools as ChatCompletionAllowedTools,
type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort,
type ChatCompletionsPage as ChatCompletionsPage,
type ChatCompletionCreateParams as ChatCompletionCreateParams,
@@ -1162,6 +1176,7 @@ export declare namespace OpenAI {
export type ChatModel = API.ChatModel;
export type ComparisonFilter = API.ComparisonFilter;
export type CompoundFilter = API.CompoundFilter;
+ export type CustomToolInputFormat = API.CustomToolInputFormat;
export type ErrorObject = API.ErrorObject;
export type FunctionDefinition = API.FunctionDefinition;
export type FunctionParameters = API.FunctionParameters;
@@ -1171,5 +1186,7 @@ export declare namespace OpenAI {
export type ResponseFormatJSONObject = API.ResponseFormatJSONObject;
export type ResponseFormatJSONSchema = API.ResponseFormatJSONSchema;
export type ResponseFormatText = API.ResponseFormatText;
+ export type ResponseFormatTextGrammar = API.ResponseFormatTextGrammar;
+ export type ResponseFormatTextPython = API.ResponseFormatTextPython;
export type ResponsesModel = API.ResponsesModel;
}
diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts
index da6e8d7fe..982c572f6 100644
--- a/src/lib/AbstractChatCompletionRunner.ts
+++ b/src/lib/AbstractChatCompletionRunner.ts
@@ -1,27 +1,27 @@
-import type { CompletionUsage } from '../resources/completions';
+import { OpenAIError } from '../error';
+import type OpenAI from '../index';
+import type { RequestOptions } from '../internal/request-options';
+import { isAutoParsableTool, parseChatCompletion } from '../lib/parser';
import type {
ChatCompletion,
+ ChatCompletionCreateParams,
ChatCompletionMessage,
+ ChatCompletionMessageFunctionToolCall,
ChatCompletionMessageParam,
- ChatCompletionCreateParams,
ChatCompletionTool,
- ChatCompletionMessageToolCall,
+ ParsedChatCompletion,
} from '../resources/chat/completions';
-import { OpenAIError } from '../error';
+import type { CompletionUsage } from '../resources/completions';
+import type { ChatCompletionToolRunnerParams } from './ChatCompletionRunner';
+import type { ChatCompletionStreamingToolRunnerParams } from './ChatCompletionStreamingRunner';
+import { isAssistantMessage, isToolMessage } from './chatCompletionUtils';
+import { BaseEvents, EventStream } from './EventStream';
import {
- type RunnableFunction,
isRunnableFunctionWithParse,
type BaseFunctionsArgs,
+ type RunnableFunction,
type RunnableToolFunction,
} from './RunnableFunction';
-import type { ChatCompletionToolRunnerParams } from './ChatCompletionRunner';
-import type { ChatCompletionStreamingToolRunnerParams } from './ChatCompletionStreamingRunner';
-import { isAssistantMessage, isToolMessage } from './chatCompletionUtils';
-import { BaseEvents, EventStream } from './EventStream';
-import type { ParsedChatCompletion } from '../resources/chat/completions';
-import type OpenAI from '../index';
-import { isAutoParsableTool, parseChatCompletion } from '../lib/parser';
-import type { RequestOptions } from '../internal/request-options';
const DEFAULT_MAX_CHAT_COMPLETIONS = 10;
export interface RunnerOptions extends RequestOptions {
@@ -121,11 +121,11 @@ export class AbstractChatCompletionRunner<
return this.#getFinalMessage();
}
- #getFinalFunctionToolCall(): ChatCompletionMessageToolCall.Function | undefined {
+ #getFinalFunctionToolCall(): ChatCompletionMessageFunctionToolCall.Function | undefined {
for (let i = this.messages.length - 1; i >= 0; i--) {
const message = this.messages[i];
if (isAssistantMessage(message) && message?.tool_calls?.length) {
- return message.tool_calls.at(-1)?.function;
+ return message.tool_calls.filter((x) => x.type === 'function').at(-1)?.function;
}
}
@@ -136,7 +136,7 @@ export class AbstractChatCompletionRunner<
* @returns a promise that resolves with the content of the final FunctionCall, or rejects
* if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
*/
- async finalFunctionToolCall(): Promise<ChatCompletionMessageToolCall.Function | undefined> {
+ async finalFunctionToolCall(): Promise<ChatCompletionMessageFunctionToolCall.Function | undefined> {
await this.done();
return this.#getFinalFunctionToolCall();
}
@@ -260,7 +260,8 @@ export class AbstractChatCompletionRunner<
) {
const role = 'tool' as const;
const { tool_choice = 'auto', stream, ...restParams } = params;
- const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name;
+ const singleFunctionToCall =
+ typeof tool_choice !== 'string' && tool_choice.type === 'function' && tool_choice?.function?.name;
const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
// TODO(someday): clean this logic up
@@ -390,13 +391,13 @@ export class AbstractChatCompletionRunner<
}
export interface AbstractChatCompletionRunnerEvents extends BaseEvents {
- functionToolCall: (functionCall: ChatCompletionMessageToolCall.Function) => void;
+ functionToolCall: (functionCall: ChatCompletionMessageFunctionToolCall.Function) => void;
message: (message: ChatCompletionMessageParam) => void;
chatCompletion: (completion: ChatCompletion) => void;
finalContent: (contentSnapshot: string) => void;
finalMessage: (message: ChatCompletionMessageParam) => void;
finalChatCompletion: (completion: ChatCompletion) => void;
- finalFunctionToolCall: (functionCall: ChatCompletionMessageToolCall.Function) => void;
+ finalFunctionToolCall: (functionCall: ChatCompletionMessageFunctionToolCall.Function) => void;
functionToolCallResult: (content: string) => void;
finalFunctionToolCallResult: (content: string) => void;
totalUsage: (usage: CompletionUsage) => void;
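A hedged sketch of the runner surface these typings affect: `finalFunctionToolCall` payloads are now `ChatCompletionMessageFunctionToolCall.Function`, so `.name` and `.arguments` need no further narrowing. The model and the `add` tool below are illustrative placeholders.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const runner = client.chat.completions.runTools({
    model: 'gpt-4o', // placeholder model
    messages: [{ role: 'user', content: 'What is 2 + 2?' }],
    tools: [
      {
        type: 'function',
        function: {
          name: 'add',
          description: 'Adds two numbers',
          parameters: {
            type: 'object',
            properties: { a: { type: 'number' }, b: { type: 'number' } },
          },
          parse: JSON.parse,
          function: (args: { a: number; b: number }) => args.a + args.b,
        },
      },
    ],
  });

  // The payload is a ChatCompletionMessageFunctionToolCall.Function, so custom
  // tool calls never reach this handler.
  runner.on('finalFunctionToolCall', (fnCall) => {
    console.log('final function tool call:', fnCall.name, fnCall.arguments);
  });

  console.log(await runner.finalContent());
}

main();
```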
diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts
index 5036d4f60..6dc5bc6c4 100644
--- a/src/lib/ChatCompletionStream.ts
+++ b/src/lib/ChatCompletionStream.ts
@@ -1,36 +1,37 @@
+import { partialParse } from '../_vendor/partial-json-parser/parser';
import {
- OpenAIError,
APIUserAbortError,
- LengthFinishReasonError,
ContentFilterFinishReasonError,
+ LengthFinishReasonError,
+ OpenAIError,
} from '../error';
+import OpenAI from '../index';
+import { RequestOptions } from '../internal/request-options';
+import { type ReadableStream } from '../internal/shim-types';
+import {
+ AutoParseableResponseFormat,
+ hasAutoParseableInput,
+ isAutoParsableResponseFormat,
+ isAutoParsableTool,
+ isChatCompletionFunctionTool,
+ maybeParseChatCompletion,
+ shouldParseToolCall,
+} from '../lib/parser';
+import { ChatCompletionFunctionTool, ParsedChatCompletion } from '../resources/chat/completions';
import {
ChatCompletionTokenLogprob,
type ChatCompletion,
type ChatCompletionChunk,
type ChatCompletionCreateParams,
- type ChatCompletionCreateParamsStreaming,
type ChatCompletionCreateParamsBase,
+ type ChatCompletionCreateParamsStreaming,
type ChatCompletionRole,
} from '../resources/chat/completions/completions';
+import { Stream } from '../streaming';
import {
AbstractChatCompletionRunner,
type AbstractChatCompletionRunnerEvents,
} from './AbstractChatCompletionRunner';
-import { type ReadableStream } from '../internal/shim-types';
-import { Stream } from '../streaming';
-import OpenAI from '../index';
-import { ParsedChatCompletion } from '../resources/chat/completions';
-import {
- AutoParseableResponseFormat,
- hasAutoParseableInput,
- isAutoParsableResponseFormat,
- isAutoParsableTool,
- maybeParseChatCompletion,
- shouldParseToolCall,
-} from '../lib/parser';
-import { partialParse } from '../_vendor/partial-json-parser/parser';
-import { RequestOptions } from '../internal/request-options';
export interface ContentDeltaEvent {
delta: string;
@@ -303,8 +304,8 @@ export class ChatCompletionStream
if (toolCallSnapshot.type === 'function') {
const inputTool = this.#params?.tools?.find(
- (tool) => tool.type === 'function' && tool.function.name === toolCallSnapshot.function.name,
- );
+ (tool) => isChatCompletionFunctionTool(tool) && tool.function.name === toolCallSnapshot.function.name,
+ ) as ChatCompletionFunctionTool | undefined; // TS doesn't narrow based on isChatCompletionTool
this._emit('tool_calls.function.arguments.done', {
name: toolCallSnapshot.function.name,
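A hedged sketch of how the re-typed tool lookup surfaces through the streaming helper: the `tool_calls.function.arguments.done` event emitted above fires only for function tool calls. The model and tool below are placeholders.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const stream = client.chat.completions.stream({
    model: 'gpt-4o', // placeholder model
    messages: [{ role: 'user', content: 'What is the weather in Toronto?' }],
    tools: [
      {
        type: 'function',
        function: {
          name: 'get_weather',
          parameters: { type: 'object', properties: { location: { type: 'string' } } },
        },
      },
    ],
  });

  // Fires once a function tool call's arguments finish streaming; the lookup
  // patched above matches the emitted call against the function tools passed in.
  stream.on('tool_calls.function.arguments.done', (event) => {
    console.log(event.name, event.arguments);
  });

  await stream.done();
}

main();
```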
diff --git a/src/lib/parser.ts b/src/lib/parser.ts
index b396767fd..78e116073 100644
--- a/src/lib/parser.ts
+++ b/src/lib/parser.ts
@@ -1,10 +1,11 @@
+import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from '../error';
import {
ChatCompletion,
ChatCompletionCreateParams,
- ChatCompletionMessageToolCall,
- ChatCompletionTool,
-} from '../resources/chat/completions';
-import {
+ ChatCompletionCreateParamsBase,
+ ChatCompletionFunctionTool,
+ ChatCompletionMessage,
+ ChatCompletionMessageFunctionToolCall,
ChatCompletionStreamingToolRunnerParams,
ChatCompletionStreamParams,
ChatCompletionToolRunnerParams,
@@ -12,9 +13,8 @@ import {
ParsedChoice,
ParsedFunctionToolCall,
} from '../resources/chat/completions';
-import { ResponseFormatJSONSchema } from '../resources/shared';
-import { ContentFilterFinishReasonError, LengthFinishReasonError, OpenAIError } from '../error';
import { type ResponseFormatTextJSONSchemaConfig } from '../resources/responses/responses';
+import { ResponseFormatJSONSchema } from '../resources/shared';
type AnyChatCompletionCreateParams =
| ChatCompletionCreateParams
@@ -22,6 +22,14 @@ type AnyChatCompletionCreateParams =
| ChatCompletionStreamingToolRunnerParams
| ChatCompletionStreamParams;
+type Unpacked<T> = T extends (infer U)[] ? U : T;
+
+type ToolCall = Unpacked<ChatCompletionCreateParamsBase['tools']>;
+
+export function isChatCompletionFunctionTool(tool: ToolCall): tool is ChatCompletionFunctionTool {
+ return tool !== undefined && 'function' in tool && tool.function !== undefined;
+}
+
export type ExtractParsedContentFromParams<Params extends AnyChatCompletionCreateParams> =
Params['response_format'] extends AutoParseableResponseFormat<infer P> ? P : null;
@@ -94,7 +102,7 @@ type ToolOptions = {
export type AutoParseableTool<
OptionsT extends ToolOptions,
HasFunction = OptionsT['function'] extends Function ? true : false,
-> = ChatCompletionTool & {
+> = ChatCompletionFunctionTool & {
__arguments: OptionsT['arguments']; // type-level only
__name: OptionsT['name']; // type-level only
__hasFunction: HasFunction; // type-level only
@@ -105,7 +113,7 @@ export type AutoParseableTool<
};
export function makeParseableTool<OptionsT extends ToolOptions>(
- tool: ChatCompletionTool,
+ tool: ChatCompletionFunctionTool,
{
parser,
callback,
@@ -145,19 +153,23 @@ export function maybeParseChatCompletion<
if (!params || !hasAutoParseableInput(params)) {
return {
...completion,
- choices: completion.choices.map((choice) => ({
- ...choice,
- message: {
- ...choice.message,
- parsed: null,
- ...(choice.message.tool_calls ?
- {
- tool_calls: choice.message.tool_calls,
- }
- : undefined),
- },
- })),
- };
+ choices: completion.choices.map((choice) => {
+ assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+
+ return {
+ ...choice,
+ message: {
+ ...choice.message,
+ parsed: null,
+ ...(choice.message.tool_calls ?
+ {
+ tool_calls: choice.message.tool_calls,
+ }
+ : undefined),
+ },
+ };
+ }),
+ } as ParsedChatCompletion<ParsedT>;
}
return parseChatCompletion(completion, params);
@@ -176,6 +188,8 @@ export function parseChatCompletion<
throw new ContentFilterFinishReasonError();
}
+ assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+
return {
...choice,
message: {
@@ -191,7 +205,7 @@ export function parseChatCompletion<
parseResponseFormat(params, choice.message.content)
: null,
},
- };
+ } as ParsedChoice<ParsedT>;
});
return { ...completion, choices };
@@ -220,9 +234,12 @@ function parseResponseFormat<
function parseToolCall<Params extends ChatCompletionCreateParams>(
params: Params,
- toolCall: ChatCompletionMessageToolCall,
+ toolCall: ChatCompletionMessageFunctionToolCall,
): ParsedFunctionToolCall {
- const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
+ const inputTool = params.tools?.find(
+ (inputTool) =>
+ isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name,
+ ) as ChatCompletionFunctionTool | undefined; // TS doesn't narrow based on isChatCompletionTool
return {
...toolCall,
function: {
@@ -237,14 +254,20 @@ function parseToolCall(
export function shouldParseToolCall(
params: ChatCompletionCreateParams | null | undefined,
- toolCall: ChatCompletionMessageToolCall,
+ toolCall: ChatCompletionMessageFunctionToolCall,
): boolean {
- if (!params) {
+ if (!params || !('tools' in params) || !params.tools) {
return false;
}
- const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
- return isAutoParsableTool(inputTool) || inputTool?.function.strict || false;
+ const inputTool = params.tools?.find(
+ (inputTool) =>
+ isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name,
+ );
+ return (
+ isChatCompletionFunctionTool(inputTool) &&
+ (isAutoParsableTool(inputTool) || inputTool?.function.strict || false)
+ );
}
export function hasAutoParseableInput(params: AnyChatCompletionCreateParams): boolean {
@@ -259,7 +282,19 @@ export function hasAutoParseableInput(params: AnyChatCompletionCreateParams): bo
);
}
-export function validateInputTools(tools: ChatCompletionTool[] | undefined) {
+export function assertToolCallsAreChatCompletionFunctionToolCalls(
+ toolCalls: ChatCompletionMessage['tool_calls'],
+): asserts toolCalls is ChatCompletionMessageFunctionToolCall[] {
+ for (const toolCall of toolCalls || []) {
+ if (toolCall.type !== 'function') {
+ throw new OpenAIError(
+ `Currently only \`function\` tool calls are supported; Received \`${toolCall.type}\``,
+ );
+ }
+ }
+}
+
+export function validateInputTools(tools: ChatCompletionCreateParamsBase['tools']) {
for (const tool of tools ?? []) {
if (tool.type !== 'function') {
throw new OpenAIError(
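The guard and assertion added above are internal to the SDK's parser module; the sketch below re-implements the same checks in user code against the public types (the helper names are illustrative, not SDK exports).

```ts
import type OpenAI from 'openai';

type AnyTool = OpenAI.Chat.Completions.ChatCompletionTool;
type FunctionTool = OpenAI.Chat.Completions.ChatCompletionFunctionTool;
type AnyToolCall = OpenAI.Chat.Completions.ChatCompletionMessageToolCall;
type FunctionToolCall = OpenAI.Chat.Completions.ChatCompletionMessageFunctionToolCall;

// User-land re-implementation of the same structural guard as above.
function isFunctionTool(tool: AnyTool | undefined): tool is FunctionTool {
  return tool !== undefined && 'function' in tool && tool.function !== undefined;
}

// User-land equivalent of the assertion: reject `custom` tool calls up front.
function assertFunctionToolCalls(
  toolCalls: AnyToolCall[] | undefined,
): asserts toolCalls is FunctionToolCall[] {
  for (const toolCall of toolCalls ?? []) {
    if (toolCall.type !== 'function') {
      throw new Error(`Only \`function\` tool calls are supported; received \`${toolCall.type}\``);
    }
  }
}

// Usage: narrow a mixed tool list, then read `.function` fields safely.
declare const tools: AnyTool[];
declare const toolCalls: AnyToolCall[] | undefined;

console.log(tools.filter(isFunctionTool).map((t) => t.function.name));

assertFunctionToolCalls(toolCalls);
for (const call of toolCalls) {
  console.log(call.function.name, call.function.arguments);
}
```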
diff --git a/src/resources/beta/assistants.ts b/src/resources/beta/assistants.ts
index 663ea9aff..14ec22e4d 100644
--- a/src/resources/beta/assistants.ts
+++ b/src/resources/beta/assistants.ts
@@ -1158,12 +1158,11 @@ export interface AssistantCreateParams {
name?: string | null;
/**
- * **o-series models only**
- *
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- * result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ * effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -1363,6 +1362,12 @@ export interface AssistantUpdateParams {
*/
model?:
| (string & {})
+ | 'gpt-5'
+ | 'gpt-5-mini'
+ | 'gpt-5-nano'
+ | 'gpt-5-2025-08-07'
+ | 'gpt-5-mini-2025-08-07'
+ | 'gpt-5-nano-2025-08-07'
| 'gpt-4.1'
| 'gpt-4.1-mini'
| 'gpt-4.1-nano'
@@ -1406,12 +1411,11 @@ export interface AssistantUpdateParams {
name?: string | null;
/**
- * **o-series models only**
- *
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- * result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ * effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
diff --git a/src/resources/beta/threads/runs/runs.ts b/src/resources/beta/threads/runs/runs.ts
index 506c6897a..bdd9fc52a 100644
--- a/src/resources/beta/threads/runs/runs.ts
+++ b/src/resources/beta/threads/runs/runs.ts
@@ -723,12 +723,11 @@ export interface RunCreateParamsBase {
parallel_tool_calls?: boolean;
/**
- * Body param: **o-series models only**
- *
- * Constrains effort on reasoning for
+ * Body param: Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- * result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ * effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
diff --git a/src/resources/chat/chat.ts b/src/resources/chat/chat.ts
index 5bf388470..e770d9ecd 100644
--- a/src/resources/chat/chat.ts
+++ b/src/resources/chat/chat.ts
@@ -5,6 +5,8 @@ import * as Shared from '../shared';
import * as CompletionsAPI from './completions/completions';
import {
ChatCompletion,
+ ChatCompletionAllowedToolChoice,
+ ChatCompletionAllowedTools,
ChatCompletionAssistantMessageParam,
ChatCompletionAudio,
ChatCompletionAudioParam,
@@ -17,16 +19,21 @@ import {
ChatCompletionCreateParams,
ChatCompletionCreateParamsNonStreaming,
ChatCompletionCreateParamsStreaming,
+ ChatCompletionCustomTool,
ChatCompletionDeleted,
ChatCompletionDeveloperMessageParam,
ChatCompletionFunctionCallOption,
ChatCompletionFunctionMessageParam,
+ ChatCompletionFunctionTool,
ChatCompletionListParams,
ChatCompletionMessage,
+ ChatCompletionMessageCustomToolCall,
+ ChatCompletionMessageFunctionToolCall,
ChatCompletionMessageParam,
ChatCompletionMessageToolCall,
ChatCompletionModality,
ChatCompletionNamedToolChoice,
+ ChatCompletionNamedToolChoiceCustom,
ChatCompletionPredictionContent,
ChatCompletionReasoningEffort,
ChatCompletionRole,
@@ -57,6 +64,7 @@ export declare namespace Chat {
export {
Completions as Completions,
type ChatCompletion as ChatCompletion,
+ type ChatCompletionAllowedToolChoice as ChatCompletionAllowedToolChoice,
type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam,
type ChatCompletionAudio as ChatCompletionAudio,
type ChatCompletionAudioParam as ChatCompletionAudioParam,
@@ -66,15 +74,20 @@ export declare namespace Chat {
type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio,
type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal,
type ChatCompletionContentPartText as ChatCompletionContentPartText,
+ type ChatCompletionCustomTool as ChatCompletionCustomTool,
type ChatCompletionDeleted as ChatCompletionDeleted,
type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam,
type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption,
type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam,
+ type ChatCompletionFunctionTool as ChatCompletionFunctionTool,
type ChatCompletionMessage as ChatCompletionMessage,
+ type ChatCompletionMessageCustomToolCall as ChatCompletionMessageCustomToolCall,
+ type ChatCompletionMessageFunctionToolCall as ChatCompletionMessageFunctionToolCall,
type ChatCompletionMessageParam as ChatCompletionMessageParam,
type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall,
type ChatCompletionModality as ChatCompletionModality,
type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice,
+ type ChatCompletionNamedToolChoiceCustom as ChatCompletionNamedToolChoiceCustom,
type ChatCompletionPredictionContent as ChatCompletionPredictionContent,
type ChatCompletionRole as ChatCompletionRole,
type ChatCompletionStoreMessage as ChatCompletionStoreMessage,
@@ -85,6 +98,7 @@ export declare namespace Chat {
type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption,
type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam,
type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam,
+ type ChatCompletionAllowedTools as ChatCompletionAllowedTools,
type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort,
type ChatCompletionsPage as ChatCompletionsPage,
type ChatCompletionCreateParams as ChatCompletionCreateParams,
diff --git a/src/resources/chat/completions/completions.ts b/src/resources/chat/completions/completions.ts
index 494e8a59c..c5047ddb2 100644
--- a/src/resources/chat/completions/completions.ts
+++ b/src/resources/chat/completions/completions.ts
@@ -203,11 +203,11 @@ export class Completions extends APIResource {
}
}
-export interface ParsedFunction extends ChatCompletionMessageToolCall.Function {
+export interface ParsedFunction extends ChatCompletionMessageFunctionToolCall.Function {
parsed_arguments?: unknown;
}
-export interface ParsedFunctionToolCall extends ChatCompletionMessageToolCall {
+export interface ParsedFunctionToolCall extends ChatCompletionMessageFunctionToolCall {
function: ParsedFunction;
}
@@ -353,6 +353,21 @@ export namespace ChatCompletion {
}
}
+/**
+ * Constrains the tools available to the model to a pre-defined set.
+ */
+export interface ChatCompletionAllowedToolChoice {
+ /**
+ * Constrains the tools available to the model to a pre-defined set.
+ */
+ allowed_tools: ChatCompletionAllowedTools;
+
+ /**
+ * Allowed tool configuration type. Always `allowed_tools`.
+ */
+ type: 'allowed_tools';
+}
+
/**
* Messages sent by the model in response to user messages.
*/
@@ -805,6 +820,87 @@ export interface ChatCompletionContentPartText {
type: 'text';
}
+/**
+ * A custom tool that processes input using a specified format.
+ */
+export interface ChatCompletionCustomTool {
+ /**
+ * Properties of the custom tool.
+ */
+ custom: ChatCompletionCustomTool.Custom;
+
+ /**
+ * The type of the custom tool. Always `custom`.
+ */
+ type: 'custom';
+}
+
+export namespace ChatCompletionCustomTool {
+ /**
+ * Properties of the custom tool.
+ */
+ export interface Custom {
+ /**
+ * The name of the custom tool, used to identify it in tool calls.
+ */
+ name: string;
+
+ /**
+ * Optional description of the custom tool, used to provide more context.
+ */
+ description?: string;
+
+ /**
+ * The input format for the custom tool. Default is unconstrained text.
+ */
+ format?: Custom.Text | Custom.Grammar;
+ }
+
+ export namespace Custom {
+ /**
+ * Unconstrained free-form text.
+ */
+ export interface Text {
+ /**
+ * Unconstrained text format. Always `text`.
+ */
+ type: 'text';
+ }
+
+ /**
+ * A grammar defined by the user.
+ */
+ export interface Grammar {
+ /**
+ * Your chosen grammar.
+ */
+ grammar: Grammar.Grammar;
+
+ /**
+ * Grammar format. Always `grammar`.
+ */
+ type: 'grammar';
+ }
+
+ export namespace Grammar {
+ /**
+ * Your chosen grammar.
+ */
+ export interface Grammar {
+ /**
+ * The grammar definition.
+ */
+ definition: string;
+
+ /**
+ * The syntax of the grammar definition. One of `lark` or `regex`.
+ */
+ syntax: 'lark' | 'regex';
+ }
+ }
+ }
+}
+
export interface ChatCompletionDeleted {
/**
* The ID of the chat completion that was deleted.
@@ -876,6 +972,18 @@ export interface ChatCompletionFunctionMessageParam {
role: 'function';
}
+/**
+ * A function tool that can be used to generate a response.
+ */
+export interface ChatCompletionFunctionTool {
+ function: Shared.FunctionDefinition;
+
+ /**
+ * The type of the tool. Currently, only `function` is supported.
+ */
+ type: 'function';
+}
+
/**
* A chat completion message generated by the model.
*/
@@ -984,19 +1092,46 @@ export namespace ChatCompletionMessage {
}
/**
- * Developer-provided instructions that the model should follow, regardless of
- * messages sent by the user. With o1 models and newer, `developer` messages
- * replace the previous `system` messages.
+ * A call to a custom tool created by the model.
*/
-export type ChatCompletionMessageParam =
- | ChatCompletionDeveloperMessageParam
- | ChatCompletionSystemMessageParam
- | ChatCompletionUserMessageParam
- | ChatCompletionAssistantMessageParam
- | ChatCompletionToolMessageParam
- | ChatCompletionFunctionMessageParam;
+export interface ChatCompletionMessageCustomToolCall {
+ /**
+ * The ID of the tool call.
+ */
+ id: string;
+
+ /**
+ * The custom tool that the model called.
+ */
+ custom: ChatCompletionMessageCustomToolCall.Custom;
-export interface ChatCompletionMessageToolCall {
+ /**
+ * The type of the tool. Always `custom`.
+ */
+ type: 'custom';
+}
+
+export namespace ChatCompletionMessageCustomToolCall {
+ /**
+ * The custom tool that the model called.
+ */
+ export interface Custom {
+ /**
+ * The input for the custom tool call generated by the model.
+ */
+ input: string;
+
+ /**
+ * The name of the custom tool to call.
+ */
+ name: string;
+ }
+}
+
+/**
+ * A call to a function tool created by the model.
+ */
+export interface ChatCompletionMessageFunctionToolCall {
/**
* The ID of the tool call.
*/
@@ -1005,7 +1140,7 @@ export interface ChatCompletionMessageToolCall {
/**
* The function that the model called.
*/
- function: ChatCompletionMessageToolCall.Function;
+ function: ChatCompletionMessageFunctionToolCall.Function;
/**
* The type of the tool. Currently, only `function` is supported.
@@ -1013,7 +1148,7 @@ export interface ChatCompletionMessageToolCall {
type: 'function';
}
-export namespace ChatCompletionMessageToolCall {
+export namespace ChatCompletionMessageFunctionToolCall {
/**
* The function that the model called.
*/
@@ -1033,6 +1168,26 @@ export namespace ChatCompletionMessageToolCall {
}
}
+/**
+ * Developer-provided instructions that the model should follow, regardless of
+ * messages sent by the user. With o1 models and newer, `developer` messages
+ * replace the previous `system` messages.
+ */
+export type ChatCompletionMessageParam =
+ | ChatCompletionDeveloperMessageParam
+ | ChatCompletionSystemMessageParam
+ | ChatCompletionUserMessageParam
+ | ChatCompletionAssistantMessageParam
+ | ChatCompletionToolMessageParam
+ | ChatCompletionFunctionMessageParam;
+
+/**
+ * A call to a function tool created by the model.
+ */
+export type ChatCompletionMessageToolCall =
+ | ChatCompletionMessageFunctionToolCall
+ | ChatCompletionMessageCustomToolCall;
+
export type ChatCompletionModality = 'text' | 'audio';
/**
@@ -1043,7 +1198,7 @@ export interface ChatCompletionNamedToolChoice {
function: ChatCompletionNamedToolChoice.Function;
/**
- * The type of the tool. Currently, only `function` is supported.
+ * For function calling, the type is always `function`.
*/
type: 'function';
}
@@ -1057,6 +1212,28 @@ export namespace ChatCompletionNamedToolChoice {
}
}
+/**
+ * Specifies a tool the model should use. Use to force the model to call a specific
+ * custom tool.
+ */
+export interface ChatCompletionNamedToolChoiceCustom {
+ custom: ChatCompletionNamedToolChoiceCustom.Custom;
+
+ /**
+ * For custom tool calling, the type is always `custom`.
+ */
+ type: 'custom';
+}
+
+export namespace ChatCompletionNamedToolChoiceCustom {
+ export interface Custom {
+ /**
+ * The name of the custom tool to call.
+ */
+ name: string;
+ }
+}
+
/**
* Static predicted output content, such as the content of a text file that is
* being regenerated.
@@ -1101,6 +1278,16 @@ export interface ChatCompletionStoreMessage extends ChatCompletionMessage {
* Options for streaming response. Only set this when you set `stream: true`.
*/
export interface ChatCompletionStreamOptions {
+ /**
+ * When true, stream obfuscation will be enabled. Stream obfuscation adds random
+ * characters to an `obfuscation` field on streaming delta events to normalize
+ * payload sizes as a mitigation to certain side-channel attacks. These obfuscation
+ * fields are included by default, but add a small amount of overhead to the data
+ * stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+ * you trust the network links between your application and the OpenAI API.
+ */
+ include_obfuscation?: boolean;
+
/**
* If set, an additional chunk will be streamed before the `data: [DONE]` message.
* The `usage` field on this chunk shows the token usage statistics for the entire
@@ -1189,14 +1376,10 @@ export namespace ChatCompletionTokenLogprob {
}
}
-export interface ChatCompletionTool {
- function: Shared.FunctionDefinition;
-
- /**
- * The type of the tool. Currently, only `function` is supported.
- */
- type: 'function';
-}
+/**
+ * A function tool that can be used to generate a response.
+ */
+export type ChatCompletionTool = ChatCompletionFunctionTool | ChatCompletionCustomTool;
/**
* Controls which (if any) tool is called by the model. `none` means the model will
@@ -1209,7 +1392,13 @@ export interface ChatCompletionTool {
* `none` is the default when no tools are present. `auto` is the default if tools
* are present.
*/
-export type ChatCompletionToolChoiceOption = 'none' | 'auto' | 'required' | ChatCompletionNamedToolChoice;
+export type ChatCompletionToolChoiceOption =
+ | 'none'
+ | 'auto'
+ | 'required'
+ | ChatCompletionAllowedToolChoice
+ | ChatCompletionNamedToolChoice
+ | ChatCompletionNamedToolChoiceCustom;
export interface ChatCompletionToolMessageParam {
/**
@@ -1250,6 +1439,35 @@ export interface ChatCompletionUserMessageParam {
name?: string;
}
+/**
+ * Constrains the tools available to the model to a pre-defined set.
+ */
+export interface ChatCompletionAllowedTools {
+ /**
+ * Constrains the tools available to the model to a pre-defined set.
+ *
+ * `auto` allows the model to pick from among the allowed tools and generate a
+ * message.
+ *
+ * `required` requires the model to call one or more of the allowed tools.
+ */
+ mode: 'auto' | 'required';
+
+ /**
+ * A list of tool definitions that the model should be allowed to call.
+ *
+ * For the Chat Completions API, the list of tool definitions might look like:
+ *
+ * ```json
+ * [
+ * { "type": "function", "function": { "name": "get_weather" } },
+ * { "type": "function", "function": { "name": "get_time" } }
+ * ]
+ * ```
+ */
+ tools: Array<{ [key: string]: unknown }>;
+}
+
export type ChatCompletionReasoningEffort = Shared.ReasoningEffort | null;
export type ChatCompletionCreateParams =
@@ -1411,12 +1629,11 @@ export interface ChatCompletionCreateParamsBase {
prompt_cache_key?: string;
/**
- * **o-series models only**
- *
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- * result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ * effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
*/
reasoning_effort?: Shared.ReasoningEffort | null;
@@ -1532,9 +1749,9 @@ export interface ChatCompletionCreateParamsBase {
tool_choice?: ChatCompletionToolChoiceOption;
/**
- * A list of tools the model may call. Currently, only functions are supported as a
- * tool. Use this to provide a list of functions the model may generate JSON inputs
- * for. A max of 128 functions are supported.
+ * A list of tools the model may call. You can provide either
+ * [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
+ * or [function tools](https://platform.openai.com/docs/guides/function-calling).
*/
tools?: Array<ChatCompletionTool>;
@@ -1564,6 +1781,13 @@ export interface ChatCompletionCreateParamsBase {
*/
user?: string;
+ /**
+ * Constrains the verbosity of the model's response. Lower values will result in
+ * more concise responses, while higher values will result in more verbose
+ * responses. Currently supported values are `low`, `medium`, and `high`.
+ */
+ verbosity?: 'low' | 'medium' | 'high' | null;
+
/**
* This tool searches the web for relevant results to use in a response. Learn more
* about the
@@ -1736,6 +1960,7 @@ Completions.Messages = Messages;
export declare namespace Completions {
export {
type ChatCompletion as ChatCompletion,
+ type ChatCompletionAllowedToolChoice as ChatCompletionAllowedToolChoice,
type ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam,
type ChatCompletionAudio as ChatCompletionAudio,
type ChatCompletionAudioParam as ChatCompletionAudioParam,
@@ -1745,15 +1970,20 @@ export declare namespace Completions {
type ChatCompletionContentPartInputAudio as ChatCompletionContentPartInputAudio,
type ChatCompletionContentPartRefusal as ChatCompletionContentPartRefusal,
type ChatCompletionContentPartText as ChatCompletionContentPartText,
+ type ChatCompletionCustomTool as ChatCompletionCustomTool,
type ChatCompletionDeleted as ChatCompletionDeleted,
type ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam,
type ChatCompletionFunctionCallOption as ChatCompletionFunctionCallOption,
type ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam,
+ type ChatCompletionFunctionTool as ChatCompletionFunctionTool,
type ChatCompletionMessage as ChatCompletionMessage,
+ type ChatCompletionMessageCustomToolCall as ChatCompletionMessageCustomToolCall,
+ type ChatCompletionMessageFunctionToolCall as ChatCompletionMessageFunctionToolCall,
type ChatCompletionMessageParam as ChatCompletionMessageParam,
type ChatCompletionMessageToolCall as ChatCompletionMessageToolCall,
type ChatCompletionModality as ChatCompletionModality,
type ChatCompletionNamedToolChoice as ChatCompletionNamedToolChoice,
+ type ChatCompletionNamedToolChoiceCustom as ChatCompletionNamedToolChoiceCustom,
type ChatCompletionPredictionContent as ChatCompletionPredictionContent,
type ChatCompletionRole as ChatCompletionRole,
type ChatCompletionStoreMessage as ChatCompletionStoreMessage,
@@ -1764,6 +1994,7 @@ export declare namespace Completions {
type ChatCompletionToolChoiceOption as ChatCompletionToolChoiceOption,
type ChatCompletionToolMessageParam as ChatCompletionToolMessageParam,
type ChatCompletionUserMessageParam as ChatCompletionUserMessageParam,
+ type ChatCompletionAllowedTools as ChatCompletionAllowedTools,
type ChatCompletionReasoningEffort as ChatCompletionReasoningEffort,
type ChatCompletionsPage as ChatCompletionsPage,
type ChatCompletionCreateParams as ChatCompletionCreateParams,
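A hedged sketch exercising the new Chat Completions surface defined above: a grammar-constrained `ChatCompletionCustomTool`, a forced `ChatCompletionNamedToolChoiceCustom`, `reasoning_effort: 'minimal'`, and `verbosity`. The model, tool name, prompt, and grammar are illustrative placeholders, not part of this diff.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const completion = await client.chat.completions.create({
    model: 'gpt-5',
    messages: [{ role: 'user', content: 'Get the five most recent orders.' }],
    reasoning_effort: 'minimal',
    verbosity: 'low',
    tools: [
      {
        // ChatCompletionCustomTool with a grammar-constrained input format.
        type: 'custom',
        custom: {
          name: 'run_query',
          description: 'Runs a read-only SQL query',
          format: { type: 'grammar', grammar: { syntax: 'lark', definition: 'start: "SELECT" /.+/' } },
        },
      },
    ],
    // ChatCompletionNamedToolChoiceCustom: force the custom tool to be called.
    tool_choice: { type: 'custom', custom: { name: 'run_query' } },
  });

  for (const toolCall of completion.choices[0]?.message.tool_calls ?? []) {
    if (toolCall.type === 'custom') {
      // ChatCompletionMessageCustomToolCall carries free-form input.
      console.log('custom tool input:', toolCall.custom.name, toolCall.custom.input);
    } else {
      console.log('function arguments:', toolCall.function.name, toolCall.function.arguments);
    }
  }
}

main();
```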
diff --git a/src/resources/chat/completions/index.ts b/src/resources/chat/completions/index.ts
index ce1897258..6a379c1d6 100644
--- a/src/resources/chat/completions/index.ts
+++ b/src/resources/chat/completions/index.ts
@@ -3,6 +3,7 @@
export {
Completions,
type ChatCompletion,
+ type ChatCompletionAllowedToolChoice,
type ChatCompletionAssistantMessageParam,
type ChatCompletionAudio,
type ChatCompletionAudioParam,
@@ -12,15 +13,20 @@ export {
type ChatCompletionContentPartInputAudio,
type ChatCompletionContentPartRefusal,
type ChatCompletionContentPartText,
+ type ChatCompletionCustomTool,
type ChatCompletionDeleted,
type ChatCompletionDeveloperMessageParam,
type ChatCompletionFunctionCallOption,
type ChatCompletionFunctionMessageParam,
+ type ChatCompletionFunctionTool,
type ChatCompletionMessage,
+ type ChatCompletionMessageCustomToolCall,
+ type ChatCompletionMessageFunctionToolCall,
type ChatCompletionMessageParam,
type ChatCompletionMessageToolCall,
type ChatCompletionModality,
type ChatCompletionNamedToolChoice,
+ type ChatCompletionNamedToolChoiceCustom,
type ChatCompletionPredictionContent,
type ChatCompletionRole,
type ChatCompletionStoreMessage,
@@ -31,6 +37,7 @@ export {
type ChatCompletionToolChoiceOption,
type ChatCompletionToolMessageParam,
type ChatCompletionUserMessageParam,
+ type ChatCompletionAllowedTools,
type ChatCompletionCreateParams,
type ChatCompletionCreateParamsNonStreaming,
type ChatCompletionCreateParamsStreaming,
diff --git a/src/resources/chat/index.ts b/src/resources/chat/index.ts
index 3e997dd86..3004ebb08 100644
--- a/src/resources/chat/index.ts
+++ b/src/resources/chat/index.ts
@@ -4,6 +4,7 @@ export { Chat } from './chat';
export {
Completions,
type ChatCompletion,
+ type ChatCompletionAllowedToolChoice,
type ChatCompletionAssistantMessageParam,
type ChatCompletionAudio,
type ChatCompletionAudioParam,
@@ -13,15 +14,20 @@ export {
type ChatCompletionContentPartInputAudio,
type ChatCompletionContentPartRefusal,
type ChatCompletionContentPartText,
+ type ChatCompletionCustomTool,
type ChatCompletionDeleted,
type ChatCompletionDeveloperMessageParam,
type ChatCompletionFunctionCallOption,
type ChatCompletionFunctionMessageParam,
+ type ChatCompletionFunctionTool,
type ChatCompletionMessage,
+ type ChatCompletionMessageCustomToolCall,
+ type ChatCompletionMessageFunctionToolCall,
type ChatCompletionMessageParam,
type ChatCompletionMessageToolCall,
type ChatCompletionModality,
type ChatCompletionNamedToolChoice,
+ type ChatCompletionNamedToolChoiceCustom,
type ChatCompletionPredictionContent,
type ChatCompletionRole,
type ChatCompletionStoreMessage,
@@ -32,6 +38,7 @@ export {
type ChatCompletionToolChoiceOption,
type ChatCompletionToolMessageParam,
type ChatCompletionUserMessageParam,
+ type ChatCompletionAllowedTools,
type ChatCompletionCreateParams,
type ChatCompletionCreateParamsNonStreaming,
type ChatCompletionCreateParamsStreaming,
diff --git a/src/resources/evals/runs/runs.ts b/src/resources/evals/runs/runs.ts
index dd3a94cf8..702c4a13d 100644
--- a/src/resources/evals/runs/runs.ts
+++ b/src/resources/evals/runs/runs.ts
@@ -316,7 +316,7 @@ export namespace CreateEvalCompletionsRunDataSource {
* tool. Use this to provide a list of functions the model may generate JSON inputs
* for. A max of 128 functions are supported.
*/
- tools?: Array<ChatCompletionsAPI.ChatCompletionTool>;
+ tools?: Array<ChatCompletionsAPI.ChatCompletionFunctionTool>;
/**
* An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
diff --git a/src/resources/responses/responses.ts b/src/resources/responses/responses.ts
index 31334a5af..a63eb06d5 100644
--- a/src/resources/responses/responses.ts
+++ b/src/resources/responses/responses.ts
@@ -45,7 +45,8 @@ export type ParsedResponseOutputItem =
| ResponseOutputItem.LocalShellCall
| ResponseOutputItem.McpCall
| ResponseOutputItem.McpListTools
- | ResponseOutputItem.McpApprovalRequest;
+ | ResponseOutputItem.McpApprovalRequest
+ | ResponseCustomToolCall;
export interface ParsedResponse<ParsedT = null> extends Response {
output: Array<ParsedResponseOutputItem<ParsedT>>;
@@ -228,6 +229,32 @@ export interface ComputerTool {
type: 'computer_use_preview';
}
+/**
+ * A custom tool that processes input using a specified format. Learn more about
+ * [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools).
+ */
+export interface CustomTool {
+ /**
+ * The name of the custom tool, used to identify it in tool calls.
+ */
+ name: string;
+
+ /**
+ * The type of the custom tool. Always `custom`.
+ */
+ type: 'custom';
+
+ /**
+ * Optional description of the custom tool, used to provide more context.
+ */
+ description?: string;
+
+ /**
+ * The input format for the custom tool. Default is unconstrained text.
+ */
+ format?: Shared.CustomToolInputFormat;
+}
+
/**
* A message input to the model with a role indicating instruction following
* hierarchy. Instructions given with the `developer` or `system` role take
@@ -424,7 +451,13 @@ export interface Response {
* response. See the `tools` parameter to see how to specify which tools the model
* can call.
*/
- tool_choice: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction;
+ tool_choice:
+ | ToolChoiceOptions
+ | ToolChoiceAllowed
+ | ToolChoiceTypes
+ | ToolChoiceFunction
+ | ToolChoiceMcp
+ | ToolChoiceCustom;
/**
* An array of tools the model may call while generating a response. You can
@@ -439,8 +472,10 @@ export interface Response {
* Learn more about
* [built-in tools](https://platform.openai.com/docs/guides/tools).
* - **Function calls (custom tools)**: Functions that are defined by you, enabling
- * the model to call your own code. Learn more about
+ * the model to call your own code with strongly typed arguments and outputs.
+ * Learn more about
* [function calling](https://platform.openai.com/docs/guides/function-calling).
+ * You can also use custom tools to call your own code.
*/
tools: Array<Tool>;
@@ -564,6 +599,13 @@ export interface Response {
* [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
*/
user?: string;
+
+ /**
+ * Constrains the verbosity of the model's response. Lower values will result in
+ * more concise responses, while higher values will result in more verbose
+ * responses. Currently supported values are `low`, `medium`, and `high`.
+ */
+ verbosity?: 'low' | 'medium' | 'high' | null;
}
export namespace Response {
@@ -1318,6 +1360,121 @@ export interface ResponseCreatedEvent {
type: 'response.created';
}
+/**
+ * A call to a custom tool created by the model.
+ */
+export interface ResponseCustomToolCall {
+ /**
+ * An identifier used to map this custom tool call to a tool call output.
+ */
+ call_id: string;
+
+ /**
+ * The input for the custom tool call generated by the model.
+ */
+ input: string;
+
+ /**
+ * The name of the custom tool being called.
+ */
+ name: string;
+
+ /**
+ * The type of the custom tool call. Always `custom_tool_call`.
+ */
+ type: 'custom_tool_call';
+
+ /**
+ * The unique ID of the custom tool call in the OpenAI platform.
+ */
+ id?: string;
+}
+
+/**
+ * Event representing a delta (partial update) to the input of a custom tool call.
+ */
+export interface ResponseCustomToolCallInputDeltaEvent {
+ /**
+ * The incremental input data (delta) for the custom tool call.
+ */
+ delta: string;
+
+ /**
+ * Unique identifier for the API item associated with this event.
+ */
+ item_id: string;
+
+ /**
+ * The index of the output this delta applies to.
+ */
+ output_index: number;
+
+ /**
+ * The sequence number of this event.
+ */
+ sequence_number: number;
+
+ /**
+ * The event type identifier.
+ */
+ type: 'response.custom_tool_call_input.delta';
+}
+
+/**
+ * Event indicating that input for a custom tool call is complete.
+ */
+export interface ResponseCustomToolCallInputDoneEvent {
+ /**
+ * The complete input data for the custom tool call.
+ */
+ input: string;
+
+ /**
+ * Unique identifier for the API item associated with this event.
+ */
+ item_id: string;
+
+ /**
+ * The index of the output this event applies to.
+ */
+ output_index: number;
+
+ /**
+ * The sequence number of this event.
+ */
+ sequence_number: number;
+
+ /**
+ * The event type identifier.
+ */
+ type: 'response.custom_tool_call_input.done';
+}
+
+/**
+ * The output of a custom tool call from your code, being sent back to the model.
+ */
+export interface ResponseCustomToolCallOutput {
+ /**
+ * The call ID, used to map this custom tool call output to a custom tool call.
+ */
+ call_id: string;
+
+ /**
+ * The output from the custom tool call generated by your code.
+ */
+ output: string;
+
+ /**
+ * The type of the custom tool call output. Always `custom_tool_call_output`.
+ */
+ type: 'custom_tool_call_output';
+
+ /**
+ * The unique ID of the custom tool call output in the OpenAI platform.
+ */
+ id?: string;
+}
+
/**
* An error object returned when the model fails to generate a Response.
*/
@@ -2102,6 +2259,8 @@ export type ResponseInputItem =
| ResponseInputItem.McpApprovalRequest
| ResponseInputItem.McpApprovalResponse
| ResponseInputItem.McpCall
+ | ResponseCustomToolCallOutput
+ | ResponseCustomToolCall
| ResponseInputItem.ItemReference;
export namespace ResponseInputItem {
@@ -3110,7 +3269,8 @@ export type ResponseOutputItem =
| ResponseOutputItem.LocalShellCall
| ResponseOutputItem.McpCall
| ResponseOutputItem.McpListTools
- | ResponseOutputItem.McpApprovalRequest;
+ | ResponseOutputItem.McpApprovalRequest
+ | ResponseCustomToolCall;
export namespace ResponseOutputItem {
/**
@@ -4117,7 +4277,9 @@ export type ResponseStreamEvent =
| ResponseMcpListToolsFailedEvent
| ResponseMcpListToolsInProgressEvent
| ResponseOutputTextAnnotationAddedEvent
- | ResponseQueuedEvent;
+ | ResponseQueuedEvent
+ | ResponseCustomToolCallInputDeltaEvent
+ | ResponseCustomToolCallInputDoneEvent;
/**
* Configuration options for a text response from the model. Can be plain text or
@@ -4441,7 +4603,8 @@ export type Tool =
| Tool.Mcp
| Tool.CodeInterpreter
| Tool.ImageGeneration
- | Tool.LocalShell;
+ | Tool.LocalShell
+ | CustomTool;
export namespace Tool {
/**
@@ -4664,6 +4827,56 @@ export namespace Tool {
}
}
+/**
+ * Constrains the tools available to the model to a pre-defined set.
+ */
+export interface ToolChoiceAllowed {
+ /**
+ * Constrains the tools available to the model to a pre-defined set.
+ *
+ * `auto` allows the model to pick from among the allowed tools and generate a
+ * message.
+ *
+ * `required` requires the model to call one or more of the allowed tools.
+ */
+ mode: 'auto' | 'required';
+
+ /**
+ * A list of tool definitions that the model should be allowed to call.
+ *
+ * For the Responses API, the list of tool definitions might look like:
+ *
+ * ```json
+ * [
+ * { "type": "function", "name": "get_weather" },
+ * { "type": "mcp", "server_label": "deepwiki" },
+ * { "type": "image_generation" }
+ * ]
+ * ```
+ */
+ tools: Array<{ [key: string]: unknown }>;
+
+ /**
+ * Allowed tool configuration type. Always `allowed_tools`.
+ */
+ type: 'allowed_tools';
+}
+
+/**
+ * Use this option to force the model to call a specific custom tool.
+ */
+export interface ToolChoiceCustom {
+ /**
+ * The name of the custom tool to call.
+ */
+ name: string;
+
+ /**
+ * For custom tool calling, the type is always `custom`.
+ */
+ type: 'custom';
+}
+
/**
* Use this option to force the model to call a specific function.
*/
@@ -4679,6 +4892,27 @@ export interface ToolChoiceFunction {
type: 'function';
}
+/**
+ * Use this option to force the model to call a specific tool on a remote MCP
+ * server.
+ */
+export interface ToolChoiceMcp {
+ /**
+ * The label of the MCP server to use.
+ */
+ server_label: string;
+
+ /**
+ * For MCP tools, the type is always `mcp`.
+ */
+ type: 'mcp';
+
+ /**
+ * The name of the tool to call on the server.
+ */
+ name?: string | null;
+}
+
/**
* Controls which (if any) tool is called by the model.
*
@@ -4930,6 +5164,11 @@ export interface ResponseCreateParamsBase {
*/
stream?: boolean | null;
+ /**
+ * Options for streaming responses. Only set this when you set `stream: true`.
+ */
+ stream_options?: ResponseCreateParams.StreamOptions | null;
+
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
* make the output more random, while lower values like 0.2 will make it more
@@ -4952,7 +5191,13 @@ export interface ResponseCreateParamsBase {
* response. See the `tools` parameter to see how to specify which tools the model
* can call.
*/
- tool_choice?: ToolChoiceOptions | ToolChoiceTypes | ToolChoiceFunction;
+ tool_choice?:
+ | ToolChoiceOptions
+ | ToolChoiceAllowed
+ | ToolChoiceTypes
+ | ToolChoiceFunction
+ | ToolChoiceMcp
+ | ToolChoiceCustom;
/**
* An array of tools the model may call while generating a response. You can
@@ -4967,8 +5212,10 @@ export interface ResponseCreateParamsBase {
* Learn more about
* [built-in tools](https://platform.openai.com/docs/guides/tools).
* - **Function calls (custom tools)**: Functions that are defined by you, enabling
- * the model to call your own code. Learn more about
+ * the model to call your own code with strongly typed arguments and outputs.
+ * Learn more about
* [function calling](https://platform.openai.com/docs/guides/function-calling).
+ * You can also use custom tools to call your own code.
*/
tools?: Array<Tool>;
@@ -5001,9 +5248,31 @@ export interface ResponseCreateParamsBase {
* [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
*/
user?: string;
+
+ /**
+ * Constrains the verbosity of the model's response. Lower values will result in
+ * more concise responses, while higher values will result in more verbose
+ * responses. Currently supported values are `low`, `medium`, and `high`.
+ */
+ verbosity?: 'low' | 'medium' | 'high' | null;
}
export namespace ResponseCreateParams {
+ /**
+ * Options for streaming responses. Only set this when you set `stream: true`.
+ */
+ export interface StreamOptions {
+ /**
+ * When true, stream obfuscation will be enabled. Stream obfuscation adds random
+ * characters to an `obfuscation` field on streaming delta events to normalize
+ * payload sizes as a mitigation to certain side-channel attacks. These obfuscation
+ * fields are included by default, but add a small amount of overhead to the data
+ * stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+ * you trust the network links between your application and the OpenAI API.
+ */
+ include_obfuscation?: boolean;
+ }
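A sketch of opting out of stream obfuscation when streaming a response over a link you trust; the prompt is arbitrary:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// Obfuscation padding is on by default; disable it only to save bandwidth
// when you trust the network path between your app and the API.
const stream = await client.responses.create({
  model: 'gpt-5',
  input: 'Write a haiku about the ocean.',
  stream: true,
  stream_options: { include_obfuscation: false },
});

for await (const event of stream) {
  if (event.type === 'response.output_text.delta') {
    process.stdout.write(event.delta);
  }
}
```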
+
export type ResponseCreateParamsNonStreaming = ResponsesAPI.ResponseCreateParamsNonStreaming;
export type ResponseCreateParamsStreaming = ResponsesAPI.ResponseCreateParamsStreaming;
}
@@ -5041,6 +5310,16 @@ export interface ResponseRetrieveParamsBase {
*/
include?: Array<ResponseIncludable>;
+ /**
+ * When true, stream obfuscation will be enabled. Stream obfuscation adds random
+ * characters to an `obfuscation` field on streaming delta events to normalize
+ * payload sizes as a mitigation to certain side-channel attacks. These obfuscation
+ * fields are included by default, but add a small amount of overhead to the data
+ * stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+ * you trust the network links between your application and the OpenAI API.
+ */
+ include_obfuscation?: boolean;
+
/**
* The sequence number of the event after which to start streaming.
*/
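A sketch of re-attaching to an in-progress response with the new retrieve-time flag; the response id and sequence number are placeholders:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// Resume streaming events after a known sequence number, without
// obfuscation padding on the resumed stream.
const stream = await client.responses.retrieve('resp_123', {
  stream: true,
  include_obfuscation: false,
  starting_after: 42,
});

for await (const event of stream) {
  console.log(event.sequence_number, event.type);
}
```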
@@ -5091,6 +5370,7 @@ Responses.InputItems = InputItems;
export declare namespace Responses {
export {
type ComputerTool as ComputerTool,
+ type CustomTool as CustomTool,
type EasyInputMessage as EasyInputMessage,
type FileSearchTool as FileSearchTool,
type FunctionTool as FunctionTool,
@@ -5113,6 +5393,10 @@ export declare namespace Responses {
type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent,
type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent,
type ResponseCreatedEvent as ResponseCreatedEvent,
+ type ResponseCustomToolCall as ResponseCustomToolCall,
+ type ResponseCustomToolCallInputDeltaEvent as ResponseCustomToolCallInputDeltaEvent,
+ type ResponseCustomToolCallInputDoneEvent as ResponseCustomToolCallInputDoneEvent,
+ type ResponseCustomToolCallOutput as ResponseCustomToolCallOutput,
type ResponseError as ResponseError,
type ResponseErrorEvent as ResponseErrorEvent,
type ResponseFailedEvent as ResponseFailedEvent,
@@ -5182,7 +5466,10 @@ export declare namespace Responses {
type ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent,
type ResponseWebSearchCallSearchingEvent as ResponseWebSearchCallSearchingEvent,
type Tool as Tool,
+ type ToolChoiceAllowed as ToolChoiceAllowed,
+ type ToolChoiceCustom as ToolChoiceCustom,
type ToolChoiceFunction as ToolChoiceFunction,
+ type ToolChoiceMcp as ToolChoiceMcp,
type ToolChoiceOptions as ToolChoiceOptions,
type ToolChoiceTypes as ToolChoiceTypes,
type WebSearchTool as WebSearchTool,
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
index 5e2a02524..8acf64d5c 100644
--- a/src/resources/shared.ts
+++ b/src/resources/shared.ts
@@ -15,6 +15,13 @@ export type AllModels =
| 'computer-use-preview-2025-03-11';
export type ChatModel =
+ | 'gpt-5'
+ | 'gpt-5-mini'
+ | 'gpt-5-nano'
+ | 'gpt-5-2025-08-07'
+ | 'gpt-5-mini-2025-08-07'
+ | 'gpt-5-nano-2025-08-07'
+ | 'gpt-5-chat-latest'
| 'gpt-4.1'
| 'gpt-4.1-mini'
| 'gpt-4.1-nano'
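The new ids type-check anywhere a `ChatModel` is accepted, for example:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// 'gpt-5', 'gpt-5-mini', 'gpt-5-nano', the dated snapshots, and
// 'gpt-5-chat-latest' are all valid ChatModel values as of this release.
const completion = await client.chat.completions.create({
  model: 'gpt-5-mini',
  messages: [{ role: 'user', content: 'Say hello in one word.' }],
});

console.log(completion.choices[0]?.message.content);
```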
@@ -116,6 +123,43 @@ export interface CompoundFilter {
type: 'and' | 'or';
}
+/**
+ * The input format for the custom tool. Default is unconstrained text.
+ */
+export type CustomToolInputFormat = CustomToolInputFormat.Text | CustomToolInputFormat.Grammar;
+
+export namespace CustomToolInputFormat {
+ /**
+ * Unconstrained free-form text.
+ */
+ export interface Text {
+ /**
+ * Unconstrained text format. Always `text`.
+ */
+ type: 'text';
+ }
+
+ /**
+ * A grammar defined by the user.
+ */
+ export interface Grammar {
+ /**
+ * The grammar definition.
+ */
+ definition: string;
+
+ /**
+ * The syntax of the grammar definition. One of `lark` or `regex`.
+ */
+ syntax: 'lark' | 'regex';
+
+ /**
+ * Grammar format. Always `grammar`.
+ */
+ type: 'grammar';
+ }
+}
+
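A sketch of constraining a custom tool's input with a regex grammar instead of the default unconstrained text; the tool name and pattern are illustrative, and the `format` field is assumed to hang off the `CustomTool` type added in this release:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// The model's tool-call input must match the regex instead of being free text.
const response = await client.responses.create({
  model: 'gpt-5',
  input: 'Call the tool with the ISO date for next Monday.',
  tools: [
    {
      type: 'custom',
      name: 'pick_date', // hypothetical tool, for illustration only
      description: 'Receives a single ISO-8601 date.',
      format: {
        type: 'grammar',
        syntax: 'regex',
        definition: String.raw`\d{4}-\d{2}-\d{2}`,
      },
    },
  ],
});
```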
export interface ErrorObject {
code: string | null;
@@ -189,12 +233,11 @@ export type Metadata = { [key: string]: string };
*/
export interface Reasoning {
/**
- * **o-series models only**
- *
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- * result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ * effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
*/
effort?: ReasoningEffort | null;
@@ -216,14 +259,13 @@ export interface Reasoning {
}
/**
- * **o-series models only**
- *
* Constrains effort on reasoning for
* [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
- * supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
- * result in faster responses and fewer tokens used on reasoning in a response.
+ * supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
+ * effort can result in faster responses and fewer tokens used on reasoning in a
+ * response.
*/
-export type ReasoningEffort = 'low' | 'medium' | 'high' | null;
+export type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high' | null;
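A minimal sketch of the new `'minimal'` effort level on the Responses API (Chat Completions takes the same value via `reasoning_effort`, as the updated tests below show):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

// 'minimal' sits below 'low' in the widened ReasoningEffort union and trades
// reasoning depth for lower latency and fewer reasoning tokens.
const response = await client.responses.create({
  model: 'gpt-5',
  input: 'Summarize the plot of Hamlet in two sentences.',
  reasoning: { effort: 'minimal' },
});

console.log(response.output_text);
```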
/**
* JSON object response format. An older method of generating JSON responses. Using
@@ -298,6 +340,34 @@ export interface ResponseFormatText {
type: 'text';
}
+/**
+ * A custom grammar for the model to follow when generating text. Learn more in the
+ * [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars).
+ */
+export interface ResponseFormatTextGrammar {
+ /**
+ * The custom grammar for the model to follow.
+ */
+ grammar: string;
+
+ /**
+ * The type of response format being defined. Always `grammar`.
+ */
+ type: 'grammar';
+}
+
+/**
+ * Configure the model to generate valid Python code. See the
+ * [custom grammars guide](https://platform.openai.com/docs/guides/custom-grammars)
+ * for more details.
+ */
+export interface ResponseFormatTextPython {
+ /**
+ * The type of response format being defined. Always `python`.
+ */
+ type: 'python';
+}
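The two new response-format shapes written out against the shared types; this diff only adds the type definitions, so where the API accepts them is left to the custom grammars guide, and the Lark grammar below is purely illustrative:

```ts
import type {
  ResponseFormatTextGrammar,
  ResponseFormatTextPython,
} from 'openai/resources/shared';

// Constrain free-text output with a tiny Lark grammar.
const yesNoGrammar: ResponseFormatTextGrammar = {
  type: 'grammar',
  grammar: 'start: "yes" | "no"',
};

// Or require the output to be valid Python source.
const pythonOnly: ResponseFormatTextPython = { type: 'python' };
```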
+
export type ResponsesModel =
| (string & {})
| ChatModel
diff --git a/src/version.ts b/src/version.ts
index 5ae28544b..f5b55c198 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const VERSION = '5.12.0'; // x-release-please-version
+export const VERSION = '5.12.1'; // x-release-please-version
diff --git a/tests/api-resources/beta/assistants.test.ts b/tests/api-resources/beta/assistants.test.ts
index 8bdbc408e..6551d2d9a 100644
--- a/tests/api-resources/beta/assistants.test.ts
+++ b/tests/api-resources/beta/assistants.test.ts
@@ -26,7 +26,7 @@ describe('resource assistants', () => {
instructions: 'instructions',
metadata: { foo: 'string' },
name: 'name',
- reasoning_effort: 'low',
+ reasoning_effort: 'minimal',
response_format: 'auto',
temperature: 1,
tool_resources: {
diff --git a/tests/api-resources/beta/threads/runs/runs.test.ts b/tests/api-resources/beta/threads/runs/runs.test.ts
index becea1425..1e537d676 100644
--- a/tests/api-resources/beta/threads/runs/runs.test.ts
+++ b/tests/api-resources/beta/threads/runs/runs.test.ts
@@ -38,7 +38,7 @@ describe('resource runs', () => {
metadata: { foo: 'string' },
model: 'string',
parallel_tool_calls: true,
- reasoning_effort: 'low',
+ reasoning_effort: 'minimal',
response_format: 'auto',
stream: false,
temperature: 1,
diff --git a/tests/api-resources/chat/completions/completions.test.ts b/tests/api-resources/chat/completions/completions.test.ts
index 6869136a5..3a2fc3670 100644
--- a/tests/api-resources/chat/completions/completions.test.ts
+++ b/tests/api-resources/chat/completions/completions.test.ts
@@ -41,7 +41,7 @@ describe('resource completions', () => {
prediction: { content: 'string', type: 'content' },
presence_penalty: -2,
prompt_cache_key: 'prompt-cache-key-1234',
- reasoning_effort: 'low',
+ reasoning_effort: 'minimal',
response_format: { type: 'text' },
safety_identifier: 'safety-identifier-1234',
seed: -9007199254740991,
@@ -49,7 +49,7 @@ describe('resource completions', () => {
stop: '\n',
store: true,
stream: false,
- stream_options: { include_usage: true },
+ stream_options: { include_obfuscation: true, include_usage: true },
temperature: 1,
tool_choice: 'none',
tools: [
@@ -61,6 +61,7 @@ describe('resource completions', () => {
top_logprobs: 0,
top_p: 1,
user: 'user-1234',
+ verbosity: 'low',
web_search_options: {
search_context_size: 'low',
user_location: {
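A sketch that exercises the same new Chat Completions knobs the updated test covers — minimal reasoning effort, low verbosity, and obfuscation control on the stream:

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const stream = await client.chat.completions.create({
  model: 'gpt-5',
  messages: [{ role: 'user', content: 'Give me one fun fact about octopuses.' }],
  reasoning_effort: 'minimal',
  verbosity: 'low',
  stream: true,
  stream_options: { include_usage: true, include_obfuscation: true },
});

for await (const chunk of stream) {
  // The final usage chunk has an empty choices array, hence the optional chain.
  process.stdout.write(chunk.choices[0]?.delta.content ?? '');
}
```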
diff --git a/tests/api-resources/completions.test.ts b/tests/api-resources/completions.test.ts
index a9d188a2b..d89a98749 100644
--- a/tests/api-resources/completions.test.ts
+++ b/tests/api-resources/completions.test.ts
@@ -34,7 +34,7 @@ describe('resource completions', () => {
seed: 0,
stop: '\n',
stream: false,
- stream_options: { include_usage: true },
+ stream_options: { include_obfuscation: true, include_usage: true },
suffix: 'test.',
temperature: 1,
top_p: 1,
diff --git a/tests/api-resources/responses/responses.test.ts b/tests/api-resources/responses/responses.test.ts
index f234a15bb..3f51d72a2 100644
--- a/tests/api-resources/responses/responses.test.ts
+++ b/tests/api-resources/responses/responses.test.ts
@@ -41,7 +41,12 @@ describe('resource responses', () => {
await expect(
client.responses.retrieve(
'resp_677efb5139a88190b512bc3fef8e535d',
- { include: ['code_interpreter_call.outputs'], starting_after: 0, stream: false },
+ {
+ include: ['code_interpreter_call.outputs'],
+ include_obfuscation: true,
+ starting_after: 0,
+ stream: false,
+ },
{ path: '/_stainless_unknown_path' },
),
).rejects.toThrow(OpenAI.NotFoundError);
diff --git a/tests/lib/ChatCompletionRunFunctions.test.ts b/tests/lib/ChatCompletionRunFunctions.test.ts
index 42ea3de98..f14257ccd 100644
--- a/tests/lib/ChatCompletionRunFunctions.test.ts
+++ b/tests/lib/ChatCompletionRunFunctions.test.ts
@@ -130,12 +130,12 @@ class RunnerListener {
readonly contents: string[] = [];
readonly messages: ChatCompletionMessageParam[] = [];
readonly chatCompletions: OpenAI.Chat.ChatCompletion[] = [];
- readonly functionCalls: OpenAI.Chat.ChatCompletionMessageToolCall.Function[] = [];
+ readonly functionCalls: OpenAI.Chat.ChatCompletionMessageFunctionToolCall.Function[] = [];
readonly functionCallResults: string[] = [];
finalContent: string | null = null;
finalMessage: ChatCompletionMessageParam | undefined;
finalChatCompletion: OpenAI.Chat.ChatCompletion | undefined;
- finalFunctionCall: OpenAI.Chat.ChatCompletionMessageToolCall.Function | undefined;
+ finalFunctionCall: OpenAI.Chat.ChatCompletionMessageFunctionToolCall.Function | undefined;
finalFunctionCallResult: string | undefined;
totalUsage: OpenAI.CompletionUsage | undefined;
error: OpenAIError | undefined;
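The renames above reflect `ChatCompletionMessageToolCall` becoming a union of function and custom tool calls. A sketch of how downstream code might narrow it, assuming the custom variant carries a `custom: { name, input }` payload as in this release's generated types:

```ts
import OpenAI from 'openai';

// Code that previously read `.function` directly should narrow on `type`
// first, since custom tool calls have no `function` property.
function describeToolCall(call: OpenAI.Chat.ChatCompletionMessageToolCall): string {
  if (call.type === 'function') {
    return `function ${call.function.name}(${call.function.arguments})`;
  }
  return `custom ${call.custom.name} <- ${call.custom.input}`;
}
```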
@@ -247,13 +247,13 @@ class StreamingRunnerListener {
readonly eventContents: [string, string][] = [];
readonly eventMessages: ChatCompletionMessageParam[] = [];
readonly eventChatCompletions: OpenAI.Chat.ChatCompletion[] = [];
- readonly eventFunctionCalls: OpenAI.Chat.ChatCompletionMessageToolCall.Function[] = [];
+ readonly eventFunctionCalls: OpenAI.Chat.ChatCompletionMessageFunctionToolCall.Function[] = [];
readonly eventFunctionCallResults: string[] = [];
finalContent: string | null = null;
finalMessage: ChatCompletionMessageParam | undefined;
finalChatCompletion: OpenAI.Chat.ChatCompletion | undefined;
- finalFunctionCall: OpenAI.Chat.ChatCompletionMessageToolCall.Function | undefined;
+ finalFunctionCall: OpenAI.Chat.ChatCompletionMessageFunctionToolCall.Function | undefined;
finalFunctionCallResult: string | undefined;
error: OpenAIError | undefined;
gotConnect = false;