Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .changeset/hungry-sheep-end.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
'@openai/agents-openai': patch
'@openai/agents-core': patch
---

Add typed reasoning / text options to ModelSettings
6 changes: 2 additions & 4 deletions examples/agent-patterns/agents-as-tools.ts
Original file line number Diff line number Diff line change
Expand Up @@ -33,10 +33,8 @@ const orchestratorAgent = new Agent({
runConfig: {
model: 'gpt-5',
modelSettings: {
providerData: {
reasoning: { effort: 'low' },
text: { verbosity: 'low' },
},
reasoning: { effort: 'low' },
text: { verbosity: 'low' },
},
},
runOptions: {
Expand Down
38 changes: 38 additions & 0 deletions examples/ai-sdk/gpt-5.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
import { Agent, run, tool } from '@openai/agents';
import { aisdk } from '@openai/agents-extensions';
import { z } from 'zod';
import { openai } from '@ai-sdk/openai';

export async function main() {
const getWeatherTool = tool({
name: 'get_weather',
description: 'Get the weather for a given city',
parameters: z.object({ city: z.string() }),
execute: async ({ city }) => `The weather in ${city} is sunny`,
});
const agent = new Agent({
name: 'Helpful Assistant',
instructions:
'You are a helpful assistant. When you need to get the weather, you must use tools.',
tools: [getWeatherTool],
model: aisdk(openai('gpt-5-mini')),
modelSettings: {
providerData: {
providerOptions: {
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just to clarify: I didn't make any changes to ai-sdk integration; Passing these options is the standard way for ai-sdk provider for OAI, so I added example code.

openai: {
reasoningEffort: 'minimal',
textVerbosity: 'low',
},
},
},
},
});

const result = await run(
agent,
'Hello what is the weather in San Francisco?',
);
console.log(result.finalOutput);
}

main().catch(console.error);
1 change: 1 addition & 0 deletions examples/ai-sdk/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
"scripts": {
"build-check": "tsc --noEmit",
"start": "tsx index.ts",
"start:gpt-5": "tsx gpt-5.ts",
"start:stream": "tsx stream.ts"
}
}
41 changes: 31 additions & 10 deletions examples/basic/hello-world-gpt-5.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,38 +8,59 @@ const output = z.object({
});

async function main() {
const prompt =
'Tell me about recursion in programming. Quickly responding with a single answer is fine.';

const agent = new Agent({
name: 'GPT-5 Assistant',
model: 'gpt-5',
instructions: "You're a helpful assistant.",
modelSettings: {
providerData: {
reasoning: { effort: 'minimal' },
text: { verbosity: 'low' },
},
reasoning: { effort: 'minimal' },
text: { verbosity: 'low' },
},
outputType: output,
});

const prompt =
'Tell me about recursion in programming. Quickly responding with a single answer is fine.';
const result = await run(agent, prompt);
console.log(result.finalOutput);

// The following code works in the same way:
// const agent2 = agent.clone({
// modelSettings: {
// providerData: {
// reasoning: { effort: 'minimal' },
// text: { verbosity: 'low' },
// }
// },
// });
// const result2 = await run(agent2, prompt);
// console.log(result2.finalOutput);

const completionsAgent = new Agent({
name: 'GPT-5 Assistant',
model: new OpenAIChatCompletionsModel(new OpenAI(), 'gpt-5'),
instructions: "You're a helpful assistant.",
modelSettings: {
providerData: {
reasoning_effort: 'minimal',
verbosity: 'low',
},
reasoning: { effort: 'minimal' },
text: { verbosity: 'low' },
},
outputType: output,
});
const completionsResult = await run(completionsAgent, prompt);
console.log(completionsResult.finalOutput);

// The following code works in the same way:
// const completionsAgent2 = completionsAgent.clone({
// modelSettings: {
// providerData: {
// reasoning_effort: 'minimal',
// verbosity: 'low',
// }
// },
// });
// const completionsResult2 = await run(completionsAgent2, prompt);
// console.log(completionsResult2.finalOutput);
}

if (require.main === module) {
Expand Down
4 changes: 1 addition & 3 deletions examples/basic/hello-world-gpt-oss.ts
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,7 @@ async function main() {
'gpt-oss:20b',
),
instructions: 'You answer questions concisely and to the point.',
modelSettings: {
providerData: { reasoning: { effort: 'low' } },
},
modelSettings: { reasoning: { effort: 'low' } },
});

const question = 'Tell me about recursion in programming.';
Expand Down
11 changes: 2 additions & 9 deletions examples/basic/reasoning.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,15 +9,8 @@ async function main() {
name: 'Agent',
model: 'gpt-5',
modelSettings: {
providerData: {
reasoning: {
effort: 'high',
summary: 'auto',
},
text: {
verbosity: 'high',
},
},
reasoning: { effort: 'high', summary: 'auto' },
text: { verbosity: 'high' },
},
});

Expand Down
4 changes: 2 additions & 2 deletions examples/tools/web-search-filters.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,9 @@ async function main() {
}),
],
modelSettings: {
reasoning: { effort: 'low' },
text: { verbosity: 'low' },
providerData: {
reasoning: { effort: 'low' },
text: { verbosity: 'low' },
// https://platform.openai.com/docs/guides/tools-web-search?api-mode=responses#sources
include: ['web_search_call.action.sources'],
},
Expand Down
12 changes: 5 additions & 7 deletions packages/agents-core/src/defaultModel.ts
Original file line number Diff line number Diff line change
Expand Up @@ -43,13 +43,11 @@ export function getDefaultModelSettings(model?: string): ModelSettings {
const _model = model ?? getDefaultModel();
if (gpt5ReasoningSettingsRequired(_model)) {
return {
providerData: {
// We chose "low" instead of "minimal" because some of the built-in tools
// (e.g., file search, image generation, etc.) do not support "minimal"
// If you want to use "minimal" reasoning effort, you can pass your own model settings
reasoning: { effort: 'low' },
text: { verbosity: 'low' },
},
// We chose "low" instead of "minimal" because some of the built-in tools
// (e.g., file search, image generation, etc.) do not support "minimal"
// If you want to use "minimal" reasoning effort, you can pass your own model settings
reasoning: { effort: 'low' },
text: { verbosity: 'low' },
};
}
return {};
Expand Down
50 changes: 50 additions & 0 deletions packages/agents-core/src/model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,46 @@ export type ModelSettingsToolChoice =
| 'none'
| (string & {});

/**
 * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
 * Currently supported values are `minimal`, `low`, `medium`, and `high`.
 * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
 */
export type ModelSettingsReasoningEffort =
  | 'minimal'
  | 'low'
  | 'medium'
  | 'high'
  | null;

/**
 * Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
 */
export type ModelSettingsReasoning = {
  /**
   * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
   * Currently supported values are `minimal`, `low`, `medium`, and `high`.
   * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
   */
  // Note: ModelSettingsReasoningEffort already includes `null`, so no extra
  // `| null` is needed here (the previous `| null` was redundant).
  effort?: ModelSettingsReasoningEffort;

  /**
   * A summary of the reasoning performed by the model.
   * This can be useful for debugging and understanding the model's reasoning process.
   * One of `auto`, `concise`, or `detailed`.
   */
  summary?: 'auto' | 'concise' | 'detailed' | null;
};

/**
 * Configuration options for the model's text output.
 */
export interface ModelSettingsText {
  /**
   * Constrains the verbosity of the model's response.
   * Lower values will result in more concise responses, while higher values will result in more verbose responses.
   * Currently supported values are `low`, `medium`, and `high`.
   */
  verbosity?: 'low' | 'medium' | 'high' | null;
}

/**
* Settings to use when calling an LLM.
*
Expand Down Expand Up @@ -75,6 +115,16 @@ export type ModelSettings = {
*/
store?: boolean;

/**
* The reasoning settings to use when calling the model.
*/
reasoning?: ModelSettingsReasoning;

/**
* The text settings to use when calling the model.
*/
text?: ModelSettingsText;

/**
* Additional provider specific settings to be passed directly to the model
* request.
Expand Down
8 changes: 3 additions & 5 deletions packages/agents-core/test/agent.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ import { Handoff, handoff } from '../src/handoff';
import { z } from 'zod';
import { JsonSchemaDefinition, setDefaultModelProvider } from '../src';
import { FakeModelProvider } from './stubs';
import { Runner } from '../src/run';
import { Runner, RunConfig } from '../src/run';

describe('Agent', () => {
afterEach(() => {
Expand Down Expand Up @@ -221,12 +221,10 @@ describe('Agent', () => {
.spyOn(Runner.prototype, 'run')
.mockImplementation(async () => mockResult);

const runConfig = {
const runConfig: Partial<RunConfig> = {
model: 'gpt-5',
modelSettings: {
providerData: {
reasoning: { effort: 'low' },
},
reasoning: { effort: 'low' },
},
};
const runOptions = {
Expand Down
17 changes: 16 additions & 1 deletion packages/agents-openai/src/openaiChatCompletionsModel.ts
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,8 @@ export class OpenAIChatCompletionsModel implements Model {
top_p: request.modelSettings.topP,
frequency_penalty: request.modelSettings.frequencyPenalty,
presence_penalty: request.modelSettings.presencePenalty,
reasoning_effort: request.modelSettings.reasoning?.effort,
verbosity: request.modelSettings.text?.verbosity,
}
: { base_url: this.#client.baseURL };
const response = await this.#fetchResponse(request, span, false);
Expand Down Expand Up @@ -296,6 +298,19 @@ export class OpenAIChatCompletionsModel implements Model {
span.spanData.input = messages;
}

const providerData = request.modelSettings.providerData ?? {};
if (
request.modelSettings.reasoning &&
request.modelSettings.reasoning.effort
) {
// merge the top-level reasoning.effort into provider data
providerData.reasoning_effort = request.modelSettings.reasoning.effort;
}
if (request.modelSettings.text && request.modelSettings.text.verbosity) {
// merge the top-level text.verbosity into provider data
providerData.verbosity = request.modelSettings.text.verbosity;
}
Comment on lines +301 to +312

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1: Strip GPT-5-only settings before forwarding to non-GPT-5 models

The new modelSettings.reasoning and .text fields are always translated into reasoning_effort and verbosity when building the chat-completions request. However, the existing compatibility shim that removes GPT‑5-only parameters when a run resolves to a non‑GPT‑5 model still only inspects providerData. If an agent uses the new typed fields and the runtime swaps to a non‑GPT‑5 model (e.g. the runner chooses gpt-4o), these lines will still populate reasoning_effort/verbosity and the OpenAI API rejects the request. The filter needs to include the new top-level fields or these assignments should be gated by model capabilities.

Useful? React with 👍 / 👎.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@codex can you suggest potential improvements for it?

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Summary

Testing

  • ⚠️ pnpm test (not run; only provided review feedback)

View task →

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks, this is helpful. I will make the non-GPT-5 filter more robust.


const requestData = {
model: this.#model,
messages,
Expand All @@ -310,7 +325,7 @@ export class OpenAIChatCompletionsModel implements Model {
parallel_tool_calls: parallelToolCalls,
stream,
store: request.modelSettings.store,
...request.modelSettings.providerData,
...providerData,
};

if (logger.dontLogModelData) {
Expand Down
15 changes: 14 additions & 1 deletion packages/agents-openai/src/openaiResponsesModel.ts
Original file line number Diff line number Diff line change
Expand Up @@ -852,7 +852,20 @@ export class OpenAIResponsesModel implements Model {
const toolChoice = getToolChoice(request.modelSettings.toolChoice);
const { text, ...restOfProviderData } =
request.modelSettings.providerData ?? {};
const responseFormat = getResponseFormat(request.outputType, text);
if (request.modelSettings.reasoning) {
// Merge top-level reasoning settings with provider data
restOfProviderData.reasoning = {
...request.modelSettings.reasoning,
...restOfProviderData.reasoning,
};
}
let mergedText = text;
if (request.modelSettings.text) {
// Merge top-level text settings with provider data
mergedText = { ...request.modelSettings.text, ...text };
}
const responseFormat = getResponseFormat(request.outputType, mergedText);

const prompt = getPrompt(request.prompt);

let parallelToolCalls: boolean | undefined = undefined;
Expand Down