
Commit 3115177

Add typed reasoning / text options to ModelSettings (#513)
1 parent ea2328e commit 3115177

17 files changed, +415 -49 lines changed

.changeset/hungry-sheep-end.md

Lines changed: 6 additions & 0 deletions

@@ -0,0 +1,6 @@
+---
+'@openai/agents-openai': patch
+'@openai/agents-core': patch
+---
+
+Add typed reasoning / text options to ModelSettings
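
In practice, this means `reasoning` and `text` become typed, first-class fields on `modelSettings` instead of untyped keys nested under `providerData`. A minimal sketch of the new shape (the agent name and prompt are illustrative; `Agent` and `run` come from `@openai/agents` as in the examples below):

import { Agent, run } from '@openai/agents';

const agent = new Agent({
  name: 'Assistant', // illustrative name
  model: 'gpt-5',
  modelSettings: {
    // Previously: providerData: { reasoning: { effort: 'low' }, text: { verbosity: 'low' } }
    reasoning: { effort: 'low' },
    text: { verbosity: 'low' },
  },
});

async function main() {
  const result = await run(agent, 'Tell me about recursion in programming.');
  console.log(result.finalOutput);
}

main().catch(console.error);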

examples/agent-patterns/agents-as-tools.ts

Lines changed: 2 additions & 4 deletions

@@ -33,10 +33,8 @@ const orchestratorAgent = new Agent({
   runConfig: {
     model: 'gpt-5',
     modelSettings: {
-      providerData: {
-        reasoning: { effort: 'low' },
-        text: { verbosity: 'low' },
-      },
+      reasoning: { effort: 'low' },
+      text: { verbosity: 'low' },
     },
   },
   runOptions: {

examples/ai-sdk/gpt-5.ts

Lines changed: 38 additions & 0 deletions

@@ -0,0 +1,38 @@
+import { Agent, run, tool } from '@openai/agents';
+import { aisdk } from '@openai/agents-extensions';
+import { z } from 'zod';
+import { openai } from '@ai-sdk/openai';
+
+export async function main() {
+  const getWeatherTool = tool({
+    name: 'get_weather',
+    description: 'Get the weather for a given city',
+    parameters: z.object({ city: z.string() }),
+    execute: async ({ city }) => `The weather in ${city} is sunny`,
+  });
+  const agent = new Agent({
+    name: 'Helpful Assistant',
+    instructions:
+      'You are a helpful assistant. When you need to get the weather, you must use tools.',
+    tools: [getWeatherTool],
+    model: aisdk(openai('gpt-5-mini')),
+    modelSettings: {
+      providerData: {
+        providerOptions: {
+          openai: {
+            reasoningEffort: 'minimal',
+            textVerbosity: 'low',
+          },
+        },
+      },
+    },
+  });
+
+  const result = await run(
+    agent,
+    'Hello what is the weather in San Francisco?',
+  );
+  console.log(result.finalOutput);
+}
+
+main().catch(console.error);

examples/ai-sdk/package.json

Lines changed: 1 addition & 0 deletions

@@ -11,6 +11,7 @@
   "scripts": {
     "build-check": "tsc --noEmit",
     "start": "tsx index.ts",
+    "start:gpt-5": "tsx gpt-5.ts",
    "start:stream": "tsx stream.ts"
   }
 }

examples/basic/hello-world-gpt-5.ts

Lines changed: 31 additions & 10 deletions

@@ -8,38 +8,59 @@ const output = z.object({
 });

 async function main() {
+  const prompt =
+    'Tell me about recursion in programming. Quickly responding with a single answer is fine.';
+
   const agent = new Agent({
     name: 'GPT-5 Assistant',
     model: 'gpt-5',
     instructions: "You're a helpful assistant.",
     modelSettings: {
-      providerData: {
-        reasoning: { effort: 'minimal' },
-        text: { verbosity: 'low' },
-      },
+      reasoning: { effort: 'minimal' },
+      text: { verbosity: 'low' },
     },
     outputType: output,
   });

-  const prompt =
-    'Tell me about recursion in programming. Quickly responding with a single answer is fine.';
   const result = await run(agent, prompt);
   console.log(result.finalOutput);

+  // The following code works in the same way:
+  // const agent2 = agent.clone({
+  //   modelSettings: {
+  //     providerData: {
+  //       reasoning: { effort: 'minimal' },
+  //       text: { verbosity: 'low' },
+  //     }
+  //   },
+  // });
+  // const result2 = await run(agent2, prompt);
+  // console.log(result2.finalOutput);
+
   const completionsAgent = new Agent({
     name: 'GPT-5 Assistant',
     model: new OpenAIChatCompletionsModel(new OpenAI(), 'gpt-5'),
     instructions: "You're a helpful assistant.",
     modelSettings: {
-      providerData: {
-        reasoning_effort: 'minimal',
-        verbosity: 'low',
-      },
+      reasoning: { effort: 'minimal' },
+      text: { verbosity: 'low' },
     },
     outputType: output,
   });
   const completionsResult = await run(completionsAgent, prompt);
   console.log(completionsResult.finalOutput);
+
+  // The following code works in the same way:
+  // const completionsAgent2 = completionsAgent.clone({
+  //   modelSettings: {
+  //     providerData: {
+  //       reasoning_effort: 'minimal',
+  //       verbosity: 'low',
+  //     }
+  //   },
+  // });
+  // const completionsResult2 = await run(completionsAgent2, prompt);
+  // console.log(completionsResult2.finalOutput);
 }

 if (require.main === module) {

examples/basic/hello-world-gpt-oss.ts

Lines changed: 1 addition & 3 deletions

@@ -25,9 +25,7 @@ async function main() {
       'gpt-oss:20b',
     ),
     instructions: 'You answer questions concisely and to the point.',
-    modelSettings: {
-      providerData: { reasoning: { effort: 'low' } },
-    },
+    modelSettings: { reasoning: { effort: 'low' } },
   });

   const question = 'Tell me about recursion in programming.';

examples/basic/reasoning.ts

Lines changed: 2 additions & 9 deletions

@@ -9,15 +9,8 @@ async function main() {
     name: 'Agent',
     model: 'gpt-5',
     modelSettings: {
-      providerData: {
-        reasoning: {
-          effort: 'high',
-          summary: 'auto',
-        },
-        text: {
-          verbosity: 'high',
-        },
-      },
+      reasoning: { effort: 'high', summary: 'auto' },
+      text: { verbosity: 'high' },
     },
   });

examples/tools/web-search-filters.ts

Lines changed: 2 additions & 2 deletions

@@ -21,9 +21,9 @@ async function main() {
       }),
     ],
     modelSettings: {
+      reasoning: { effort: 'low' },
+      text: { verbosity: 'low' },
       providerData: {
-        reasoning: { effort: 'low' },
-        text: { verbosity: 'low' },
         // https://platform.openai.com/docs/guides/tools-web-search?api-mode=responses#sources
         include: ['web_search_call.action.sources'],
       },

packages/agents-core/src/defaultModel.ts

Lines changed: 5 additions & 7 deletions

@@ -43,13 +43,11 @@ export function getDefaultModelSettings(model?: string): ModelSettings {
   const _model = model ?? getDefaultModel();
   if (gpt5ReasoningSettingsRequired(_model)) {
     return {
-      providerData: {
-        // We chose "low" instead of "minimal" because some of the built-in tools
-        // (e.g., file search, image generation, etc.) do not support "minimal"
-        // If you want to use "minimal" reasoning effort, you can pass your own model settings
-        reasoning: { effort: 'low' },
-        text: { verbosity: 'low' },
-      },
+      // We chose "low" instead of "minimal" because some of the built-in tools
+      // (e.g., file search, image generation, etc.) do not support "minimal"
+      // If you want to use "minimal" reasoning effort, you can pass your own model settings
+      reasoning: { effort: 'low' },
+      text: { verbosity: 'low' },
     };
   }
   return {};
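
As the comment in this hunk notes, the default stays at `low` because some built-in tools do not accept `minimal`; callers who want `minimal` pass their own settings. A hedged sketch of that override, reusing the `Agent` import from the earlier sketch (the agent name is illustrative):

const quickAgent = new Agent({
  name: 'Quick Assistant', // illustrative name
  model: 'gpt-5',
  modelSettings: {
    // Overrides the library default of { effort: 'low' }; avoid 'minimal' when
    // the agent also relies on built-in tools such as file search or image generation.
    reasoning: { effort: 'minimal' },
    text: { verbosity: 'low' },
  },
});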

packages/agents-core/src/model.ts

Lines changed: 50 additions & 0 deletions

@@ -18,6 +18,46 @@ export type ModelSettingsToolChoice =
   | 'none'
   | (string & {});

+/**
+ * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ * Currently supported values are `minimal`, `low`, `medium`, and `high`.
+ * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+ */
+export type ModelSettingsReasoningEffort =
+  | 'minimal'
+  | 'low'
+  | 'medium'
+  | 'high'
+  | null;
+
+/**
+ * Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ */
+export type ModelSettingsReasoning = {
+  /**
+   * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+   * Currently supported values are `minimal`, `low`, `medium`, and `high`.
+   * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
+   */
+  effort?: ModelSettingsReasoningEffort | null;
+
+  /**
+   * A summary of the reasoning performed by the model.
+   * This can be useful for debugging and understanding the model's reasoning process.
+   * One of `auto`, `concise`, or `detailed`.
+   */
+  summary?: 'auto' | 'concise' | 'detailed' | null;
+};
+
+export interface ModelSettingsText {
+  /**
+   * Constrains the verbosity of the model's response.
+   * Lower values will result in more concise responses, while higher values will result in more verbose responses.
+   * Currently supported values are `low`, `medium`, and `high`.
+   */
+  verbosity?: 'low' | 'medium' | 'high' | null;
+}
+
 /**
  * Settings to use when calling an LLM.
  *

@@ -75,6 +115,16 @@ export type ModelSettings = {
    */
   store?: boolean;

+  /**
+   * The reasoning settings to use when calling the model.
+   */
+  reasoning?: ModelSettingsReasoning;
+
+  /**
+   * The text settings to use when calling the model.
+   */
+  text?: ModelSettingsText;
+
   /**
    * Additional provider specific settings to be passed directly to the model
    * request.
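
Taken together, the new types let a settings object be checked at compile time. A small sketch, assuming `ModelSettings` is re-exported from the `@openai/agents-core` entry point (the variable name is illustrative):

import type { ModelSettings } from '@openai/agents-core';

// Both fields are optional; the literal unions above constrain the accepted
// values, and `null` leaves a particular setting unset.
const settings: ModelSettings = {
  reasoning: { effort: 'medium', summary: 'auto' },
  text: { verbosity: 'medium' },
};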

0 commit comments