
Commit ffaa770

Add a quick opt-in option to switch to gpt-5 and fix issues around gpt-5
1 parent d88aee2 commit ffaa770

File tree

9 files changed: +155 -11 lines changed

examples/basic/hello-world-gpt-5.ts

Lines changed: 2 additions & 1 deletion

@@ -21,7 +21,8 @@ async function main() {
     outputType: output,
   });
 
-  const prompt = 'Tell me about recursion in programming.';
+  const prompt =
+    'Tell me about recursion in programming. Quickly responding with a single answer is fine.';
   const result = await run(agent, prompt);
   console.log(result.finalOutput);
 

examples/basic/hello-world.ts

Lines changed: 4 additions & 1 deletion

@@ -6,7 +6,10 @@ async function main() {
     instructions: 'You only respond in haikus.',
   });
 
-  const result = await run(agent, 'Tell me about recursion in programming.');
+  const result = await run(
+    agent,
+    'Tell me about recursion in programming. Quickly responding with a single answer is fine.',
+  );
   console.log(result.finalOutput);
   // Example output:
   // Function calls itself,

examples/basic/reasoning.ts

Lines changed: 4 additions & 1 deletion

@@ -7,13 +7,16 @@ const THINKING_PREFIX = styleText(['bgGray', 'black'], 'Thought');
 async function main() {
   const agent = new Agent({
     name: 'Agent',
-    model: 'o3',
+    model: 'gpt-5',
     modelSettings: {
       providerData: {
         reasoning: {
           effort: 'high',
           summary: 'auto',
         },
+        text: {
+          verbosity: 'high',
+        },
       },
     },
   });

examples/financial-research-agent/agents.ts

Lines changed: 3 additions & 2 deletions

@@ -46,7 +46,7 @@ export type FinancialSearchPlan = z.infer<typeof FinancialSearchPlan>;
 export const plannerAgent = new Agent({
   name: 'FinancialPlannerAgent',
   instructions: plannerPrompt,
-  model: 'o3-mini',
+  model: 'gpt-5-mini',
   outputType: FinancialSearchPlan,
 });
 
@@ -69,6 +69,7 @@ Focus on key numbers, events, or quotes that will be useful to a financial analy
 export const searchAgent = new Agent({
   name: 'FinancialSearchAgent',
   instructions: searchAgentPrompt,
+  model: 'gpt-4.1',
   tools: [webSearchTool()],
   modelSettings: { toolChoice: 'required' },
 });
@@ -92,7 +93,7 @@ export type VerificationResult = z.infer<typeof VerificationResult>;
 export const verifierAgent = new Agent({
   name: 'VerificationAgent',
   instructions: verifierPrompt,
-  model: 'gpt-4o',
+  model: 'gpt-4.1',
   outputType: VerificationResult,
 });
 

packages/agents-core/src/agent.ts

Lines changed: 27 additions & 3 deletions

@@ -4,6 +4,11 @@ import type { InputGuardrail, OutputGuardrail } from './guardrail';
 import { AgentHooks } from './lifecycle';
 import { getAllMcpTools, type MCPServer } from './mcp';
 import type { Model, ModelSettings, Prompt } from './model';
+import {
+  getDefaultModelSettings,
+  gpt5ReasoningSettingsRequired,
+  isGpt5Default,
+} from './defaultModel';
 import type { RunContext } from './runContext';
 import {
   type FunctionTool,
@@ -165,8 +170,10 @@ export interface AgentConfiguration<
   handoffOutputTypeWarningEnabled?: boolean;
 
   /**
-   * The model implementation to use when invoking the LLM. By default, if not set, the agent will
-   * use the default model configured in modelSettings.defaultModel
+   * The model implementation to use when invoking the LLM.
+   *
+   * By default, if not set, the agent will use the default model returned by
+   * getDefaultModel (currently "gpt-4.1").
    */
   model: string | Model;
 
@@ -348,7 +355,7 @@ export class Agent<
     this.handoffDescription = config.handoffDescription ?? '';
     this.handoffs = config.handoffs ?? [];
     this.model = config.model ?? '';
-    this.modelSettings = config.modelSettings ?? {};
+    this.modelSettings = config.modelSettings ?? getDefaultModelSettings();
     this.tools = config.tools ?? [];
     this.mcpServers = config.mcpServers ?? [];
     this.inputGuardrails = config.inputGuardrails ?? [];
@@ -359,6 +366,23 @@
     this.toolUseBehavior = config.toolUseBehavior ?? 'run_llm_again';
     this.resetToolChoice = config.resetToolChoice ?? true;
 
+    if (
+      // The user sets a non-default model
+      config.model !== undefined &&
+      // The default model is gpt-5
+      isGpt5Default() &&
+      // However, the specified model is not a gpt-5 model
+      (typeof config.model !== 'string' ||
+        !gpt5ReasoningSettingsRequired(config.model)) &&
+      // The model settings are not customized for the specified model
+      config.modelSettings === undefined
+    ) {
+      // In this scenario, we should use generic model settings
+      // because non-gpt-5 models are not compatible with the default gpt-5 model settings.
+      // This is a best-effort attempt to make the agent work with non-gpt-5 models.
+      this.modelSettings = {};
+    }
+
     // --- Runtime warning for handoff output type compatibility ---
     if (
       config.handoffOutputTypeWarningEnabled === undefined ||
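
The constructor change above gives agents the GPT-5 defaults only when they can actually use them. A minimal sketch of the resulting behavior, assuming OPENAI_DEFAULT_MODEL=gpt-5 is exported before the process starts and that a bare { name } config is accepted as in the existing Agent API (the agent names are illustrative):

import { Agent, getDefaultModelSettings } from '@openai/agents-core';

// No model specified: the agent inherits the GPT-5 defaults
// ({ providerData: { reasoning: { effort: 'low' }, text: { verbosity: 'low' } } }).
const defaultAgent = new Agent({ name: 'Assistant' });
console.log(defaultAgent.modelSettings);

// A non-gpt-5 model with no custom modelSettings: the constructor falls back to {}
// so GPT-5-only reasoning/verbosity settings are never sent to gpt-4.1.
const gpt41Agent = new Agent({ name: 'Assistant', model: 'gpt-4.1' });
console.log(gpt41Agent.modelSettings); // {}

// getDefaultModelSettings can also be queried for a specific model name.
console.log(getDefaultModelSettings('gpt-5-mini'));
// { providerData: { reasoning: { effort: 'low' }, text: { verbosity: 'low' } } }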

packages/agents-core/src/defaultModel.ts

Lines changed: 56 additions & 0 deletions

@@ -0,0 +1,56 @@
+import { loadEnv } from './config';
+import { ModelSettings } from './model';
+
+export const OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME = 'OPENAI_DEFAULT_MODEL';
+
+/**
+ * Returns true if the model name is a GPT-5 model and reasoning settings are required.
+ */
+export function gpt5ReasoningSettingsRequired(modelName: string): boolean {
+  if (modelName.startsWith('gpt-5-chat')) {
+    // gpt-5-chat-latest does not require reasoning settings
+    return false;
+  }
+  // matches any of the gpt-5 models
+  return modelName.startsWith('gpt-5');
+}
+
+/**
+ * Returns true if the default model is a GPT-5 model.
+ * This is used to determine if the default model settings are compatible with GPT-5 models.
+ * If the default model is not a GPT-5 model, the model settings are compatible with other models.
+ */
+export function isGpt5Default(): boolean {
+  return gpt5ReasoningSettingsRequired(getDefaultModel());
+}
+
+/**
+ * Returns the default model name.
+ */
+export function getDefaultModel(): string {
+  const env = loadEnv();
+  return (
+    env[OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME]?.toLowerCase() ?? 'gpt-4.1'
+  );
+}
+
+/**
+ * Returns the default model settings.
+ * If the default model is a GPT-5 model, returns the GPT-5 default model settings.
+ * Otherwise, returns the legacy default model settings.
+ */
+export function getDefaultModelSettings(model?: string): ModelSettings {
+  const _model = model ?? getDefaultModel();
+  if (gpt5ReasoningSettingsRequired(_model)) {
+    return {
+      providerData: {
+        // We chose "low" instead of "minimal" because some of the built-in tools
+        // (e.g., file search, image generation, etc.) do not support "minimal".
+        // If you want to use "minimal" reasoning effort, you can pass your own model settings.
+        reasoning: { effort: 'low' },
+        text: { verbosity: 'low' },
+      },
+    };
+  }
+  return {};
+}
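
The quick opt-in from the commit message is the environment variable above: export OPENAI_DEFAULT_MODEL=gpt-5 before starting the process, and every agent that does not name a model switches to gpt-5 with the low-effort defaults. A rough illustration of the new helpers, assuming the re-exports added in packages/agents-core/src/index.ts (next file); the output comments restate the logic in this diff rather than captured output:

import {
  getDefaultModel,
  getDefaultModelSettings,
  gpt5ReasoningSettingsRequired,
  isGpt5Default,
} from '@openai/agents-core';

// With OPENAI_DEFAULT_MODEL=gpt-5 in the environment:
console.log(getDefaultModel()); // 'gpt-5'; falls back to 'gpt-4.1' when unset
console.log(isGpt5Default()); // true
console.log(gpt5ReasoningSettingsRequired('gpt-5-mini')); // true
console.log(gpt5ReasoningSettingsRequired('gpt-5-chat-latest')); // false, chat variant
console.log(getDefaultModelSettings());
// { providerData: { reasoning: { effort: 'low' }, text: { verbosity: 'low' } } }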

packages/agents-core/src/index.ts

Lines changed: 7 additions & 0 deletions

@@ -94,6 +94,13 @@ export {
   SerializedTool,
   SerializedOutputType,
 } from './model';
+export {
+  OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME,
+  gpt5ReasoningSettingsRequired,
+  getDefaultModel,
+  getDefaultModelSettings,
+  isGpt5Default,
+} from './defaultModel';
 export { setDefaultModelProvider } from './providers';
 export { RunResult, StreamedRunResult } from './result';
 export {

packages/agents-core/src/run.ts

Lines changed: 50 additions & 0 deletions

@@ -54,6 +54,7 @@ import { RunAgentUpdatedStreamEvent, RunRawModelStreamEvent } from './events';
 import { RunState } from './runState';
 import { StreamEventResponseCompleted } from './types/protocol';
 import { convertAgentOutputTypeToSerializable } from './utils/tools';
+import { gpt5ReasoningSettingsRequired, isGpt5Default } from './defaultModel';
 
 const DEFAULT_MAX_TURNS = 10;
 
@@ -369,6 +370,14 @@ export class Runner extends RunHooks<any, AgentOutputType<unknown>> {
        ...this.config.modelSettings,
        ...state._currentAgent.modelSettings,
      };
+      const agentModel = state._currentAgent.model;
+      const agentModelSettings = state._currentAgent.modelSettings;
+      modelSettings = sanitizeModelSettingsForNonGpt5Runner(
+        agentModel,
+        agentModelSettings,
+        model,
+        modelSettings,
+      );
      modelSettings = maybeResetToolChoice(
        state._currentAgent,
        state._toolUseTracker,
@@ -709,6 +718,14 @@
        ...this.config.modelSettings,
        ...currentAgent.modelSettings,
      };
+      const agentModel = currentAgent.model;
+      const agentModelSettings = currentAgent.modelSettings;
+      modelSettings = sanitizeModelSettingsForNonGpt5Runner(
+        agentModel,
+        agentModelSettings,
+        model,
+        modelSettings,
+      );
      modelSettings = maybeResetToolChoice(
        currentAgent,
        result.state._toolUseTracker,
@@ -1029,3 +1046,36 @@ export async function run<TAgent extends Agent<any, any>, TContext = undefined>(
     return await runner.run(agent, input, options);
   }
 }
+
+/**
+ * When the default model is a GPT-5 variant, agents may carry GPT-5-specific providerData
+ * (e.g., reasoning effort, text verbosity). If a run resolves to a non-GPT-5 model and the
+ * agent relied on the default model (i.e., no explicit model set), these GPT-5-only settings
+ * are incompatible and should be stripped to avoid runtime errors.
+ */
+function sanitizeModelSettingsForNonGpt5Runner(
+  agentModel: string | Model,
+  agentModelSettings: ModelSettings,
+  runnerModel: string | Model,
+  modelSettings: ModelSettings,
+): ModelSettings {
+  if (
+    // gpt-5 is enabled for the default model for agents
+    isGpt5Default() &&
+    // no explicitly set model for the agent
+    typeof agentModel === 'string' &&
+    agentModel === Agent.DEFAULT_MODEL_PLACEHOLDER &&
+    // this runner uses a non-gpt-5 model
+    (typeof runnerModel !== 'string' ||
+      !gpt5ReasoningSettingsRequired(runnerModel)) &&
+    (agentModelSettings.providerData?.reasoning ||
+      agentModelSettings.providerData?.text?.verbosity ||
+      (agentModelSettings.providerData as any)?.reasoning_effort)
+  ) {
+    // the incompatible parameters should be removed to avoid runtime errors
+    delete modelSettings.providerData?.reasoning;
+    delete (modelSettings.providerData as any)?.text?.verbosity;
+    delete (modelSettings.providerData as any)?.reasoning_effort;
+  }
+  return modelSettings;
+}
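
The sanitize step matters when the run itself overrides the model while the agent relies on the GPT-5 default. A sketch of that scenario, assuming OPENAI_DEFAULT_MODEL=gpt-5 and that the Runner accepts a per-run model override in its run config as it does today (the prompt text is illustrative):

import { Agent, Runner } from '@openai/agents-core';

async function main() {
  // The agent falls back to the default model and carries the GPT-5 settings.
  const agent = new Agent({ name: 'Assistant' });

  // The runner resolves to gpt-4.1, so the GPT-5-only providerData
  // (reasoning, text.verbosity) is stripped from the merged settings
  // before the request is sent, avoiding a runtime error from the API.
  const runner = new Runner({ model: 'gpt-4.1' });
  const result = await runner.run(agent, 'Hello!');
  console.log(result.finalOutput);
}

main().catch(console.error);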

packages/agents-openai/src/openaiProvider.ts

Lines changed: 2 additions & 3 deletions

@@ -1,7 +1,6 @@
-import { Model, ModelProvider } from '@openai/agents-core';
+import { Model, ModelProvider, getDefaultModel } from '@openai/agents-core';
 import OpenAI from 'openai';
 import {
-  DEFAULT_OPENAI_MODEL,
   getDefaultOpenAIClient,
   getDefaultOpenAIKey,
   shouldUseResponsesByDefault,
@@ -65,7 +64,7 @@ export class OpenAIProvider implements ModelProvider {
   }
 
   async getModel(modelName?: string | undefined): Promise<Model> {
-    const model = modelName || DEFAULT_OPENAI_MODEL;
+    const model = modelName || getDefaultModel();
     const useResponses = this.#useResponses ?? shouldUseResponsesByDefault();
 
     if (useResponses) {
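
At the provider level, the same default applies when no model name is passed to getModel(). A small sketch, assuming an OpenAI API key is available in the environment:

import { OpenAIProvider } from '@openai/agents-openai';

async function main() {
  const provider = new OpenAIProvider();
  // Resolves through getDefaultModel(): gpt-5 when OPENAI_DEFAULT_MODEL=gpt-5, otherwise gpt-4.1.
  const model = await provider.getModel();
  console.log(model);
}

main().catch(console.error);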
