Commit d4512d6

Numman Ali authored and committed
Clamp minimal reasoning for GPT-5.x
1 parent a83e24a commit d4512d6

3 files changed (+20, -10 lines)

lib/request/request-transformer.ts

Lines changed: 3 additions & 3 deletions
@@ -273,9 +273,9 @@ export function getReasoningConfig(
     effort = "low";
   }

-  // Normalize "minimal" to "low" for Codex families
-  // Codex CLI presets are low/medium/high (or xhigh for Codex Max / GPT-5.2 Codex)
-  if (isCodex && effort === "minimal") {
+  // Normalize "minimal" to "low" for all non-mini models
+  // The ChatGPT Codex backend does not accept "minimal" (supports none/low/medium/high).
+  if (effort === "minimal") {
     effort = "low";
   }
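For readers skimming the diff, here is a minimal TypeScript sketch of the clamping rule this commit introduces. It is an illustration only, not the project's implementation: the real logic sits inside getReasoningConfig in lib/request/request-transformer.ts, whose surrounding code and isCodex handling are not shown in this hunk, and the effort union below is an assumption.

// Hypothetical sketch of the clamp, assuming this effort union.
type ReasoningEffort = 'minimal' | 'low' | 'medium' | 'high';

function clampEffort(effort: ReasoningEffort): Exclude<ReasoningEffort, 'minimal'> {
  // After this commit the clamp applies to every model, not only Codex
  // families, because the ChatGPT Codex backend rejects "minimal".
  return effort === 'minimal' ? 'low' : effort;
}

// clampEffort('minimal') === 'low'; all other values pass through unchanged.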

test/config.test.ts

Lines changed: 5 additions & 5 deletions
@@ -69,10 +69,10 @@ describe('Configuration Parsing', () => {
     expect(defaultReasoning.summary).toBe('auto');
   });

-  it('should use minimal effort for lightweight models (nano/mini)', () => {
+  it('should normalize lightweight defaults to low effort (nano/mini)', () => {
     const nanoReasoning = getReasoningConfig('gpt-5-nano', {});

-    expect(nanoReasoning.effort).toBe('minimal');
+    expect(nanoReasoning.effort).toBe('low');
     expect(nanoReasoning.summary).toBe('auto');
   });

@@ -84,11 +84,11 @@ describe('Configuration Parsing', () => {
     expect(codexMinimalReasoning.summary).toBe('auto');
   });

-  it('should preserve "minimal" effort for non-codex models', () => {
+  it('should normalize "minimal" effort for non-codex models', () => {
     const gpt5MinimalConfig = { reasoningEffort: 'minimal' as const };
     const gpt5MinimalReasoning = getReasoningConfig('gpt-5', gpt5MinimalConfig);

-    expect(gpt5MinimalReasoning.effort).toBe('minimal');
+    expect(gpt5MinimalReasoning.effort).toBe('low');
   });

   it('should handle high effort setting', () => {

@@ -134,7 +134,7 @@ describe('Configuration Parsing', () => {
   describe('Model-specific behavior', () => {
     it('should detect lightweight models correctly', () => {
       const miniReasoning = getReasoningConfig('gpt-5-mini', {});
-      expect(miniReasoning.effort).toBe('minimal');
+      expect(miniReasoning.effort).toBe('low');
     });

     it('should detect codex models correctly', () => {

test/request-transformer.test.ts

Lines changed: 12 additions & 2 deletions
@@ -1057,7 +1057,7 @@ describe('Request Transformer Module', () => {
       expect(result.reasoning?.effort).toBe('low');
     });

-    it('should preserve minimal for non-codex models', async () => {
+    it('should normalize minimal to low for non-codex models', async () => {
       const body: RequestBody = {
         model: 'gpt-5',
         input: [],

@@ -1067,7 +1067,7 @@ describe('Request Transformer Module', () => {
         models: {},
       };
       const result = await transformRequestBody(body, codexInstructions, userConfig);
-      expect(result.reasoning?.effort).toBe('minimal');
+      expect(result.reasoning?.effort).toBe('low');
     });

     it('should use minimal effort for lightweight models', async () => {

@@ -1079,6 +1079,16 @@ describe('Request Transformer Module', () => {
       expect(result.reasoning?.effort).toBe('medium');
     });

+    it('should normalize minimal to low when provided by the host', async () => {
+      const body: RequestBody = {
+        model: 'gpt-5-nano',
+        input: [],
+        reasoning: { effort: 'minimal' },
+      };
+      const result = await transformRequestBody(body, codexInstructions);
+      expect(result.reasoning?.effort).toBe('low');
+    });
+
     it('should convert orphaned function_call_output to message to preserve context', async () => {
       const body: RequestBody = {
         model: 'gpt-5-codex',
