Skip to content

Commit 10eeece

Browse files
committed
fix: remove temperature parameter for GPT-5 models
GPT-5 no longer supports the temperature parameter in its API. This commit removes the temperature field from GPT-5 requests to prevent API errors when using LiteLLM or other proxies. Fixes #6965
1 parent 5e07bc4 commit 10eeece

File tree

3 files changed: 10 additions (+10), 7 deletions (-7)

src/api/providers/__tests__/openai-native.spec.ts

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -634,7 +634,8 @@ describe("OpenAiNativeHandler", () => {
634634
expect(body1).toContain('"effort":"medium"')
635635
expect(body1).toContain('"summary":"auto"')
636636
expect(body1).toContain('"verbosity":"medium"')
637-
expect(body1).toContain('"temperature":1')
637+
// GPT-5 no longer supports temperature parameter
638+
expect(body1).not.toContain('"temperature"')
638639
expect(body1).toContain('"max_output_tokens"')
639640

640641
// Verify the streamed content
@@ -856,7 +857,8 @@ describe("OpenAiNativeHandler", () => {
856857
expect(body2).toContain('"effort":"low"')
857858
expect(body2).toContain('"summary":"auto"')
858859
expect(body2).toContain('"verbosity":"medium"')
859-
expect(body2).toContain('"temperature":1')
860+
// GPT-5 no longer supports temperature parameter
861+
expect(body2).not.toContain('"temperature"')
860862
expect(body2).toContain('"max_output_tokens"')
861863

862864
// Clean up
@@ -906,7 +908,8 @@ describe("OpenAiNativeHandler", () => {
906908
expect(body3).toContain('"effort":"minimal"')
907909
expect(body3).toContain('"summary":"auto"')
908910
expect(body3).toContain('"verbosity":"high"')
909-
expect(body3).toContain('"temperature":1')
911+
// GPT-5 no longer supports temperature parameter
912+
expect(body3).not.toContain('"temperature"')
910913
expect(body3).toContain('"max_output_tokens"')
911914

912915
// Clean up

src/api/providers/openai-native.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -273,7 +273,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
273273
stream: boolean
274274
reasoning?: { effort: ReasoningEffortWithMinimal; summary?: "auto" }
275275
text?: { verbosity: VerbosityLevel }
276-
temperature?: number
276+
// temperature parameter removed - GPT-5 no longer supports it
277277
max_output_tokens?: number
278278
previous_response_id?: string
279279
}
@@ -289,7 +289,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
289289
},
290290
}),
291291
text: { verbosity: (verbosity || "medium") as VerbosityLevel },
292-
temperature: this.options.modelTemperature ?? GPT5_DEFAULT_TEMPERATURE,
292+
// GPT-5 no longer supports temperature parameter
293293
// Explicitly include the calculated max output tokens for GPT‑5.
294294
// Use the per-request reserved output computed by Roo (params.maxTokens from getModelParams).
295295
...(model.maxTokens ? { max_output_tokens: model.maxTokens } : {}),

src/api/transform/model-params.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -142,9 +142,9 @@ export function getModelParams({
142142
reasoning: getAnthropicReasoning({ model, reasoningBudget, reasoningEffort, settings }),
143143
}
144144
} else if (format === "openai") {
145-
// Special case for o1 and o3-mini, which don't support temperature.
145+
// Special case for o1, o3-mini, and GPT-5 models, which don't support temperature.
146146
// TODO: Add a `supportsTemperature` field to the model info.
147-
if (modelId.startsWith("o1") || modelId.startsWith("o3-mini")) {
147+
if (modelId.startsWith("o1") || modelId.startsWith("o3-mini") || modelId.startsWith("gpt-5")) {
148148
params.temperature = undefined
149149
}
150150

0 commit comments

Comments (0)