Commit 889e925
Fix: Remove temperature parameter for Azure OpenAI reasoning models (#5116)
* Fix temperature parameter error for Azure OpenAI reasoning models

* Fix tests: update O3 family model tests to expect `temperature: undefined`
  - Updated the failing tests in openai.spec.ts to expect `temperature: undefined` for O3 models
  - This aligns with the PR changes that remove the temperature parameter for Azure OpenAI o1, o3, and o4 models
  - All 4 previously failing tests now pass

Co-authored-by: Daniel Riccio <[email protected]>
1 parent e559bee commit 889e925
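For context on the change itself: Azure OpenAI reasoning models (o1, o3, o4) reject a `temperature` value, so the handler now sets `temperature: undefined`, which is dropped when the request body is serialized to JSON. A minimal sketch of the pattern, assuming the standard `openai` npm package; `isReasoningModel` and `buildChatParams` are illustrative names, not the repository's actual API:

```ts
import OpenAI from "openai"

// Illustrative predicate mirroring the commit's substring check:
// treat o1/o3/o4 model IDs as reasoning models.
function isReasoningModel(modelId: string): boolean {
	return modelId.includes("o1") || modelId.includes("o3") || modelId.includes("o4")
}

// Hypothetical request builder: reasoning models get `temperature: undefined`,
// which JSON serialization omits; other models keep the caller's value.
function buildChatParams(
	modelId: string,
	messages: OpenAI.Chat.ChatCompletionMessageParam[],
	requestedTemperature: number,
): OpenAI.Chat.ChatCompletionCreateParamsStreaming {
	return {
		model: modelId,
		messages,
		stream: true,
		temperature: isReasoningModel(modelId) ? undefined : requestedTemperature,
	}
}
```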

2 files changed: +7 −7 lines changed

src/api/providers/__tests__/openai.spec.ts

Lines changed: 4 additions & 4 deletions

@@ -599,7 +599,7 @@ describe("OpenAiHandler", () => {
 					stream: true,
 					stream_options: { include_usage: true },
 					reasoning_effort: "medium",
-					temperature: 0.5,
+					temperature: undefined,
 					// O3 models do not support deprecated max_tokens but do support max_completion_tokens
 					max_completion_tokens: 32000,
 				}),
@@ -640,7 +640,7 @@ describe("OpenAiHandler", () => {
 					stream: true,
 					stream_options: { include_usage: true },
 					reasoning_effort: "medium",
-					temperature: 0.7,
+					temperature: undefined,
 				}),
 				{},
 			)
@@ -682,7 +682,7 @@ describe("OpenAiHandler", () => {
 					{ role: "user", content: "Hello!" },
 				],
 				reasoning_effort: "medium",
-				temperature: 0.3,
+				temperature: undefined,
 				// O3 models do not support deprecated max_tokens but do support max_completion_tokens
 				max_completion_tokens: 65536, // Using default maxTokens from o3Options
 			}),
@@ -712,7 +712,7 @@ describe("OpenAiHandler", () => {
 
 			expect(mockCreate).toHaveBeenCalledWith(
 				expect.objectContaining({
-					temperature: 0, // Default temperature
+					temperature: undefined, // Temperature is not supported for O3 models
 				}),
 				{},
 			)
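One detail worth noting about these expectations: in Vitest/Jest, `expect.objectContaining({ temperature: undefined })` matches both a call whose params carry an explicit `temperature: undefined` key and one where the key is omitted entirely. A self-contained sketch under that assumption (the mock here stands in for the OpenAI client's `chat.completions.create`; it is not the repository's test setup):

```ts
import { describe, expect, it, vi } from "vitest"

describe("temperature expectation for O3-family models", () => {
	it("accepts an explicitly undefined temperature", () => {
		const mockCreate = vi.fn()

		// Simulates what the handler now sends for O3-family models.
		mockCreate({ model: "o3-mini", stream: true, temperature: undefined })

		expect(mockCreate).toHaveBeenCalledWith(
			expect.objectContaining({ temperature: undefined }),
		)
	})
})
```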

src/api/providers/openai.ts

Lines changed: 3 additions & 3 deletions

@@ -86,7 +86,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		const deepseekReasoner = modelId.includes("deepseek-reasoner") || enabledR1Format
 		const ark = modelUrl.includes(".volces.com")
 
-		if (modelId.startsWith("o3-mini")) {
+		if (modelId.includes("o1") || modelId.includes("o3") || modelId.includes("o4")) {
 			yield* this.handleO3FamilyMessage(modelId, systemPrompt, messages)
 			return
 		}
@@ -306,7 +306,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 			stream: true,
 			...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
 			reasoning_effort: modelInfo.reasoningEffort,
-			temperature: this.options.modelTemperature ?? 0,
+			temperature: undefined,
 		}
 
 		// O3 family models do not support the deprecated max_tokens parameter
@@ -331,7 +331,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 				...convertToOpenAiMessages(messages),
 			],
 			reasoning_effort: modelInfo.reasoningEffort,
-			temperature: this.options.modelTemperature ?? 0,
+			temperature: undefined,
 		}
 
 		// O3 family models do not support the deprecated max_tokens parameter
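The repeated comment in both hunks points at the second constraint these models impose: the deprecated `max_tokens` field is rejected and `max_completion_tokens` must be sent instead. A hedged sketch combining the two rules; `applyModelConstraints` is an illustrative helper, not the repository's API:

```ts
import OpenAI from "openai"

// Illustrative: reasoning models drop temperature and take the newer
// max_completion_tokens field; other models keep the legacy max_tokens.
function applyModelConstraints(
	base: OpenAI.Chat.ChatCompletionCreateParams,
	reasoning: boolean,
	maxTokens: number,
): OpenAI.Chat.ChatCompletionCreateParams {
	return reasoning
		? { ...base, temperature: undefined, max_completion_tokens: maxTokens }
		: { ...base, max_tokens: maxTokens }
}
```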
