Skip to content

Commit 65257da

Browse files
authored
Merge pull request #704 from RooVetGit/o3_reasoning_effort
Add o3-mini-high and o3-mini-low
2 parents 7d6cce6 + d047c8b commit 65257da

File tree

4 files changed

+41
-3
lines changed

4 files changed

+41
-3
lines changed

.changeset/kind-balloons-grin.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1 + ---
2 + "roo-cline": patch
3 + ---
4 +
5 + Add o3-mini-high and o3-mini-low

src/api/providers/__tests__/openai-native.test.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -300,7 +300,7 @@ describe("OpenAiNativeHandler", () => {
300 300       expect(mockCreate).toHaveBeenCalledWith({
301 301           model: "o3-mini",
302 302           messages: [{ role: "user", content: "Test prompt" }],
303     -         temperature: 0,
    303 +         reasoning_effort: "medium",
304 304       })
305 305   })
306 306

src/api/providers/openai-native.ts

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,12 +48,15 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
48 48           }
49 49           break
50 50       }
51    -     case "o3-mini": {
   51 +     case "o3-mini":
   52 +     case "o3-mini-low":
   53 +     case "o3-mini-high": {
52 54           const stream = await this.client.chat.completions.create({
53    -             model: this.getModel().id,
   55 +             model: "o3-mini",
54 56               messages: [{ role: "developer", content: systemPrompt }, ...convertToOpenAiMessages(messages)],
55 57               stream: true,
56 58               stream_options: { include_usage: true },
   59 +             reasoning_effort: this.getModel().info.reasoningEffort,
57 60           })
58 61
59 62           for await (const chunk of stream) {
@@ -132,6 +135,16 @@ export class OpenAiNativeHandler implements ApiHandler, SingleCompletionHandler
132 135             messages: [{ role: "user", content: prompt }],
133 136         }
134 137         break
    138 +   case "o3-mini":
    139 +   case "o3-mini-low":
    140 +   case "o3-mini-high":
    141 +       // o3 doesn't support non-1 temp
    142 +       requestOptions = {
    143 +           model: "o3-mini",
    144 +           messages: [{ role: "user", content: prompt }],
    145 +           reasoning_effort: this.getModel().info.reasoningEffort,
    146 +       }
    147 +       break
135 148     default:
136 149         requestOptions = {
137 150             model: modelId,
src/shared/api.ts

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,7 @@ export interface ModelInfo {
80 80       cacheWritesPrice?: number
81 81       cacheReadsPrice?: number
82 82       description?: string
   83 +     reasoningEffort?: "low" | "medium" | "high"
83 84   }
84 85
85 86   // Anthropic
@@ -517,6 +518,25 @@ export const openAiNativeModels = {
517 518         supportsPromptCache: false,
518 519         inputPrice: 1.1,
519 520         outputPrice: 4.4,
    521 +       reasoningEffort: "medium",
    522 +   },
    523 +   "o3-mini-high": {
    524 +       maxTokens: 100_000,
    525 +       contextWindow: 200_000,
    526 +       supportsImages: false,
    527 +       supportsPromptCache: false,
    528 +       inputPrice: 1.1,
    529 +       outputPrice: 4.4,
    530 +       reasoningEffort: "high",
    531 +   },
    532 +   "o3-mini-low": {
    533 +       maxTokens: 100_000,
    534 +       contextWindow: 200_000,
    535 +       supportsImages: false,
    536 +       supportsPromptCache: false,
    537 +       inputPrice: 1.1,
    538 +       outputPrice: 4.4,
    539 +       reasoningEffort: "low",
520 540     },
521 541     o1: {
522 542         maxTokens: 100_000,

0 commit comments

Comments (0)