Skip to content

Commit d5ee39f

Browse files
ctemrubens
authored and committed
Add model reasoning effort
1 parent 31c855e commit d5ee39f

File tree

6 files changed

+40
-40
lines changed

6 files changed

+40
-40
lines changed

src/api/providers/constants.ts

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,12 @@
1+
export const DEFAULT_HEADERS = {
2+
"HTTP-Referer": "https://github.com/RooVetGit/Roo-Cline",
3+
"X-Title": "Roo Code",
4+
}
5+
16
export const ANTHROPIC_DEFAULT_MAX_TOKENS = 8192
27

38
export const DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6
9+
10+
export const AZURE_AI_INFERENCE_PATH = "/models/chat/completions"
11+
12+
export const REASONING_MODELS = new Set(["x-ai/grok-3-mini-beta", "grok-3-mini-beta", "grok-3-mini-fast-beta"])

src/api/providers/openai.ts

Lines changed: 4 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -15,17 +15,10 @@ import { convertToSimpleMessages } from "../transform/simple-format"
1515
import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
1616
import { BaseProvider } from "./base-provider"
1717
import { XmlMatcher } from "../../utils/xml-matcher"
18-
import { DEEP_SEEK_DEFAULT_TEMPERATURE } from "./constants"
19-
20-
export const defaultHeaders = {
21-
"HTTP-Referer": "https://github.com/RooVetGit/Roo-Cline",
22-
"X-Title": "Roo Code",
23-
}
18+
import { DEEP_SEEK_DEFAULT_TEMPERATURE, DEFAULT_HEADERS, AZURE_AI_INFERENCE_PATH } from "./constants"
2419

2520
export interface OpenAiHandlerOptions extends ApiHandlerOptions {}
2621

27-
const AZURE_AI_INFERENCE_PATH = "/models/chat/completions"
28-
2922
export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
3023
protected options: OpenAiHandlerOptions
3124
private client: OpenAI
@@ -45,7 +38,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
4538
this.client = new OpenAI({
4639
baseURL,
4740
apiKey,
48-
defaultHeaders,
41+
defaultHeaders: DEFAULT_HEADERS,
4942
defaultQuery: { "api-version": this.options.azureApiVersion || "2024-05-01-preview" },
5043
})
5144
} else if (isAzureOpenAi) {
@@ -56,7 +49,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
5649
apiKey,
5750
apiVersion: this.options.azureApiVersion || azureOpenAiDefaultApiVersion,
5851
defaultHeaders: {
59-
...defaultHeaders,
52+
...DEFAULT_HEADERS,
6053
...(this.options.openAiHostHeader ? { Host: this.options.openAiHostHeader } : {}),
6154
},
6255
})
@@ -65,7 +58,7 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
6558
baseURL,
6659
apiKey,
6760
defaultHeaders: {
68-
...defaultHeaders,
61+
...DEFAULT_HEADERS,
6962
...(this.options.openAiHostHeader ? { Host: this.options.openAiHostHeader } : {}),
7063
},
7164
})

src/api/providers/openrouter.ts

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -9,10 +9,9 @@ import { convertToOpenAiMessages } from "../transform/openai-format"
99
import { ApiStreamChunk, ApiStreamUsageChunk } from "../transform/stream"
1010
import { convertToR1Format } from "../transform/r1-format"
1111

12-
import { DEEP_SEEK_DEFAULT_TEMPERATURE } from "./constants"
12+
import { DEFAULT_HEADERS, DEEP_SEEK_DEFAULT_TEMPERATURE } from "./constants"
1313
import { getModelParams, SingleCompletionHandler } from ".."
1414
import { BaseProvider } from "./base-provider"
15-
import { defaultHeaders } from "./openai"
1615

1716
const OPENROUTER_DEFAULT_PROVIDER_NAME = "[default]"
1817

@@ -40,7 +39,7 @@ export class OpenRouterHandler extends BaseProvider implements SingleCompletionH
4039
const baseURL = this.options.openRouterBaseUrl || "https://openrouter.ai/api/v1"
4140
const apiKey = this.options.openRouterApiKey ?? "not-provided"
4241

43-
this.client = new OpenAI({ baseURL, apiKey, defaultHeaders })
42+
this.client = new OpenAI({ baseURL, apiKey, defaultHeaders: DEFAULT_HEADERS })
4443
}
4544

4645
override async *createMessage(

src/api/providers/xai.ts

Lines changed: 17 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,10 @@ import { Anthropic } from "@anthropic-ai/sdk"
22
import OpenAI from "openai"
33
import { ApiHandlerOptions, XAIModelId, ModelInfo, xaiDefaultModelId, xaiModels } from "../../shared/api"
44
import { ApiStream } from "../transform/stream"
5-
import { BaseProvider } from "./base-provider"
65
import { convertToOpenAiMessages } from "../transform/openai-format"
6+
import { DEFAULT_HEADERS, REASONING_MODELS } from "./constants"
7+
import { BaseProvider } from "./base-provider"
78
import { SingleCompletionHandler } from ".."
8-
import { ChatCompletionReasoningEffort } from "openai/resources/chat/completions.mjs"
9-
import { defaultHeaders } from "./openai"
109

1110
export class XAIHandler extends BaseProvider implements SingleCompletionHandler {
1211
protected options: ApiHandlerOptions
@@ -18,35 +17,29 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
1817
this.client = new OpenAI({
1918
baseURL: "https://api.x.ai/v1",
2019
apiKey: this.options.xaiApiKey ?? "not-provided",
21-
defaultHeaders: defaultHeaders,
20+
defaultHeaders: DEFAULT_HEADERS,
2221
})
2322
}
2423

25-
override getModel(): { id: string; info: ModelInfo } {
24+
override getModel() {
2625
const modelId = this.options.apiModelId
26+
2727
if (modelId && modelId in xaiModels) {
2828
const id = modelId as XAIModelId
2929
return { id, info: xaiModels[id] }
3030
}
31+
3132
return {
3233
id: xaiDefaultModelId,
3334
info: xaiModels[xaiDefaultModelId],
35+
reasoningEffort: REASONING_MODELS.has(xaiDefaultModelId) ? this.options.reasoningEffort : undefined,
3436
}
3537
}
3638

3739
override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
38-
const modelId = this.getModel().id
39-
const modelInfo = this.getModel().info
40+
const { id: modelId, info: modelInfo, reasoningEffort } = this.getModel()
4041

41-
// Special handling for Grok-3-mini models which support reasoning_effort
42-
let reasoningEffort: ChatCompletionReasoningEffort | undefined
43-
if (modelId.includes("3-mini") && this.options.reasoningEffort) {
44-
if (["low", "high"].includes(this.options.reasoningEffort)) {
45-
reasoningEffort = this.options.reasoningEffort as ChatCompletionReasoningEffort
46-
}
47-
}
48-
49-
// Use the OpenAI-compatible API
42+
// Use the OpenAI-compatible API.
5043
const stream = await this.client.chat.completions.create({
5144
model: modelId,
5245
max_tokens: modelInfo.maxTokens,
@@ -59,6 +52,7 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
5952

6053
for await (const chunk of stream) {
6154
const delta = chunk.choices[0]?.delta
55+
6256
if (delta?.content) {
6357
yield {
6458
type: "text",
@@ -78,7 +72,7 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
7872
type: "usage",
7973
inputTokens: chunk.usage.prompt_tokens || 0,
8074
outputTokens: chunk.usage.completion_tokens || 0,
81-
// X.AI might include these fields in the future, handle them if present
75+
// X.AI might include these fields in the future, handle them if present.
8276
cacheReadTokens:
8377
"cache_read_input_tokens" in chunk.usage ? (chunk.usage as any).cache_read_input_tokens : 0,
8478
cacheWriteTokens:
@@ -91,16 +85,21 @@ export class XAIHandler extends BaseProvider implements SingleCompletionHandler
9185
}
9286

9387
async completePrompt(prompt: string): Promise<string> {
88+
const { id: modelId, reasoningEffort } = this.getModel()
89+
9490
try {
9591
const response = await this.client.chat.completions.create({
96-
model: this.getModel().id,
92+
model: modelId,
9793
messages: [{ role: "user", content: prompt }],
94+
...(reasoningEffort ? { reasoning_effort: reasoningEffort } : {}),
9895
})
96+
9997
return response.choices[0]?.message.content || ""
10098
} catch (error) {
10199
if (error instanceof Error) {
102100
throw new Error(`xAI completion error: ${error.message}`)
103101
}
102+
104103
throw error
105104
}
106105
}

webview-ui/src/components/settings/ApiOptions.tsx

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1562,13 +1562,6 @@ const ApiOptions = ({
15621562
</div>
15631563
)}
15641564

1565-
{selectedProvider === "openrouter" && REASONING_MODELS.has(selectedModelId) && (
1566-
<ReasoningEffort
1567-
apiConfiguration={apiConfiguration}
1568-
setApiConfigurationField={setApiConfigurationField}
1569-
/>
1570-
)}
1571-
15721565
{selectedProvider === "glama" && (
15731566
<ModelPicker
15741567
apiConfiguration={apiConfiguration}
@@ -1713,6 +1706,13 @@ const ApiOptions = ({
17131706
</>
17141707
)}
17151708

1709+
{REASONING_MODELS.has(selectedModelId) && (
1710+
<ReasoningEffort
1711+
apiConfiguration={apiConfiguration}
1712+
setApiConfigurationField={setApiConfigurationField}
1713+
/>
1714+
)}
1715+
17161716
{!fromWelcomeView && (
17171717
<>
17181718
<DiffSettingsControl

webview-ui/src/components/settings/constants.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,4 +50,4 @@ export const VERTEX_REGIONS = [
5050
{ value: "asia-southeast1", label: "asia-southeast1" },
5151
]
5252

53-
export const REASONING_MODELS = new Set(["x-ai/grok-3-mini-beta"])
53+
export const REASONING_MODELS = new Set(["x-ai/grok-3-mini-beta", "grok-3-mini-beta", "grok-3-mini-fast-beta"])

0 commit comments

Comments (0)