
Commit 2075f26

feat: Add Reasoning Effort setting for OpenAI Compatible provider (#2906)
1 parent 21ea433

27 files changed: +318 −28 lines


.changeset/odd-ligers-press.md

Lines changed: 5 additions & 0 deletions
@@ -0,0 +1,5 @@
+---
+"roo-cline": minor
+---
+
+Add Reasoning Effort setting for OpenAI Compatible provider

package-lock.json

Lines changed: 11 additions & 0 deletions
Some generated files are not rendered by default.

package.json

Lines changed: 1 addition & 0 deletions
@@ -469,6 +469,7 @@
 		"@types/jest": "^29.5.14",
 		"@types/mocha": "^10.0.10",
 		"@types/node": "20.x",
+		"@types/node-cache": "^4.1.3",
 		"@types/node-ipc": "^9.2.3",
 		"@types/string-similarity": "^4.0.2",
 		"@typescript-eslint/eslint-plugin": "^7.14.1",

src/api/providers/__tests__/openai.test.ts

Lines changed: 35 additions & 0 deletions
@@ -1,3 +1,5 @@
+// npx jest src/api/providers/__tests__/openai.test.ts
+
 import { OpenAiHandler } from "../openai"
 import { ApiHandlerOptions } from "../../../shared/api"
 import { Anthropic } from "@anthropic-ai/sdk"

@@ -155,6 +157,39 @@ describe("OpenAiHandler", () => {
 			expect(textChunks).toHaveLength(1)
 			expect(textChunks[0].text).toBe("Test response")
 		})
+		it("should include reasoning_effort when reasoning effort is enabled", async () => {
+			const reasoningOptions: ApiHandlerOptions = {
+				...mockOptions,
+				enableReasoningEffort: true,
+				openAiCustomModelInfo: { contextWindow: 128_000, supportsPromptCache: false, reasoningEffort: "high" },
+			}
+			const reasoningHandler = new OpenAiHandler(reasoningOptions)
+			const stream = reasoningHandler.createMessage(systemPrompt, messages)
+			// Consume the stream to trigger the API call
+			for await (const _chunk of stream) {
+			}
+			// Assert the mockCreate was called with reasoning_effort
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.reasoning_effort).toBe("high")
+		})
+
+		it("should not include reasoning_effort when reasoning effort is disabled", async () => {
+			const noReasoningOptions: ApiHandlerOptions = {
+				...mockOptions,
+				enableReasoningEffort: false,
+				openAiCustomModelInfo: { contextWindow: 128_000, supportsPromptCache: false },
+			}
+			const noReasoningHandler = new OpenAiHandler(noReasoningOptions)
+			const stream = noReasoningHandler.createMessage(systemPrompt, messages)
+			// Consume the stream to trigger the API call
+			for await (const _chunk of stream) {
+			}
+			// Assert the mockCreate was called without reasoning_effort
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.reasoning_effort).toBeUndefined()
+		})
 	})

 	describe("error handling", () => {

src/exports/roo-code.d.ts

Lines changed: 1 addition & 0 deletions
@@ -87,6 +87,7 @@ type ProviderSettings = {
 	openAiUseAzure?: boolean | undefined
 	azureApiVersion?: string | undefined
 	openAiStreamingEnabled?: boolean | undefined
+	enableReasoningEffort?: boolean | undefined
 	ollamaModelId?: string | undefined
 	ollamaBaseUrl?: string | undefined
 	vsCodeLmModelSelector?:

src/exports/types.ts

Lines changed: 1 addition & 0 deletions
@@ -88,6 +88,7 @@ type ProviderSettings = {
 	openAiUseAzure?: boolean | undefined
 	azureApiVersion?: string | undefined
 	openAiStreamingEnabled?: boolean | undefined
+	enableReasoningEffort?: boolean | undefined
 	ollamaModelId?: string | undefined
 	ollamaBaseUrl?: string | undefined
 	vsCodeLmModelSelector?:

src/schemas/index.ts

Lines changed: 2 additions & 0 deletions
@@ -355,6 +355,7 @@ export const providerSettingsSchema = z.object({
 	openAiUseAzure: z.boolean().optional(),
 	azureApiVersion: z.string().optional(),
 	openAiStreamingEnabled: z.boolean().optional(),
+	enableReasoningEffort: z.boolean().optional(),
 	// Ollama
 	ollamaModelId: z.string().optional(),
 	ollamaBaseUrl: z.string().optional(),

@@ -453,6 +454,7 @@ const providerSettingsRecord: ProviderSettingsRecord = {
 	openAiUseAzure: undefined,
 	azureApiVersion: undefined,
 	openAiStreamingEnabled: undefined,
+	enableReasoningEffort: undefined,
 	// Ollama
 	ollamaModelId: undefined,
 	ollamaBaseUrl: undefined,

src/utils/__tests__/enhance-prompt.test.ts

Lines changed: 2 additions & 0 deletions
@@ -13,6 +13,7 @@ describe("enhancePrompt", () => {
 		apiProvider: "openai",
 		openAiApiKey: "test-key",
 		openAiBaseUrl: "https://api.openai.com/v1",
+		enableReasoningEffort: false,
 	}

 	beforeEach(() => {

@@ -97,6 +98,7 @@ describe("enhancePrompt", () => {
 		apiProvider: "openrouter",
 		openRouterApiKey: "test-key",
 		openRouterModelId: "test-model",
+		enableReasoningEffort: false,
 	}

 	// Mock successful enhancement

webview-ui/src/components/settings/ApiOptions.tsx

Lines changed: 42 additions & 7 deletions
@@ -1,13 +1,12 @@
 import React, { memo, useCallback, useEffect, useMemo, useState } from "react"
-import { useAppTranslation } from "@/i18n/TranslationContext"
-import { Trans } from "react-i18next"
-import { getRequestyAuthUrl, getOpenRouterAuthUrl, getGlamaAuthUrl } from "@src/oauth/urls"
 import { useDebounce, useEvent } from "react-use"
+import { Trans } from "react-i18next"
 import { LanguageModelChatSelector } from "vscode"
 import { Checkbox } from "vscrui"
 import { VSCodeLink, VSCodeRadio, VSCodeRadioGroup, VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
 import { ExternalLinkIcon } from "@radix-ui/react-icons"

+import { ReasoningEffort as ReasoningEffortType } from "@roo/schemas"
 import {
 	ApiConfiguration,
 	ModelInfo,

@@ -21,21 +20,22 @@
 	ApiProvider,
 } from "@roo/shared/api"
 import { ExtensionMessage } from "@roo/shared/ExtensionMessage"
-import { AWS_REGIONS } from "@roo/shared/aws_regions"

 import { vscode } from "@src/utils/vscode"
 import { validateApiConfiguration, validateModelId, validateBedrockArn } from "@src/utils/validate"
-import { useRouterModels } from "@/components/ui/hooks/useRouterModels"
-import { useSelectedModel } from "@/components/ui/hooks/useSelectedModel"
+import { useAppTranslation } from "@src/i18n/TranslationContext"
+import { useRouterModels } from "@src/components/ui/hooks/useRouterModels"
+import { useSelectedModel } from "@src/components/ui/hooks/useSelectedModel"
 import {
 	useOpenRouterModelProviders,
 	OPENROUTER_DEFAULT_PROVIDER_NAME,
 } from "@src/components/ui/hooks/useOpenRouterModelProviders"
 import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue, Button } from "@src/components/ui"
+import { getRequestyAuthUrl, getOpenRouterAuthUrl, getGlamaAuthUrl } from "@src/oauth/urls"

 import { VSCodeButtonLink } from "../common/VSCodeButtonLink"

-import { MODELS_BY_PROVIDER, PROVIDERS, VERTEX_REGIONS, REASONING_MODELS } from "./constants"
+import { MODELS_BY_PROVIDER, PROVIDERS, VERTEX_REGIONS, REASONING_MODELS, AWS_REGIONS } from "./constants"
 import { ModelInfoView } from "./ModelInfoView"
 import { ModelPicker } from "./ModelPicker"
 import { ApiErrorMessage } from "./ApiErrorMessage"

@@ -851,6 +851,41 @@
 					)}
 				</div>

+				<div className="flex flex-col gap-1">
+					<Checkbox
+						checked={apiConfiguration.enableReasoningEffort ?? false}
+						onChange={(checked: boolean) => {
+							setApiConfigurationField("enableReasoningEffort", checked)
+
+							if (!checked) {
+								const { reasoningEffort: _, ...openAiCustomModelInfo } =
+									apiConfiguration.openAiCustomModelInfo || openAiModelInfoSaneDefaults
+
+								setApiConfigurationField("openAiCustomModelInfo", openAiCustomModelInfo)
+							}
+						}}>
+						{t("settings:providers.setReasoningLevel")}
+					</Checkbox>
+					{!!apiConfiguration.enableReasoningEffort && (
+						<ReasoningEffort
+							apiConfiguration={{
+								...apiConfiguration,
+								reasoningEffort: apiConfiguration.openAiCustomModelInfo?.reasoningEffort,
+							}}
+							setApiConfigurationField={(field, value) => {
+								if (field === "reasoningEffort") {
+									const openAiCustomModelInfo =
+										apiConfiguration.openAiCustomModelInfo || openAiModelInfoSaneDefaults
+
+									setApiConfigurationField("openAiCustomModelInfo", {
+										...openAiCustomModelInfo,
+										reasoningEffort: value as ReasoningEffortType,
+									})
+								}
+							}}
+						/>
+					)}
+				</div>
 				<div className="flex flex-col gap-3">
 					<div className="text-sm text-vscode-descriptionForeground whitespace-pre-line">
 						{t("settings:providers.customModel.capabilities")}
