Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions evals/packages/types/src/roo-code.ts
Original file line number Diff line number Diff line change
Expand Up @@ -340,7 +340,7 @@ const genericProviderSettingsSchema = z.object({
rateLimitSeconds: z.number().optional(),

// Model reasoning.
enableReasoningEffort: z.boolean().optional(),
setReasoningEffort: z.boolean().optional(),
reasoningEffort: reasoningEffortsSchema.optional(),
modelMaxTokens: z.number().optional(),
modelMaxThinkingTokens: z.number().optional(),
Expand Down Expand Up @@ -395,7 +395,7 @@ const openAiSchema = z.object({
openAiUseAzure: z.boolean().optional(),
azureApiVersion: z.string().optional(),
openAiStreamingEnabled: z.boolean().optional(),
enableReasoningEffort: z.boolean().optional(),
setReasoningEffort: z.boolean().optional(),
openAiHostHeader: z.string().optional(), // Keep temporarily for backward compatibility during migration.
openAiHeaders: z.record(z.string(), z.string()).optional(),
})
Expand Down Expand Up @@ -663,7 +663,7 @@ const providerSettingsRecord: ProviderSettingsRecord = {
openAiUseAzure: undefined,
azureApiVersion: undefined,
openAiStreamingEnabled: undefined,
enableReasoningEffort: undefined,
setReasoningEffort: undefined,
openAiHostHeader: undefined, // Keep temporarily for backward compatibility during migration
openAiHeaders: undefined,
// Ollama
Expand Down
4 changes: 2 additions & 2 deletions packages/types/src/provider-settings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ const baseProviderSettingsSchema = z.object({
rateLimitSeconds: z.number().optional(),

// Model reasoning.
enableReasoningEffort: z.boolean().optional(),
setReasoningEffort: z.boolean().optional(),
reasoningEffort: reasoningEffortsSchema.optional(),
modelMaxTokens: z.number().optional(),
modelMaxThinkingTokens: z.number().optional(),
Expand Down Expand Up @@ -331,7 +331,7 @@ export const PROVIDER_SETTINGS_KEYS = keysOf<ProviderSettings>()([
"codeIndexOpenAiKey",
"codeIndexQdrantApiKey",
// Reasoning
"enableReasoningEffort",
"setReasoningEffort",
"reasoningEffort",
"modelMaxTokens",
"modelMaxThinkingTokens",
Expand Down
4 changes: 2 additions & 2 deletions src/api/providers/__tests__/openai.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ describe("OpenAiHandler", () => {
it("should include reasoning_effort when reasoning effort is enabled", async () => {
const reasoningOptions: ApiHandlerOptions = {
...mockOptions,
enableReasoningEffort: true,
setReasoningEffort: true,
openAiCustomModelInfo: {
contextWindow: 128_000,
supportsPromptCache: false,
Expand All @@ -184,7 +184,7 @@ describe("OpenAiHandler", () => {
it("should not include reasoning_effort when reasoning effort is disabled", async () => {
const noReasoningOptions: ApiHandlerOptions = {
...mockOptions,
enableReasoningEffort: false,
setReasoningEffort: false,
openAiCustomModelInfo: { contextWindow: 128_000, supportsPromptCache: false },
}
const noReasoningHandler = new OpenAiHandler(noReasoningOptions)
Expand Down
7 changes: 4 additions & 3 deletions src/api/providers/gemini.ts
Original file line number Diff line number Diff line change
Expand Up @@ -120,9 +120,10 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
return {
id,
info,
thinkingConfig: this.options.modelMaxThinkingTokens
? { thinkingBudget: this.options.modelMaxThinkingTokens }
: undefined,
thinkingConfig:
this.options.setReasoningEffort && this.options.modelMaxThinkingTokens !== undefined
? { thinkingBudget: this.options.modelMaxThinkingTokens }
: undefined,
maxOutputTokens: this.options.modelMaxTokens ?? info.maxTokens ?? undefined,
}
}
Expand Down
7 changes: 4 additions & 3 deletions src/api/providers/vertex.ts
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,10 @@ export class VertexHandler extends GeminiHandler implements SingleCompletionHand
return {
id,
info,
thinkingConfig: this.options.modelMaxThinkingTokens
? { thinkingBudget: this.options.modelMaxThinkingTokens }
: undefined,
thinkingConfig:
this.options.setReasoningEffort && this.options.modelMaxThinkingTokens !== undefined
? { thinkingBudget: this.options.modelMaxThinkingTokens }
: undefined,
maxOutputTokens: this.options.modelMaxTokens ?? info.maxTokens ?? undefined,
}
}
Expand Down
14 changes: 7 additions & 7 deletions src/api/transform/__tests__/model-params.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -207,7 +207,7 @@ describe("getModelParams", () => {
})
})

it("should handle supportsReasoningBudget with enableReasoningEffort setting", () => {
it("should handle supportsReasoningBudget with setReasoningEffort setting", () => {
const model: ModelInfo = {
...baseModel,
maxTokens: 2000,
Expand All @@ -216,7 +216,7 @@ describe("getModelParams", () => {

const result = getModelParams({
...anthropicParams,
settings: { enableReasoningEffort: true },
settings: { setReasoningEffort: true },
model,
})

Expand All @@ -228,7 +228,7 @@ describe("getModelParams", () => {
})
})

it("should not use reasoning budget when supportsReasoningBudget is true but enableReasoningEffort is false", () => {
it("should not use reasoning budget when supportsReasoningBudget is true but setReasoningEffort is false", () => {
const model: ModelInfo = {
...baseModel,
maxTokens: 2000,
Expand All @@ -237,7 +237,7 @@ describe("getModelParams", () => {

const result = getModelParams({
...anthropicParams,
settings: { enableReasoningEffort: false },
settings: { setReasoningEffort: false },
model,
})

Expand Down Expand Up @@ -537,7 +537,7 @@ describe("getModelParams", () => {
it("should keep model maxTokens for hybrid models when using reasoning budget", () => {
const result = getModelParams({
...anthropicParams,
settings: { enableReasoningEffort: true },
settings: { setReasoningEffort: true },
model,
})

Expand All @@ -560,7 +560,7 @@ describe("getModelParams", () => {
// Only reasoning budget should be used (takes precedence)
const result = getModelParams({
...anthropicParams,
settings: { enableReasoningEffort: true },
settings: { setReasoningEffort: true },
model,
})

Expand Down Expand Up @@ -645,7 +645,7 @@ describe("getModelParams", () => {
const result = getModelParams({
...anthropicParams,
settings: {
enableReasoningEffort: true,
setReasoningEffort: true,
modelMaxTokens: 20000,
modelMaxThinkingTokens: 10000,
modelTemperature: 0.8,
Expand Down
18 changes: 9 additions & 9 deletions src/api/transform/__tests__/reasoning.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ describe("reasoning.ts", () => {
}

const settingsWithEnabled: ProviderSettings = {
enableReasoningEffort: true,
setReasoningEffort: true,
}

const options = {
Expand Down Expand Up @@ -108,7 +108,7 @@ describe("reasoning.ts", () => {
}

const settingsWithBoth: ProviderSettings = {
enableReasoningEffort: true,
setReasoningEffort: true,
reasoningEffort: "low",
}

Expand Down Expand Up @@ -189,14 +189,14 @@ describe("reasoning.ts", () => {
expect(result).toEqual({ max_tokens: 0 })
})

it("should not use reasoning budget when supportsReasoningBudget is true but enableReasoningEffort is false", () => {
it("should not use reasoning budget when supportsReasoningBudget is true but setReasoningEffort is false", () => {
const modelWithSupported: ModelInfo = {
...baseModel,
supportsReasoningBudget: true,
}

const settingsWithDisabled: ProviderSettings = {
enableReasoningEffort: false,
setReasoningEffort: false,
}

const options = {
Expand Down Expand Up @@ -252,7 +252,7 @@ describe("reasoning.ts", () => {
}

const settingsWithEnabled: ProviderSettings = {
enableReasoningEffort: true,
setReasoningEffort: true,
}

const options = {
Expand All @@ -274,14 +274,14 @@ describe("reasoning.ts", () => {
expect(result).toBeUndefined()
})

it("should return undefined when supportsReasoningBudget is true but enableReasoningEffort is false", () => {
it("should return undefined when supportsReasoningBudget is true but setReasoningEffort is false", () => {
const modelWithSupported: ModelInfo = {
...baseModel,
supportsReasoningBudget: true,
}

const settingsWithDisabled: ProviderSettings = {
enableReasoningEffort: false,
setReasoningEffort: false,
}

const options = {
Expand Down Expand Up @@ -513,7 +513,7 @@ describe("reasoning.ts", () => {
}

const settingsWithEnabled: ProviderSettings = {
enableReasoningEffort: true,
setReasoningEffort: true,
}

const options = {
Expand Down Expand Up @@ -583,7 +583,7 @@ describe("reasoning.ts", () => {
}

const settingsWithBoth: ProviderSettings = {
enableReasoningEffort: true,
setReasoningEffort: true,
reasoningEffort: "high",
}

Expand Down
4 changes: 2 additions & 2 deletions src/api/transform/model-params.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import type { ModelInfo, ProviderSettings } from "@roo-code/types"

import { ANTHROPIC_DEFAULT_MAX_TOKENS } from "../providers/constants"
import { shouldUseReasoningBudget, shouldUseReasoningEffort } from "../../shared/api"
import { shouldSetReasoningBudget, shouldUseReasoningEffort } from "../../shared/api"

import {
type AnthropicReasoningParams,
Expand Down Expand Up @@ -67,7 +67,7 @@ export function getModelParams({
let reasoningBudget: ModelParams["reasoningBudget"] = undefined
let reasoningEffort: ModelParams["reasoningEffort"] = undefined

if (shouldUseReasoningBudget({ model, settings })) {
if (shouldSetReasoningBudget({ model, settings })) {
// "Hybrid" reasoning models use the `reasoningBudget` parameter.
maxTokens = customMaxTokens ?? maxTokens

Expand Down
6 changes: 3 additions & 3 deletions src/api/transform/reasoning.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ import OpenAI from "openai"

import type { ModelInfo, ProviderSettings } from "@roo-code/types"

import { shouldUseReasoningBudget, shouldUseReasoningEffort } from "../../shared/api"
import { shouldSetReasoningBudget, shouldUseReasoningEffort } from "../../shared/api"

type ReasoningEffort = "low" | "medium" | "high"

Expand All @@ -30,7 +30,7 @@ export const getOpenRouterReasoning = ({
reasoningEffort,
settings,
}: GetModelReasoningOptions): OpenRouterReasoningParams | undefined =>
shouldUseReasoningBudget({ model, settings })
shouldSetReasoningBudget({ model, settings })
? { max_tokens: reasoningBudget }
: shouldUseReasoningEffort({ model, settings })
? { effort: reasoningEffort }
Expand All @@ -41,7 +41,7 @@ export const getAnthropicReasoning = ({
reasoningBudget,
settings,
}: GetModelReasoningOptions): AnthropicReasoningParams | undefined =>
shouldUseReasoningBudget({ model, settings }) ? { type: "enabled", budget_tokens: reasoningBudget! } : undefined
shouldSetReasoningBudget({ model, settings }) ? { type: "enabled", budget_tokens: reasoningBudget! } : undefined

export const getOpenAiReasoning = ({
model,
Expand Down
26 changes: 26 additions & 0 deletions src/core/config/ProviderSettingsManager.ts
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ export const providerProfilesSchema = z.object({
rateLimitSecondsMigrated: z.boolean().optional(),
diffSettingsMigrated: z.boolean().optional(),
openAiHeadersMigrated: z.boolean().optional(),
manualThinkingBudgetMigrated: z.boolean().optional(),
})
.optional(),
})
Expand All @@ -48,6 +49,7 @@ export class ProviderSettingsManager {
rateLimitSecondsMigrated: true, // Mark as migrated on fresh installs
diffSettingsMigrated: true, // Mark as migrated on fresh installs
openAiHeadersMigrated: true, // Mark as migrated on fresh installs
manualThinkingBudgetMigrated: true, // Mark as migrated on fresh installs
},
}

Expand Down Expand Up @@ -113,6 +115,7 @@ export class ProviderSettingsManager {
rateLimitSecondsMigrated: false,
diffSettingsMigrated: false,
openAiHeadersMigrated: false,
manualThinkingBudgetMigrated: false,
} // Initialize with default values
isDirty = true
}
Expand All @@ -135,6 +138,12 @@ export class ProviderSettingsManager {
isDirty = true
}

if (!providerProfiles.migrations.manualThinkingBudgetMigrated) {
await this.migrateManualThinkingBudget(providerProfiles)
providerProfiles.migrations.manualThinkingBudgetMigrated = true
isDirty = true
}

if (isDirty) {
await this.store(providerProfiles)
}
Expand Down Expand Up @@ -228,6 +237,23 @@ export class ProviderSettingsManager {
}
}

/**
 * One-time migration for the `setReasoningEffort` flag (renamed from
 * `enableReasoningEffort` elsewhere in this change set).
 *
 * For every stored API profile: if the user had already set a manual
 * thinking-token budget (`modelMaxThinkingTokens`) but has no explicit
 * `setReasoningEffort` value, turn the flag on so their manual control
 * keeps working after the upgrade. Profiles without a budget are left
 * untouched — the UI's default logic treats `undefined` as "automatic".
 *
 * Errors are logged and swallowed on purpose: a failed migration must not
 * prevent the extension from loading the user's provider profiles.
 *
 * @param providerProfiles - The in-memory profiles object; mutated in place.
 *   The caller (`initialize`) marks `manualThinkingBudgetMigrated` and
 *   persists the result, so this method itself does not store anything.
 */
private async migrateManualThinkingBudget(providerProfiles: ProviderProfiles) {
	try {
		// Name is unused; we only need each config object (mutated in place).
		for (const [_name, apiConfig] of Object.entries(providerProfiles.apiConfigs)) {
			// For existing users who have modelMaxThinkingTokens set, enable manual control.
			// This maintains backward compatibility - if they were manually setting thinking tokens,
			// they should continue to have manual control enabled.
			if (apiConfig.modelMaxThinkingTokens !== undefined && apiConfig.setReasoningEffort === undefined) {
				apiConfig.setReasoningEffort = true
			}
			// For new users or existing users without thinking tokens set,
			// default to false (automatic mode) - this is handled by the UI component's default logic.
		}
	} catch (error) {
		// Best-effort: log and continue so profile loading is never blocked by a migration failure.
		console.error(`[MigrateManualThinkingBudget] Failed to migrate manual thinking budget settings:`, error)
	}
}

/**
* List all available configs with metadata.
*/
Expand Down
Loading
Loading