Skip to content

Commit 48ed76b

Browse files
committed
Merge branch 'main' into litellm-model-refresh-simple
2 parents 065c48b + 8729027 commit 48ed76b

File tree

71 files changed

+2762
-1118
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

71 files changed

+2762
-1118
lines changed

.changeset/sweet-turtles-wink.md

Lines changed: 0 additions & 5 deletions
This file was deleted.

.changeset/ten-bags-hang.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"roo-cline": patch
3+
---
4+
5+
Fix max tokens in task header

CHANGELOG.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,12 @@
11
# Roo Code Changelog
22

3+
## [3.18.2] - 2025-05-23
4+
5+
- Fix vscode-material-icons in the file picker
6+
- Fix global settings export
7+
- Respect user-configured terminal integration timeout (thanks @KJ7LNW)
8+
- Context condensing enhancements (thanks @SannidhyaSah)
9+
310
## [3.18.1] - 2025-05-22
411

512
- Add support for Claude Sonnet 4 and Claude Opus 4 models with thinking variants in Anthropic, Bedrock, and Vertex (thanks @shariqriazz!)

evals/packages/types/src/roo-code.ts

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -335,12 +335,14 @@ export type ProviderSettingsEntry = z.infer<typeof providerSettingsEntrySchema>
335335

336336
const genericProviderSettingsSchema = z.object({
337337
includeMaxTokens: z.boolean().optional(),
338-
reasoningEffort: reasoningEffortsSchema.optional(),
339338
diffEnabled: z.boolean().optional(),
340339
fuzzyMatchThreshold: z.number().optional(),
341340
modelTemperature: z.number().nullish(),
342341
rateLimitSeconds: z.number().optional(),
343-
// Claude 3.7 Sonnet Thinking
342+
343+
// Model reasoning.
344+
enableReasoningEffort: z.boolean().optional(),
345+
reasoningEffort: reasoningEffortsSchema.optional(),
344346
modelMaxTokens: z.number().optional(),
345347
modelMaxThinkingTokens: z.number().optional(),
346348
})

package.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
"clean": "turbo clean --log-order grouped --output-logs new-only && rimraf dist out bin .vite-port .turbo",
1515
"build": "pnpm --filter roo-cline vsix",
1616
"build:nightly": "pnpm --filter @roo-code/vscode-nightly vsix",
17+
"generate-types": "pnpm --filter roo-cline generate-types",
1718
"changeset:version": "cp CHANGELOG.md src/CHANGELOG.md && changeset version && cp -vf src/CHANGELOG.md .",
1819
"knip": "pnpm --filter @roo-code/build build && knip --include files",
1920
"update-contributors": "node scripts/update-contributors.js"

src/api/__tests__/index.test.ts

Lines changed: 0 additions & 257 deletions
This file was deleted.

src/api/index.ts

Lines changed: 1 addition & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,6 @@
11
import { Anthropic } from "@anthropic-ai/sdk"
2-
import { BetaThinkingConfigParam } from "@anthropic-ai/sdk/resources/beta/messages/index.mjs"
32

4-
import { ProviderSettings, ModelInfo, ApiHandlerOptions } from "../shared/api"
5-
import { ANTHROPIC_DEFAULT_MAX_TOKENS } from "./providers/constants"
3+
import { ProviderSettings, ModelInfo } from "../shared/api"
64
import { GlamaHandler } from "./providers/glama"
75
import { AnthropicHandler } from "./providers/anthropic"
86
import { AwsBedrockHandler } from "./providers/bedrock"
@@ -101,45 +99,3 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
10199
return new AnthropicHandler(options)
102100
}
103101
}
104-
105-
export function getModelParams({
106-
options,
107-
model,
108-
defaultMaxTokens,
109-
defaultTemperature = 0,
110-
defaultReasoningEffort,
111-
}: {
112-
options: ApiHandlerOptions
113-
model: ModelInfo
114-
defaultMaxTokens?: number
115-
defaultTemperature?: number
116-
defaultReasoningEffort?: "low" | "medium" | "high"
117-
}) {
118-
const {
119-
modelMaxTokens: customMaxTokens,
120-
modelMaxThinkingTokens: customMaxThinkingTokens,
121-
modelTemperature: customTemperature,
122-
reasoningEffort: customReasoningEffort,
123-
} = options
124-
125-
let maxTokens = model.maxTokens ?? defaultMaxTokens
126-
let thinking: BetaThinkingConfigParam | undefined = undefined
127-
let temperature = customTemperature ?? defaultTemperature
128-
const reasoningEffort = customReasoningEffort ?? defaultReasoningEffort
129-
130-
if (model.thinking) {
131-
// Only honor `customMaxTokens` for thinking models.
132-
maxTokens = customMaxTokens ?? maxTokens
133-
134-
// Clamp the thinking budget to be at most 80% of max tokens and at
135-
// least 1024 tokens.
136-
const maxBudgetTokens = Math.floor((maxTokens || ANTHROPIC_DEFAULT_MAX_TOKENS) * 0.8)
137-
const budgetTokens = Math.max(Math.min(customMaxThinkingTokens ?? maxBudgetTokens, maxBudgetTokens), 1024)
138-
thinking = { type: "enabled", budget_tokens: budgetTokens }
139-
140-
// Anthropic "Thinking" models require a temperature of 1.0.
141-
temperature = 1.0
142-
}
143-
144-
return { maxTokens, thinking, temperature, reasoningEffort }
145-
}

0 commit comments

Comments
 (0)