
Commit d9ba9a9

Merge remote-tracking branch 'upstream/main' into update-io-intelligence-getmodels

# Conflicts:
#   src/core/webview/__tests__/ClineProvider.spec.ts
#   src/core/webview/__tests__/webviewMessageHandler.spec.ts

2 parents: ef3749a + 98b8d5b

124 files changed: +2075 additions, -299 deletions


CHANGELOG.md
Lines changed: 27 additions & 0 deletions

@@ -1,5 +1,32 @@
 # Roo Code Changelog
 
+## [3.29.0] - 2025-10-24
+
+![3.29.0 Release - Intelligent File Reading](/releases/3.29.0-release.png)
+
+- Add token-budget based file reading with intelligent preview to avoid context overruns (thanks @daniel-lxs!)
+- Enable browser-use tool for all image-capable models (#8116 by @hannesrudolph, PR by @app/roomote!)
+- Add dynamic model loading for Roo Code Cloud provider (thanks @app/roomote!)
+- Fix: Respect nested .gitignore files in search_files (#7921 by @hannesrudolph, PR by @daniel-lxs)
+- Fix: Preserve trailing newlines in stripLineNumbers for apply_diff (#8020 by @liyi3c, PR by @app/roomote)
+- Fix: Exclude max tokens field for models that don't support it in export (#7944 by @hannesrudolph, PR by @elianiva)
+- Retry API requests on stream failures instead of aborting task (thanks @daniel-lxs!)
+- Improve auto-approve button responsiveness (thanks @daniel-lxs!)
+- Add checkpoint initialization timeout settings and fix checkpoint timeout warnings (#7843 by @NaccOll, PR by @NaccOll)
+- Always show checkpoint restore options regardless of change detection (thanks @daniel-lxs!)
+- Improve checkpoint menu translations (thanks @daniel-lxs!)
+- Add GLM-4.6-turbo model to chutes ai provider (thanks @mohammad154!)
+- Add Claude Haiku 4.5 to prompt caching models (thanks @hannesrudolph!)
+- Expand Z.ai model coverage with GLM-4.5-X, AirX, Flash (thanks @hannesrudolph!)
+- Update Mistral Medium model name (#8362 by @ThomsenDrake, PR by @ThomsenDrake)
+- Remove GPT-5 instructions/reasoning_summary from UI message metadata to prevent ui_messages.json bloat (thanks @hannesrudolph!)
+- Normalize docs-extractor audience tags; remove admin/stakeholder; strip tool invocations (thanks @hannesrudolph!)
+- Update X/Twitter username from roo_code to roocode (thanks @app/roomote!)
+- Update Configuring Profiles video link (thanks @app/roomote!)
+- Fix link text for Roomote Control in README (thanks @laz-001!)
+- Remove verbose error for cloud agents (thanks @cte!)
+- Try 5s status mutation timeout (thanks @cte!)
+
 ## [3.28.18] - 2025-10-17
 
 - Fix: Remove request content from UI messages to improve performance and reduce clutter (#5601 by @MuriloFP, #8594 by @multivac2x, #8690 by @hannesrudolph, PR by @mrubens)

packages/types/src/global-settings.ts
Lines changed: 21 additions & 0 deletions

@@ -29,6 +29,21 @@ export const DEFAULT_WRITE_DELAY_MS = 1000
  */
 export const DEFAULT_TERMINAL_OUTPUT_CHARACTER_LIMIT = 50_000
 
+/**
+ * Minimum checkpoint timeout in seconds.
+ */
+export const MIN_CHECKPOINT_TIMEOUT_SECONDS = 10
+
+/**
+ * Maximum checkpoint timeout in seconds.
+ */
+export const MAX_CHECKPOINT_TIMEOUT_SECONDS = 60
+
+/**
+ * Default checkpoint timeout in seconds.
+ */
+export const DEFAULT_CHECKPOINT_TIMEOUT_SECONDS = 15
+
 /**
  * GlobalSettings
  */
@@ -97,6 +112,12 @@ export const globalSettingsSchema = z.object({
     cachedChromeHostUrl: z.string().optional(),
 
     enableCheckpoints: z.boolean().optional(),
+    checkpointTimeout: z
+        .number()
+        .int()
+        .min(MIN_CHECKPOINT_TIMEOUT_SECONDS)
+        .max(MAX_CHECKPOINT_TIMEOUT_SECONDS)
+        .optional(),
 
     ttsEnabled: z.boolean().optional(),
     ttsSpeed: z.number().optional(),
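
As a quick illustration of what the new bounds do at parse time, here is a minimal sketch using zod directly; the schema fragment mirrors the field added above and the sample values are arbitrary:

    import { z } from "zod"

    const MIN_CHECKPOINT_TIMEOUT_SECONDS = 10
    const MAX_CHECKPOINT_TIMEOUT_SECONDS = 60

    // Mirrors the checkpointTimeout field added to globalSettingsSchema above.
    const checkpointTimeout = z
        .number()
        .int()
        .min(MIN_CHECKPOINT_TIMEOUT_SECONDS)
        .max(MAX_CHECKPOINT_TIMEOUT_SECONDS)
        .optional()

    checkpointTimeout.safeParse(15)        // success: true  (within 10-60)
    checkpointTimeout.safeParse(undefined) // success: true  (field is optional)
    checkpointTimeout.safeParse(5)         // success: false (below the 10s minimum)
    checkpointTimeout.safeParse(90)        // success: false (above the 60s maximum)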

packages/types/src/provider-settings.ts
Lines changed: 2 additions & 2 deletions

@@ -19,7 +19,6 @@ import {
     moonshotModels,
     openAiNativeModels,
     qwenCodeModels,
-    rooModels,
     sambaNovaModels,
     vertexModels,
     vscodeLlmModels,
@@ -49,6 +48,7 @@ export const dynamicProviders = [
     "requesty",
     "unbound",
     "glama",
+    "roo",
 ] as const
 
 export type DynamicProvider = (typeof dynamicProviders)[number]
@@ -677,7 +677,7 @@ export const MODELS_BY_PROVIDER: Record<
         models: Object.keys(openAiNativeModels),
     },
     "qwen-code": { id: "qwen-code", label: "Qwen Code", models: Object.keys(qwenCodeModels) },
-    roo: { id: "roo", label: "Roo", models: Object.keys(rooModels) },
+    roo: { id: "roo", label: "Roo Code Cloud", models: [] },
     sambanova: {
         id: "sambanova",
         label: "SambaNova",

packages/types/src/providers/mistral.ts
Lines changed: 3 additions & 3 deletions

@@ -7,9 +7,9 @@ export const mistralDefaultModelId: MistralModelId = "codestral-latest"
 
 export const mistralModels = {
     "magistral-medium-latest": {
-        maxTokens: 41_000,
-        contextWindow: 41_000,
-        supportsImages: false,
+        maxTokens: 8192,
+        contextWindow: 128_000,
+        supportsImages: true,
         supportsPromptCache: false,
         inputPrice: 2.0,
         outputPrice: 5.0,
Lines changed: 47 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -1,53 +1,49 @@
1+
import { z } from "zod"
2+
13
import type { ModelInfo } from "../model.js"
24

3-
export type RooModelId =
4-
| "xai/grok-code-fast-1"
5-
| "roo/code-supernova-1-million"
6-
| "xai/grok-4-fast"
7-
| "deepseek/deepseek-chat-v3.1"
8-
9-
export const rooDefaultModelId: RooModelId = "xai/grok-code-fast-1"
10-
11-
export const rooModels = {
12-
"xai/grok-code-fast-1": {
13-
maxTokens: 16_384,
14-
contextWindow: 262_144,
15-
supportsImages: false,
16-
supportsPromptCache: true,
17-
inputPrice: 0,
18-
outputPrice: 0,
19-
description:
20-
"A reasoning model that is blazing fast and excels at agentic coding, accessible for free through Roo Code Cloud for a limited time. (Note: the free prompts and completions are logged by xAI and used to improve the model.)",
21-
},
22-
"roo/code-supernova-1-million": {
23-
maxTokens: 30_000,
24-
contextWindow: 1_000_000,
25-
supportsImages: true,
26-
supportsPromptCache: true,
27-
inputPrice: 0,
28-
outputPrice: 0,
29-
description:
30-
"A versatile agentic coding stealth model with a 1M token context window that supports image inputs, accessible for free through Roo Code Cloud for a limited time. (Note: the free prompts and completions are logged by the model provider and used to improve the model.)",
31-
},
32-
"xai/grok-4-fast": {
33-
maxTokens: 30_000,
34-
contextWindow: 2_000_000,
35-
supportsImages: false,
36-
supportsPromptCache: false,
37-
inputPrice: 0,
38-
outputPrice: 0,
39-
description:
40-
"Grok 4 Fast is xAI's latest multimodal model with SOTA cost-efficiency and a 2M token context window. (Note: prompts and completions are logged by xAI and used to improve the model.)",
41-
deprecated: true,
42-
},
43-
"deepseek/deepseek-chat-v3.1": {
44-
maxTokens: 16_384,
45-
contextWindow: 163_840,
46-
supportsImages: false,
47-
supportsPromptCache: false,
48-
inputPrice: 0,
49-
outputPrice: 0,
50-
description:
51-
"DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active). It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference.",
52-
},
53-
} as const satisfies Record<string, ModelInfo>
5+
/**
6+
* Roo Code Cloud is a dynamic provider - models are loaded from the /v1/models API endpoint.
7+
* Default model ID used as fallback when no model is specified.
8+
*/
9+
export const rooDefaultModelId = "xai/grok-code-fast-1"
10+
11+
/**
12+
* Empty models object maintained for type compatibility.
13+
* All model data comes dynamically from the API.
14+
*/
15+
export const rooModels = {} as const satisfies Record<string, ModelInfo>
16+
17+
/**
18+
* Roo Code Cloud API response schemas
19+
*/
20+
21+
export const RooPricingSchema = z.object({
22+
input: z.string(),
23+
output: z.string(),
24+
input_cache_read: z.string().optional(),
25+
input_cache_write: z.string().optional(),
26+
})
27+
28+
export const RooModelSchema = z.object({
29+
id: z.string(),
30+
object: z.literal("model"),
31+
created: z.number(),
32+
owned_by: z.string(),
33+
name: z.string(),
34+
description: z.string(),
35+
context_window: z.number(),
36+
max_tokens: z.number(),
37+
type: z.literal("language"),
38+
tags: z.array(z.string()).optional(),
39+
pricing: RooPricingSchema,
40+
deprecated: z.boolean().optional(),
41+
})
42+
43+
export const RooModelsResponseSchema = z.object({
44+
object: z.literal("list"),
45+
data: z.array(RooModelSchema),
46+
})
47+
48+
export type RooModel = z.infer<typeof RooModelSchema>
49+
export type RooModelsResponse = z.infer<typeof RooModelsResponseSchema>
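
A hedged sketch of how a client could load and validate models against these schemas. The base URL, error handling, and fetch wiring here are illustrative assumptions, not the extension's actual plumbing:

    import { RooModelsResponseSchema, type RooModel } from "@roo-code/types"

    // Hypothetical base URL - the real host is configured elsewhere in the extension.
    const ROO_API_BASE = "https://api.example.com"

    async function fetchRooModels(): Promise<RooModel[]> {
        const res = await fetch(`${ROO_API_BASE}/v1/models`)
        if (!res.ok) {
            throw new Error(`Failed to fetch Roo models: HTTP ${res.status}`)
        }
        // Validate the payload at the boundary instead of trusting it blindly;
        // parse() throws if the response does not match RooModelsResponseSchema.
        const parsed = RooModelsResponseSchema.parse(await res.json())
        return parsed.data
    }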

releases/3.29.0-release.png (binary image, 1.03 MB)

src/__tests__/extension.spec.ts
Lines changed: 1 addition & 0 deletions

@@ -168,6 +168,7 @@ vi.mock("../activate", () => ({
 
 vi.mock("../i18n", () => ({
     initializeI18n: vi.fn(),
+    t: vi.fn((key) => key),
 }))
 
 describe("extension.ts", () => {

src/api/providers/__tests__/chutes.spec.ts
Lines changed: 4 additions & 1 deletion

@@ -460,10 +460,13 @@ describe("ChutesHandler", () => {
     const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages)
     await messageGenerator.next()
 
+    // Centralized 20% cap should apply to OpenAI-compatible providers like Chutes
+    const expectedMaxTokens = Math.min(modelInfo.maxTokens, Math.ceil(modelInfo.contextWindow * 0.2))
+
     expect(mockCreate).toHaveBeenCalledWith(
         expect.objectContaining({
             model: modelId,
-            max_tokens: modelInfo.maxTokens,
+            max_tokens: expectedMaxTokens,
             temperature: 0.5,
             messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
             stream: true,
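
For intuition about the cap this test now asserts, a worked example with illustrative numbers (not tied to any particular Chutes model); the same computation applies to the Z AI test below:

    // Model claims maxTokens = 98_304 with contextWindow = 131_072:
    const cap = Math.ceil(131_072 * 0.2)    // 26_215 - 20% of the context window
    const effective = Math.min(98_304, cap) // 26_215 - the cap binds

    // Model claims maxTokens = 16_384 with contextWindow = 262_144:
    // the cap would be 52_429, so the model's own limit (16_384) is used unchanged.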

src/api/providers/__tests__/roo.spec.ts
Lines changed: 22 additions & 16 deletions

@@ -1,7 +1,7 @@
 // npx vitest run api/providers/__tests__/roo.spec.ts
 
 import { Anthropic } from "@anthropic-ai/sdk"
-import { rooDefaultModelId, rooModels } from "@roo-code/types"
+import { rooDefaultModelId } from "@roo-code/types"
 
 import { ApiHandlerOptions } from "../../../shared/api"
 
@@ -301,16 +301,19 @@ describe("RooHandler", () => {
         const modelInfo = handler.getModel()
         expect(modelInfo.id).toBe(mockOptions.apiModelId)
         expect(modelInfo.info).toBeDefined()
-        // xai/grok-code-fast-1 is a valid model in rooModels
-        expect(modelInfo.info).toBe(rooModels["xai/grok-code-fast-1"])
+        // Models are loaded dynamically, so we just verify the structure
+        expect(modelInfo.info.maxTokens).toBeDefined()
+        expect(modelInfo.info.contextWindow).toBeDefined()
     })
 
     it("should return default model when no model specified", () => {
         const handlerWithoutModel = new RooHandler({})
         const modelInfo = handlerWithoutModel.getModel()
         expect(modelInfo.id).toBe(rooDefaultModelId)
         expect(modelInfo.info).toBeDefined()
-        expect(modelInfo.info).toBe(rooModels[rooDefaultModelId])
+        // Models are loaded dynamically
+        expect(modelInfo.info.maxTokens).toBeDefined()
+        expect(modelInfo.info.contextWindow).toBeDefined()
     })
 
     it("should handle unknown model ID with fallback info", () => {
@@ -320,24 +323,27 @@ describe("RooHandler", () => {
         const modelInfo = handlerWithUnknownModel.getModel()
         expect(modelInfo.id).toBe("unknown-model-id")
         expect(modelInfo.info).toBeDefined()
-        // Should return fallback info for unknown models
-        expect(modelInfo.info.maxTokens).toBe(16_384)
-        expect(modelInfo.info.contextWindow).toBe(262_144)
-        expect(modelInfo.info.supportsImages).toBe(false)
-        expect(modelInfo.info.supportsPromptCache).toBe(true)
-        expect(modelInfo.info.inputPrice).toBe(0)
-        expect(modelInfo.info.outputPrice).toBe(0)
+        // Should return fallback info for unknown models (dynamic models will be merged in real usage)
+        expect(modelInfo.info.maxTokens).toBeDefined()
+        expect(modelInfo.info.contextWindow).toBeDefined()
+        expect(modelInfo.info.supportsImages).toBeDefined()
+        expect(modelInfo.info.supportsPromptCache).toBeDefined()
+        expect(modelInfo.info.inputPrice).toBeDefined()
+        expect(modelInfo.info.outputPrice).toBeDefined()
     })
 
-    it("should return correct model info for all Roo models", () => {
-        // Test each model in rooModels
-        const modelIds = Object.keys(rooModels) as Array<keyof typeof rooModels>
+    it("should handle any model ID since models are loaded dynamically", () => {
+        // Test with various model IDs - they should all work since models are loaded dynamically
+        const testModelIds = ["xai/grok-code-fast-1", "roo/sonic", "deepseek/deepseek-chat-v3.1"]
 
-        for (const modelId of modelIds) {
+        for (const modelId of testModelIds) {
             const handlerWithModel = new RooHandler({ apiModelId: modelId })
             const modelInfo = handlerWithModel.getModel()
             expect(modelInfo.id).toBe(modelId)
-            expect(modelInfo.info).toBe(rooModels[modelId])
+            expect(modelInfo.info).toBeDefined()
+            // Verify the structure has required fields
+            expect(modelInfo.info.maxTokens).toBeDefined()
+            expect(modelInfo.info.contextWindow).toBeDefined()
         }
     })
 })

src/api/providers/__tests__/zai.spec.ts
Lines changed: 4 additions & 1 deletion

@@ -280,10 +280,13 @@ describe("ZAiHandler", () => {
     const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages)
     await messageGenerator.next()
 
+    // Centralized 20% cap should apply to OpenAI-compatible providers like Z AI
+    const expectedMaxTokens = Math.min(modelInfo.maxTokens, Math.ceil(modelInfo.contextWindow * 0.2))
+
     expect(mockCreate).toHaveBeenCalledWith(
         expect.objectContaining({
             model: modelId,
-            max_tokens: modelInfo.maxTokens,
+            max_tokens: expectedMaxTokens,
             temperature: ZAI_DEFAULT_TEMPERATURE,
             messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]),
             stream: true,
