Skip to content

Commit aa653db

Browse files
Merge branch 'main' into feature/add-valkey
2 parents 0482834 + cb0a58e commit aa653db

File tree

118 files changed

+3511
-967
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

118 files changed

+3511
-967
lines changed

.dockerignore

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,6 @@ src/node_modules
8080
!webview-ui/
8181
!packages/evals/.docker/entrypoints/runner.sh
8282
!packages/build/
83-
!packages/cloud/
8483
!packages/config-eslint/
8584
!packages/config-typescript/
8685
!packages/evals/

CHANGELOG.md

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,36 @@
11
# Roo Code Changelog
22

3+
## [3.25.8] - 2025-08-06
4+
5+
- Fix: Prevent disabled MCP servers from starting processes and show correct status (#6036 by @hannesrudolph, PR by @app/roomote)
6+
- Fix: Handle current directory path "." correctly in codebase_search tool (#6514 by @hannesrudolph, PR by @app/roomote)
7+
- Fix: Trim whitespace from OpenAI base URL to fix model detection (#6559 by @vauhochzett, PR by @app/roomote)
8+
- Feat: Reduce Gemini 2.5 Pro minimum thinking budget to 128 (thanks @app/roomote!)
9+
- Fix: Improve handling of net::ERR_ABORTED errors in URL fetching (#6632 by @QuinsZouls, PR by @app/roomote)
10+
- Fix: Recover from error state when Qdrant becomes available (#6660 by @hannesrudolph, PR by @app/roomote)
11+
- Fix: Resolve memory leak in ChatView virtual scrolling implementation (thanks @xyOz-dev!)
12+
- Add: Swift files to fallback list (#5857 by @niteshbalusu11, #6555 by @sealad886, PR by @niteshbalusu11)
13+
- Feat: Clamp default model max tokens to 20% of context window (thanks @mrubens!)
14+
15+
## [3.25.7] - 2025-08-05
16+
17+
- Add support for Claude Opus 4.1
18+
- Add Fireworks AI provider (#6653 by @ershang-fireworks, PR by @ershang-fireworks)
19+
- Add Z AI provider (thanks @jues!)
20+
- Add Groq support for GPT-OSS
21+
- Add Cerebras support for GPT-OSS
22+
- Add code indexing support for multiple folders similar to task history (#6197 by @NaccOll, PR by @NaccOll)
23+
- Make mode selection dropdowns responsive (#6423 by @AyazKaan, PR by @AyazKaan)
24+
- Redesigned task header and task history (thanks @brunobergher!)
25+
- Fix checkpoints timing and ensure checkpoints work properly (#4827 by @mrubens, PR by @NaccOll)
26+
- Fix empty mode names from being saved (#5766 by @kfxmvp, PR by @app/roomote)
27+
- Fix MCP server creation when setting is disabled (#6607 by @characharm, PR by @app/roomote)
28+
- Update highlight layer style and align to textarea (#6647 by @NaccOll, PR by @NaccOll)
29+
- Fix UI for approving chained commands
30+
- Use assistantMessageParser class instead of parseAssistantMessage (#5340 by @qdaxb, PR by @qdaxb)
31+
- Conditionally include reminder section based on todo list config (thanks @NaccOll!)
32+
- Task and TaskProvider event emitter cleanup with new events (thanks @cte!)
33+
334
## [3.25.6] - 2025-08-01
435

536
- Set horizon-beta model max tokens to 32k for OpenRouter (requested by @hannesrudolph, PR by @app/roomote)

package.json

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,9 @@
2323
"changeset:version": "cp CHANGELOG.md src/CHANGELOG.md && changeset version && cp -vf src/CHANGELOG.md .",
2424
"knip": "knip --include files",
2525
"update-contributors": "node scripts/update-contributors.js",
26-
"evals": "dotenvx run -f packages/evals/.env.development packages/evals/.env.local -- docker compose -f packages/evals/docker-compose.yml --profile server --profile runner up --build --scale runner=0"
26+
"evals": "dotenvx run -f packages/evals/.env.development packages/evals/.env.local -- docker compose -f packages/evals/docker-compose.yml --profile server --profile runner up --build --scale runner=0",
27+
"link-workspace-packages": "node scripts/link-packages.js",
28+
"unlink-workspace-packages": "node scripts/link-packages.js --unlink"
2729
},
2830
"devDependencies": {
2931
"@changesets/cli": "^2.27.10",

packages/evals/Dockerfile.runner

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,6 @@ WORKDIR /roo/repo
8484
RUN mkdir -p \
8585
scripts \
8686
packages/build \
87-
packages/cloud \
8887
packages/config-eslint \
8988
packages/config-typescript \
9089
packages/evals \
@@ -99,7 +98,6 @@ COPY ./pnpm-lock.yaml ./
9998
COPY ./pnpm-workspace.yaml ./
10099
COPY ./scripts/bootstrap.mjs ./scripts/
101100
COPY ./packages/build/package.json ./packages/build/
102-
COPY ./packages/cloud/package.json ./packages/cloud/
103101
COPY ./packages/config-eslint/package.json ./packages/config-eslint/
104102
COPY ./packages/config-typescript/package.json ./packages/config-typescript/
105103
COPY ./packages/evals/package.json ./packages/evals/

packages/types/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "@roo-code/types",
3-
"private": true,
3+
"version": "0.0.0",
44
"type": "module",
55
"main": "./dist/index.cjs",
66
"exports": {

packages/types/src/providers/anthropic.ts

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,18 @@ export const anthropicModels = {
1818
cacheReadsPrice: 0.3, // $0.30 per million tokens
1919
supportsReasoningBudget: true,
2020
},
21+
"claude-opus-4-1-20250805": {
22+
maxTokens: 8192,
23+
contextWindow: 200_000,
24+
supportsImages: true,
25+
supportsComputerUse: true,
26+
supportsPromptCache: true,
27+
inputPrice: 15.0, // $15 per million input tokens
28+
outputPrice: 75.0, // $75 per million output tokens
29+
cacheWritesPrice: 18.75, // $18.75 per million tokens
30+
cacheReadsPrice: 1.5, // $1.50 per million tokens
31+
supportsReasoningBudget: true,
32+
},
2133
"claude-opus-4-20250514": {
2234
maxTokens: 32_000, // Overridden to 8k if `enableReasoningEffort` is false.
2335
contextWindow: 200_000,

packages/types/src/providers/bedrock.ts

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -82,6 +82,21 @@ export const bedrockModels = {
8282
maxCachePoints: 4,
8383
cachableFields: ["system", "messages", "tools"],
8484
},
85+
"anthropic.claude-opus-4-1-20250805-v1:0": {
86+
maxTokens: 8192,
87+
contextWindow: 200_000,
88+
supportsImages: true,
89+
supportsComputerUse: true,
90+
supportsPromptCache: true,
91+
supportsReasoningBudget: true,
92+
inputPrice: 15.0,
93+
outputPrice: 75.0,
94+
cacheWritesPrice: 18.75,
95+
cacheReadsPrice: 1.5,
96+
minTokensPerCachePoint: 1024,
97+
maxCachePoints: 4,
98+
cachableFields: ["system", "messages", "tools"],
99+
},
85100
"anthropic.claude-opus-4-20250514-v1:0": {
86101
maxTokens: 8192,
87102
contextWindow: 200_000,

packages/types/src/providers/cerebras.ts

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -63,4 +63,14 @@ export const cerebrasModels = {
6363
description: "SOTA performance with ~1500 tokens/s",
6464
supportsReasoningEffort: true,
6565
},
66+
"gpt-oss-120b": {
67+
maxTokens: 8000,
68+
contextWindow: 64000,
69+
supportsImages: false,
70+
supportsPromptCache: false,
71+
inputPrice: 0,
72+
outputPrice: 0,
73+
description:
74+
"OpenAI GPT OSS model with ~2800 tokens/s\n\n• 64K context window\n• Excels at efficient reasoning across science, math, and coding",
75+
},
6676
} as const satisfies Record<string, ModelInfo>

packages/types/src/providers/claude-code.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,14 @@ export const claudeCodeModels = {
4848
supportsReasoningBudget: false,
4949
requiredReasoningBudget: false,
5050
},
51+
"claude-opus-4-1-20250805": {
52+
...anthropicModels["claude-opus-4-1-20250805"],
53+
supportsImages: false,
54+
supportsPromptCache: true, // Claude Code does report cache tokens
55+
supportsReasoningEffort: false,
56+
supportsReasoningBudget: false,
57+
requiredReasoningBudget: false,
58+
},
5159
"claude-opus-4-20250514": {
5260
...anthropicModels["claude-opus-4-20250514"],
5361
supportsImages: false,

packages/types/src/providers/groq.ts

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,8 @@ export type GroqModelId =
1111
| "qwen/qwen3-32b"
1212
| "deepseek-r1-distill-llama-70b"
1313
| "moonshotai/kimi-k2-instruct"
14+
| "openai/gpt-oss-120b"
15+
| "openai/gpt-oss-20b"
1416

1517
export const groqDefaultModelId: GroqModelId = "llama-3.3-70b-versatile" // Defaulting to Llama3 70B Versatile
1618

@@ -97,4 +99,24 @@ export const groqModels = {
9799
outputPrice: 3.0,
98100
description: "Moonshot AI Kimi K2 Instruct 1T model, 128K context.",
99101
},
102+
"openai/gpt-oss-120b": {
103+
maxTokens: 32766,
104+
contextWindow: 131072,
105+
supportsImages: false,
106+
supportsPromptCache: false,
107+
inputPrice: 0.15,
108+
outputPrice: 0.75,
109+
description:
110+
"GPT-OSS 120B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 120 billion parameters and 128 experts.",
111+
},
112+
"openai/gpt-oss-20b": {
113+
maxTokens: 32768,
114+
contextWindow: 131072,
115+
supportsImages: false,
116+
supportsPromptCache: false,
117+
inputPrice: 0.1,
118+
outputPrice: 0.5,
119+
description:
120+
"GPT-OSS 20B is OpenAI's flagship open source model, built on a Mixture-of-Experts (MoE) architecture with 20 billion parameters and 32 experts.",
121+
},
100122
} as const satisfies Record<string, ModelInfo>

0 commit comments

Comments
 (0)