Skip to content

Commit 0231f6f

Browse files
committed
Merge branch 'main' into add_api_key_env_vars
Signed-off-by: Geoff Wilson <[email protected]>
2 parents 0879bcd + 7b0f489 commit 0231f6f

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

70 files changed

+1119
-316
lines changed

CHANGELOG.md

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,22 @@
11
# Roo Code Changelog
22

3+
## [3.25.12] - 2025-08-12
4+
5+
- Update: Claude Sonnet 4 context window configurable to 1 million tokens in Anthropic provider (thanks @daniel-lxs!)
6+
- Add: Minimal reasoning support to OpenRouter (thanks @daniel-lxs!)
7+
- Fix: Add configurable API request timeout for local providers (#6521 by @dabockster, PR by @app/roomote)
8+
- Fix: Add --no-sandbox flag to browser launch options (#6632 by @QuinsZouls, PR by @QuinsZouls)
9+
- Fix: Ensure JSON files respect .rooignore during indexing (#6690 by @evermoving, PR by @app/roomote)
10+
- Add: New Chutes provider models (#6698 by @fstandhartinger, PR by @app/roomote)
11+
- Add: OpenAI gpt-oss models to Amazon Bedrock dropdown (#6752 by @josh-clanton-powerschool, PR by @app/roomote)
12+
- Fix: Correct tool repetition detector to not block first tool call when limit is 1 (#6834 by @NaccOll, PR by @app/roomote)
13+
- Fix: Improve checkpoint service initialization handling (thanks @NaccOll!)
14+
- Update: Improve zh-TW Traditional Chinese locale (thanks @PeterDaveHello!)
15+
- Add: Task expand and collapse translations (thanks @app/roomote!)
16+
- Update: Exclude GPT-5 models from 20% context window output token cap (thanks @app/roomote!)
17+
- Fix: Truncate long model names in model selector to prevent overflow (thanks @app/roomote!)
18+
- Add: Requesty base url support (thanks @requesty-JohnCosta27!)
19+
320
## [3.25.11] - 2025-08-11
421

522
- Add: Native OpenAI provider support for Codex Mini model (#5386 by @KJ7LNW, PR by @daniel-lxs)

packages/types/src/provider-settings.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,7 @@ const anthropicSchema = apiModelIdProviderModelSchema.extend({
100100
anthropicConfigUseEnvVars: z.boolean().optional(),
101101
anthropicBaseUrl: z.string().optional(),
102102
anthropicUseAuthToken: z.boolean().optional(),
103+
anthropicBeta1MContext: z.boolean().optional(), // Enable 'context-1m-2025-08-07' beta for 1M context window
103104
})
104105

105106
const claudeCodeSchema = apiModelIdProviderModelSchema.extend({

packages/types/src/providers/anthropic.ts

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,15 +8,25 @@ export const anthropicDefaultModelId: AnthropicModelId = "claude-sonnet-4-202505
88
export const anthropicModels = {
99
"claude-sonnet-4-20250514": {
1010
maxTokens: 64_000, // Overridden to 8k if `enableReasoningEffort` is false.
11-
contextWindow: 200_000,
11+
contextWindow: 200_000, // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07'
1212
supportsImages: true,
1313
supportsComputerUse: true,
1414
supportsPromptCache: true,
15-
inputPrice: 3.0, // $3 per million input tokens
16-
outputPrice: 15.0, // $15 per million output tokens
15+
inputPrice: 3.0, // $3 per million input tokens (≤200K context)
16+
outputPrice: 15.0, // $15 per million output tokens (≤200K context)
1717
cacheWritesPrice: 3.75, // $3.75 per million tokens
1818
cacheReadsPrice: 0.3, // $0.30 per million tokens
1919
supportsReasoningBudget: true,
20+
// Tiered pricing for extended context (requires beta flag 'context-1m-2025-08-07')
21+
tiers: [
22+
{
23+
contextWindow: 1_000_000, // 1M tokens with beta flag
24+
inputPrice: 6.0, // $6 per million input tokens (>200K context)
25+
outputPrice: 22.5, // $22.50 per million output tokens (>200K context)
26+
cacheWritesPrice: 7.5, // $7.50 per million tokens (>200K context)
27+
cacheReadsPrice: 0.6, // $0.60 per million tokens (>200K context)
28+
},
29+
],
2030
},
2131
"claude-opus-4-1-20250805": {
2232
maxTokens: 8192,

packages/types/src/providers/bedrock.ts

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -221,6 +221,26 @@ export const bedrockModels = {
221221
inputPrice: 1.35,
222222
outputPrice: 5.4,
223223
},
224+
"openai.gpt-oss-20b-1:0": {
225+
maxTokens: 8192,
226+
contextWindow: 128_000,
227+
supportsImages: false,
228+
supportsComputerUse: false,
229+
supportsPromptCache: false,
230+
inputPrice: 0.5,
231+
outputPrice: 1.5,
232+
description: "GPT-OSS 20B - Optimized for low latency and local/specialized use cases",
233+
},
234+
"openai.gpt-oss-120b-1:0": {
235+
maxTokens: 8192,
236+
contextWindow: 128_000,
237+
supportsImages: false,
238+
supportsComputerUse: false,
239+
supportsPromptCache: false,
240+
inputPrice: 2.0,
241+
outputPrice: 6.0,
242+
description: "GPT-OSS 120B - Production-ready, general-purpose, high-reasoning model",
243+
},
224244
"meta.llama3-3-70b-instruct-v1:0": {
225245
maxTokens: 8192,
226246
contextWindow: 128_000,
Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
// npx vitest run api/providers/__tests__/lm-studio-timeout.spec.ts
2+
3+
import { LmStudioHandler } from "../lm-studio"
4+
import { ApiHandlerOptions } from "../../../shared/api"
5+
6+
// Mock the timeout config utility
7+
vitest.mock("../utils/timeout-config", () => ({
8+
getApiRequestTimeout: vitest.fn(),
9+
}))
10+
11+
import { getApiRequestTimeout } from "../utils/timeout-config"
12+
13+
// Mock OpenAI
14+
const mockOpenAIConstructor = vitest.fn()
15+
vitest.mock("openai", () => {
16+
return {
17+
__esModule: true,
18+
default: vitest.fn().mockImplementation((config) => {
19+
mockOpenAIConstructor(config)
20+
return {
21+
chat: {
22+
completions: {
23+
create: vitest.fn(),
24+
},
25+
},
26+
}
27+
}),
28+
}
29+
})
30+
31+
describe("LmStudioHandler timeout configuration", () => {
32+
beforeEach(() => {
33+
vitest.clearAllMocks()
34+
})
35+
36+
it("should use default timeout of 600 seconds when no configuration is set", () => {
37+
;(getApiRequestTimeout as any).mockReturnValue(600000)
38+
39+
const options: ApiHandlerOptions = {
40+
apiModelId: "llama2",
41+
lmStudioModelId: "llama2",
42+
lmStudioBaseUrl: "http://localhost:1234",
43+
}
44+
45+
new LmStudioHandler(options)
46+
47+
expect(getApiRequestTimeout).toHaveBeenCalled()
48+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
49+
expect.objectContaining({
50+
baseURL: "http://localhost:1234/v1",
51+
apiKey: "noop",
52+
timeout: 600000, // 600 seconds in milliseconds
53+
}),
54+
)
55+
})
56+
57+
it("should use custom timeout when configuration is set", () => {
58+
;(getApiRequestTimeout as any).mockReturnValue(1200000) // 20 minutes
59+
60+
const options: ApiHandlerOptions = {
61+
apiModelId: "llama2",
62+
lmStudioModelId: "llama2",
63+
lmStudioBaseUrl: "http://localhost:1234",
64+
}
65+
66+
new LmStudioHandler(options)
67+
68+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
69+
expect.objectContaining({
70+
timeout: 1200000, // 1200 seconds in milliseconds
71+
}),
72+
)
73+
})
74+
75+
it("should handle zero timeout (no timeout)", () => {
76+
;(getApiRequestTimeout as any).mockReturnValue(0)
77+
78+
const options: ApiHandlerOptions = {
79+
apiModelId: "llama2",
80+
lmStudioModelId: "llama2",
81+
}
82+
83+
new LmStudioHandler(options)
84+
85+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
86+
expect.objectContaining({
87+
timeout: 0, // No timeout
88+
}),
89+
)
90+
})
91+
})
Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,108 @@
1+
// npx vitest run api/providers/__tests__/ollama-timeout.spec.ts
2+
3+
import { OllamaHandler } from "../ollama"
4+
import { ApiHandlerOptions } from "../../../shared/api"
5+
6+
// Mock the timeout config utility
7+
vitest.mock("../utils/timeout-config", () => ({
8+
getApiRequestTimeout: vitest.fn(),
9+
}))
10+
11+
import { getApiRequestTimeout } from "../utils/timeout-config"
12+
13+
// Mock OpenAI
14+
const mockOpenAIConstructor = vitest.fn()
15+
vitest.mock("openai", () => {
16+
return {
17+
__esModule: true,
18+
default: vitest.fn().mockImplementation((config) => {
19+
mockOpenAIConstructor(config)
20+
return {
21+
chat: {
22+
completions: {
23+
create: vitest.fn(),
24+
},
25+
},
26+
}
27+
}),
28+
}
29+
})
30+
31+
describe("OllamaHandler timeout configuration", () => {
32+
beforeEach(() => {
33+
vitest.clearAllMocks()
34+
})
35+
36+
it("should use default timeout of 600 seconds when no configuration is set", () => {
37+
;(getApiRequestTimeout as any).mockReturnValue(600000)
38+
39+
const options: ApiHandlerOptions = {
40+
apiModelId: "llama2",
41+
ollamaModelId: "llama2",
42+
ollamaBaseUrl: "http://localhost:11434",
43+
}
44+
45+
new OllamaHandler(options)
46+
47+
expect(getApiRequestTimeout).toHaveBeenCalled()
48+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
49+
expect.objectContaining({
50+
baseURL: "http://localhost:11434/v1",
51+
apiKey: "ollama",
52+
timeout: 600000, // 600 seconds in milliseconds
53+
}),
54+
)
55+
})
56+
57+
it("should use custom timeout when configuration is set", () => {
58+
;(getApiRequestTimeout as any).mockReturnValue(3600000) // 1 hour
59+
60+
const options: ApiHandlerOptions = {
61+
apiModelId: "llama2",
62+
ollamaModelId: "llama2",
63+
}
64+
65+
new OllamaHandler(options)
66+
67+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
68+
expect.objectContaining({
69+
timeout: 3600000, // 3600 seconds in milliseconds
70+
}),
71+
)
72+
})
73+
74+
it("should handle zero timeout (no timeout)", () => {
75+
;(getApiRequestTimeout as any).mockReturnValue(0)
76+
77+
const options: ApiHandlerOptions = {
78+
apiModelId: "llama2",
79+
ollamaModelId: "llama2",
80+
ollamaBaseUrl: "http://localhost:11434",
81+
}
82+
83+
new OllamaHandler(options)
84+
85+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
86+
expect.objectContaining({
87+
timeout: 0, // No timeout
88+
}),
89+
)
90+
})
91+
92+
it("should use default base URL when not provided", () => {
93+
;(getApiRequestTimeout as any).mockReturnValue(600000)
94+
95+
const options: ApiHandlerOptions = {
96+
apiModelId: "llama2",
97+
ollamaModelId: "llama2",
98+
}
99+
100+
new OllamaHandler(options)
101+
102+
expect(mockOpenAIConstructor).toHaveBeenCalledWith(
103+
expect.objectContaining({
104+
baseURL: "http://localhost:11434/v1",
105+
}),
106+
)
107+
})
108+
})

0 commit comments

Comments
 (0)