
Commit 995f20f

Merge branch 'main' into feature/sap-ai-core-provider
2 parents: 4b10dce + f02a2bb

File tree: 114 files changed (+3156 −1430 lines)


CHANGELOG.md

Lines changed: 20 additions & 0 deletions

@@ -1,5 +1,25 @@
 # Roo Code Changelog
 
+## [3.26.0] - 2025-08-26
+
+- Sonic -> Grok Code Fast
+- feat: Add Qwen Code CLI API Support with OAuth Authentication (thanks @evinelias and Cline!)
+- feat: Add Deepseek v3.1 to Fireworks AI provider (#7374 by @dmarkey, PR by @app/roomote)
+- Add a built-in /init slash command (thanks @mrubens and @hannesrudolph!)
+- Fix: Make auto approve toggle trigger stay (#3909 by @kyle-apex, PR by @elianiva)
+- Fix: Preserve user input when selecting follow-up choices (#7316 by @teihome, PR by @daniel-lxs)
+- Fix: Handle Mistral thinking content as reasoning chunks (#6842 by @Biotrioo, PR by @app/roomote)
+- Fix: Resolve newTaskRequireTodos setting not working correctly (thanks @hannesrudolph!)
+- Fix: Requesty model listing (#7377 by @dtrugman, PR by @dtrugman)
+- feat: Hide static providers with no models from provider list (thanks @daniel-lxs!)
+- Add todos parameter to new_task tool usage in issue-fixer mode (thanks @hannesrudolph!)
+- Handle substitution patterns in command validation (thanks @mrubens!)
+- Mark code-workspace files as protected (thanks @mrubens!)
+- Update list of default allowed commands (thanks @mrubens!)
+- Follow symlinks in rooignore checks (thanks @mrubens!)
+- Show cache read and write prices for OpenRouter inference providers (thanks @chrarnoldus!)
+- chore(deps): Update dependency drizzle-kit to v0.31.4 (thanks @app/renovate!)
+
 ## [3.25.23] - 2025-08-22
 
 - feat: add custom base URL support for Requesty provider (thanks @requesty-JohnCosta27!)

apps/web-evals/package.json

Lines changed: 1 addition & 1 deletion

@@ -24,7 +24,7 @@
 		"@radix-ui/react-slider": "^1.2.4",
 		"@radix-ui/react-slot": "^1.1.2",
 		"@radix-ui/react-tabs": "^1.1.3",
-		"@radix-ui/react-tooltip": "^1.1.8",
+		"@radix-ui/react-tooltip": "^1.2.8",
 		"@roo-code/evals": "workspace:^",
 		"@roo-code/types": "workspace:^",
 		"@tanstack/react-query": "^5.69.0",

packages/types/npm/package.metadata.json

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 {
 	"name": "@roo-code/types",
-	"version": "1.60.0",
+	"version": "1.61.0",
 	"description": "TypeScript type definitions for Roo Code.",
 	"publishConfig": {
 		"access": "public",

packages/types/src/__tests__/provider-settings.test.ts

Lines changed: 20 additions & 0 deletions

@@ -39,6 +39,26 @@ describe("getApiProtocol", () => {
 		})
 	})
 
+	describe("Vercel AI Gateway provider", () => {
+		it("should return 'anthropic' for vercel-ai-gateway provider with anthropic models", () => {
+			expect(getApiProtocol("vercel-ai-gateway", "anthropic/claude-3-opus")).toBe("anthropic")
+			expect(getApiProtocol("vercel-ai-gateway", "anthropic/claude-3.5-sonnet")).toBe("anthropic")
+			expect(getApiProtocol("vercel-ai-gateway", "ANTHROPIC/claude-sonnet-4")).toBe("anthropic")
+			expect(getApiProtocol("vercel-ai-gateway", "anthropic/claude-opus-4.1")).toBe("anthropic")
+		})
+
+		it("should return 'openai' for vercel-ai-gateway provider with non-anthropic models", () => {
+			expect(getApiProtocol("vercel-ai-gateway", "openai/gpt-4")).toBe("openai")
+			expect(getApiProtocol("vercel-ai-gateway", "google/gemini-pro")).toBe("openai")
+			expect(getApiProtocol("vercel-ai-gateway", "meta/llama-3")).toBe("openai")
+			expect(getApiProtocol("vercel-ai-gateway", "mistral/mixtral")).toBe("openai")
+		})
+
+		it("should return 'openai' for vercel-ai-gateway provider without model", () => {
+			expect(getApiProtocol("vercel-ai-gateway")).toBe("openai")
+		})
+	})
+
 	describe("Other providers", () => {
 		it("should return 'openai' for non-anthropic providers regardless of model", () => {
 			expect(getApiProtocol("openrouter", "claude-3-opus")).toBe("openai")

packages/types/src/global-settings.ts

Lines changed: 1 addition & 0 deletions

@@ -198,6 +198,7 @@ export const SECRET_STATE_KEYS = [
 	"fireworksApiKey",
 	"featherlessApiKey",
 	"ioIntelligenceApiKey",
+	"vercelAiGatewayApiKey",
 ] as const satisfies readonly (keyof ProviderSettings)[]
 export type SecretState = Pick<ProviderSettings, (typeof SECRET_STATE_KEYS)[number]>
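
The `as const satisfies` pattern in this hunk is what keeps the secret-key list and the derived `SecretState` type in lockstep. A minimal standalone sketch of the same pattern, using a hypothetical `Settings` type rather than anything from this commit:

interface Settings {
	apiKey?: string
	baseUrl?: string
	timeout?: number
}

// `as const` preserves the literal element types; `satisfies` rejects any
// entry that is not a real key of Settings, without widening to string[].
const SECRET_KEYS = ["apiKey"] as const satisfies readonly (keyof Settings)[]

// Adding a key to SECRET_KEYS automatically widens this picked type.
type Secrets = Pick<Settings, (typeof SECRET_KEYS)[number]> // { apiKey?: string }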

packages/types/src/provider-settings.ts

Lines changed: 14 additions & 0 deletions

@@ -68,6 +68,7 @@ export const providerNames = [
 	"io-intelligence",
 	"roo",
 	"sapaicore",
+	"vercel-ai-gateway",
 ] as const
 
 export const providerNamesSchema = z.enum(providerNames)

@@ -331,6 +332,9 @@ const sapAiCoreSchema = apiModelIdProviderModelSchema.extend({
 	sapAiCoreBaseUrl: z.string().optional(),
 	reasoningEffort: z.string().optional(),
 	thinkingBudgetTokens: z.number().optional(),
+const vercelAiGatewaySchema = baseProviderSettingsSchema.extend({
+	vercelAiGatewayApiKey: z.string().optional(),
+	vercelAiGatewayModelId: z.string().optional(),
 })
 
 const defaultSchema = z.object({

@@ -373,6 +377,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
 	qwenCodeSchema.merge(z.object({ apiProvider: z.literal("qwen-code") })),
 	rooSchema.merge(z.object({ apiProvider: z.literal("roo") })),
 	sapAiCoreSchema.merge(z.object({ apiProvider: z.literal("sapaicore") })),
+	vercelAiGatewaySchema.merge(z.object({ apiProvider: z.literal("vercel-ai-gateway") })),
 	defaultSchema,
 ])

@@ -413,6 +418,7 @@ export const providerSettingsSchema = z.object({
 	...qwenCodeSchema.shape,
 	...rooSchema.shape,
 	...sapAiCoreSchema.shape,
+	...vercelAiGatewaySchema.shape,
 	...codebaseIndexProviderSchema.shape,
 })

@@ -439,6 +445,7 @@ export const MODEL_ID_KEYS: Partial<keyof ProviderSettings>[] = [
 	"litellmModelId",
 	"huggingFaceModelId",
 	"ioIntelligenceModelId",
+	"vercelAiGatewayModelId",
 ]
 
 export const getModelId = (settings: ProviderSettings): string | undefined => {

@@ -458,6 +465,11 @@ export const getApiProtocol = (provider: ProviderName | undefined, modelId?: str
 		return "anthropic"
 	}
 
+	// Vercel AI Gateway uses anthropic protocol for anthropic models
+	if (provider && provider === "vercel-ai-gateway" && modelId && modelId.toLowerCase().startsWith("anthropic/")) {
+		return "anthropic"
+	}
+
 	return "openai"
 }

@@ -560,6 +572,7 @@ export const MODELS_BY_PROVIDER: Record<
 	openrouter: { id: "openrouter", label: "OpenRouter", models: [] },
 	requesty: { id: "requesty", label: "Requesty", models: [] },
 	unbound: { id: "unbound", label: "Unbound", models: [] },
+	"vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] },
 }

@@ -569,6 +582,7 @@ export const dynamicProviders = [
 	"openrouter",
 	"requesty",
 	"unbound",
+	"vercel-ai-gateway",
 ] as const satisfies readonly ProviderName[]
 
 export type DynamicProvider = (typeof dynamicProviders)[number]
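
To see what the new `getApiProtocol` branch (and the tests earlier in this commit) encode, here is a minimal standalone sketch of the routing behavior, with the provider type simplified to a plain string; this is an illustration, not code from the commit:

type ApiProtocol = "anthropic" | "openai"

const getApiProtocol = (provider?: string, modelId?: string): ApiProtocol => {
	// Gateway model ids are namespaced "vendor/model"; only the anthropic/*
	// namespace is routed over the Anthropic protocol (case-insensitively).
	if (provider === "vercel-ai-gateway" && modelId?.toLowerCase().startsWith("anthropic/")) {
		return "anthropic"
	}
	return "openai"
}

getApiProtocol("vercel-ai-gateway", "ANTHROPIC/claude-sonnet-4") // "anthropic"
getApiProtocol("vercel-ai-gateway", "openai/gpt-4o") // "openai"
getApiProtocol("vercel-ai-gateway") // "openai"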

packages/types/src/providers/index.ts

Lines changed: 1 addition & 0 deletions

@@ -28,4 +28,5 @@ export * from "./unbound.js"
 export * from "./vertex.js"
 export * from "./vscode-llm.js"
 export * from "./xai.js"
+export * from "./vercel-ai-gateway.js"
 export * from "./zai.js"

packages/types/src/providers/roo.ts

Lines changed: 4 additions & 4 deletions

@@ -1,19 +1,19 @@
 import type { ModelInfo } from "../model.js"
 
 // Roo provider with single model
-export type RooModelId = "roo/sonic"
+export type RooModelId = "xai/grok-code-fast-1"
 
-export const rooDefaultModelId: RooModelId = "roo/sonic"
+export const rooDefaultModelId: RooModelId = "xai/grok-code-fast-1"
 
 export const rooModels = {
-	"roo/sonic": {
+	"xai/grok-code-fast-1": {
 		maxTokens: 16_384,
 		contextWindow: 262_144,
 		supportsImages: false,
 		supportsPromptCache: true,
 		inputPrice: 0,
 		outputPrice: 0,
 		description:
-			"A stealth reasoning model that is blazing fast and excels at agentic coding, accessible for free through Roo Code Cloud for a limited time. (Note: prompts and completions are logged by the model creator and used to improve the model.)",
+			"A reasoning model that is blazing fast and excels at agentic coding, accessible for free through Roo Code Cloud for a limited time. (Note: the free prompts and completions are logged by xAI and used to improve the model.)",
 	},
 } as const satisfies Record<string, ModelInfo>

packages/types/src/providers/vercel-ai-gateway.ts

Lines changed: 102 additions & 0 deletions

@@ -0,0 +1,102 @@
+import type { ModelInfo } from "../model.js"
+
+// https://ai-gateway.vercel.sh/v1/
+export const vercelAiGatewayDefaultModelId = "anthropic/claude-sonnet-4"
+
+export const VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS = new Set([
+	"anthropic/claude-3-haiku",
+	"anthropic/claude-3-opus",
+	"anthropic/claude-3.5-haiku",
+	"anthropic/claude-3.5-sonnet",
+	"anthropic/claude-3.7-sonnet",
+	"anthropic/claude-opus-4",
+	"anthropic/claude-opus-4.1",
+	"anthropic/claude-sonnet-4",
+	"openai/gpt-4.1",
+	"openai/gpt-4.1-mini",
+	"openai/gpt-4.1-nano",
+	"openai/gpt-4o",
+	"openai/gpt-4o-mini",
+	"openai/gpt-5",
+	"openai/gpt-5-mini",
+	"openai/gpt-5-nano",
+	"openai/o1",
+	"openai/o3",
+	"openai/o3-mini",
+	"openai/o4-mini",
+])
+
+export const VERCEL_AI_GATEWAY_VISION_ONLY_MODELS = new Set([
+	"alibaba/qwen-3-14b",
+	"alibaba/qwen-3-235b",
+	"alibaba/qwen-3-30b",
+	"alibaba/qwen-3-32b",
+	"alibaba/qwen3-coder",
+	"amazon/nova-pro",
+	"anthropic/claude-3.5-haiku",
+	"google/gemini-1.5-flash-8b",
+	"google/gemini-2.0-flash-thinking",
+	"google/gemma-3-27b",
+	"mistral/devstral-small",
+	"xai/grok-vision-beta",
+])
+
+export const VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS = new Set([
+	"amazon/nova-lite",
+	"anthropic/claude-3-haiku",
+	"anthropic/claude-3-opus",
+	"anthropic/claude-3-sonnet",
+	"anthropic/claude-3.5-sonnet",
+	"anthropic/claude-3.7-sonnet",
+	"anthropic/claude-opus-4",
+	"anthropic/claude-opus-4.1",
+	"anthropic/claude-sonnet-4",
+	"google/gemini-1.5-flash",
+	"google/gemini-1.5-pro",
+	"google/gemini-2.0-flash",
+	"google/gemini-2.0-flash-lite",
+	"google/gemini-2.0-pro",
+	"google/gemini-2.5-flash",
+	"google/gemini-2.5-flash-lite",
+	"google/gemini-2.5-pro",
+	"google/gemini-exp",
+	"meta/llama-3.2-11b",
+	"meta/llama-3.2-90b",
+	"meta/llama-3.3",
+	"meta/llama-4-maverick",
+	"meta/llama-4-scout",
+	"mistral/pixtral-12b",
+	"mistral/pixtral-large",
+	"moonshotai/kimi-k2",
+	"openai/gpt-4-turbo",
+	"openai/gpt-4.1",
+	"openai/gpt-4.1-mini",
+	"openai/gpt-4.1-nano",
+	"openai/gpt-4.5-preview",
+	"openai/gpt-4o",
+	"openai/gpt-4o-mini",
+	"openai/gpt-oss-120b",
+	"openai/gpt-oss-20b",
+	"openai/o3",
+	"openai/o3-pro",
+	"openai/o4-mini",
+	"vercel/v0-1.0-md",
+	"xai/grok-2-vision",
+	"zai/glm-4.5v",
+])
+
+export const vercelAiGatewayDefaultModelInfo: ModelInfo = {
+	maxTokens: 64000,
+	contextWindow: 200000,
+	supportsImages: true,
+	supportsComputerUse: true,
+	supportsPromptCache: true,
+	inputPrice: 3,
+	outputPrice: 15,
+	cacheWritesPrice: 3.75,
+	cacheReadsPrice: 0.3,
+	description:
+		"Claude Sonnet 4 significantly improves on Sonnet 3.7's industry-leading capabilities, excelling in coding with a state-of-the-art 72.7% on SWE-bench. The model balances performance and efficiency for internal and external use cases, with enhanced steerability for greater control over implementations. While not matching Opus 4 in most domains, it delivers an optimal mix of capability and practicality.",
+}
+
+export const VERCEL_AI_GATEWAY_DEFAULT_TEMPERATURE = 0.7
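
The three capability sets above suggest how per-model flags can be derived when the gateway's model list is fetched. A sketch of one plausible consumer; `buildGatewayModelInfo` is hypothetical and not part of this commit:

import type { ModelInfo } from "../model.js"
import {
	VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS,
	VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS,
	VERCEL_AI_GATEWAY_VISION_ONLY_MODELS,
} from "./vercel-ai-gateway.js"

// Hypothetical helper: overlay set-membership capability flags on a base ModelInfo.
function buildGatewayModelInfo(modelId: string, base: ModelInfo): ModelInfo {
	return {
		...base,
		supportsPromptCache: VERCEL_AI_GATEWAY_PROMPT_CACHING_MODELS.has(modelId),
		supportsImages:
			VERCEL_AI_GATEWAY_VISION_ONLY_MODELS.has(modelId) ||
			VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS.has(modelId),
	}
}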

packages/types/src/providers/xai.ts

Lines changed: 12 additions & 1 deletion

@@ -3,9 +3,20 @@ import type { ModelInfo } from "../model.js"
 // https://docs.x.ai/docs/api-reference
 export type XAIModelId = keyof typeof xaiModels
 
-export const xaiDefaultModelId: XAIModelId = "grok-4"
+export const xaiDefaultModelId: XAIModelId = "grok-code-fast-1"
 
 export const xaiModels = {
+	"grok-code-fast-1": {
+		maxTokens: 16_384,
+		contextWindow: 262_144,
+		supportsImages: false,
+		supportsPromptCache: true,
+		inputPrice: 0.2,
+		outputPrice: 1.5,
+		cacheWritesPrice: 0.02,
+		cacheReadsPrice: 0.02,
+		description: "xAI's Grok Code Fast model with 256K context window",
+	},
 	"grok-4": {
 		maxTokens: 8192,
 		contextWindow: 256000,
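
The new default's pricing fields follow the file's existing convention, which appears to be USD per million tokens. Under that assumption, a back-of-envelope cost estimate looks like this (`estimateCost` is a hypothetical helper, not part of the commit):

import { xaiModels } from "./xai.js"

// Assumes inputPrice/outputPrice are USD per million tokens.
function estimateCost(inputTokens: number, outputTokens: number): number {
	const m = xaiModels["grok-code-fast-1"]
	return (inputTokens * m.inputPrice + outputTokens * m.outputPrice) / 1_000_000
}

estimateCost(100_000, 5_000) // ≈ $0.0275 for 100k input / 5k output tokens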
