Skip to content

Commit dfef5d5

Browse files
committed
Merge branch 'main' into deselect-unwanted-modes
2 parents 957ecc5 + 241df17 commit dfef5d5

File tree

99 files changed

+2227
-197
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

99 files changed

+2227
-197
lines changed

.env.sample

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,3 +3,4 @@ POSTHOG_API_KEY=key-goes-here
33
# Roo Code Cloud / Local Development
44
CLERK_BASE_URL=https://epic-chamois-85.clerk.accounts.dev
55
ROO_CODE_API_URL=http://localhost:3000
6+
ROO_CODE_PROVIDER_URL=http://localhost:8080/proxy/v1

.github/ISSUE_TEMPLATE/bug_report.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ body:
2525
- AWS Bedrock
2626
- Chutes AI
2727
- DeepSeek
28+
- Featherless AI
2829
- Fireworks AI
2930
- Glama
3031
- Google Gemini

CHANGELOG.md

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,23 @@
11
# Roo Code Changelog
22

3+
## [3.25.20] - 2025-08-19
4+
5+
- Add announcement for Sonic model
6+
7+
## [3.25.19] - 2025-08-19
8+
9+
- Fix issue where new users couldn't select the Roo Code Cloud provider (thanks @daniel-lxs!)
10+
11+
## [3.25.18] - 2025-08-19
12+
13+
- Add new stealth Sonic model through the Roo Code Cloud provider
14+
- Fix: respect enableReasoningEffort setting when determining reasoning usage (#7048 by @ikbencasdoei, PR by @app/roomote)
15+
- Fix: prevent duplicate LM Studio models with case-insensitive deduplication (#6954 by @fbuechler, PR by @daniel-lxs)
16+
- Feat: simplify ask_followup_question prompt documentation (thanks @daniel-lxs!)
17+
- Feat: simple read_file tool for single-file-only models (thanks @daniel-lxs!)
18+
- Fix: Add missing zaiApiKey and doubaoApiKey to SECRET_STATE_KEYS (#7082 by @app/roomote)
19+
- Feat: Add new models and update configurations for vscode-lm (thanks @NaccOll!)
20+
321
## [3.25.17] - 2025-08-17
422

523
- Fix: Resolve terminal reuse logic issues

packages/types/npm/package.metadata.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
{
22
"name": "@roo-code/types",
3-
"version": "1.53.0",
3+
"version": "1.59.0",
44
"description": "TypeScript type definitions for Roo Code.",
55
"publishConfig": {
66
"access": "public",

packages/types/src/global-settings.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -178,6 +178,7 @@ export const SECRET_STATE_KEYS = [
178178
"openAiNativeApiKey",
179179
"cerebrasApiKey",
180180
"deepSeekApiKey",
181+
"doubaoApiKey",
181182
"moonshotApiKey",
182183
"mistralApiKey",
183184
"unboundApiKey",
@@ -193,7 +194,9 @@ export const SECRET_STATE_KEYS = [
193194
"codebaseIndexMistralApiKey",
194195
"huggingFaceApiKey",
195196
"sambaNovaApiKey",
197+
"zaiApiKey",
196198
"fireworksApiKey",
199+
"featherlessApiKey",
197200
"ioIntelligenceApiKey",
198201
] as const satisfies readonly (keyof ProviderSettings)[]
199202
export type SecretState = Pick<ProviderSettings, (typeof SECRET_STATE_KEYS)[number]>

packages/types/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ export * from "./message.js"
1212
export * from "./mode.js"
1313
export * from "./model.js"
1414
export * from "./provider-settings.js"
15+
export * from "./single-file-read-models.js"
1516
export * from "./task.js"
1617
export * from "./todo.js"
1718
export * from "./telemetry.js"

packages/types/src/model.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,14 @@ export const reasoningEffortsSchema = z.enum(reasoningEfforts)
1010

1111
export type ReasoningEffort = z.infer<typeof reasoningEffortsSchema>
1212

13+
/**
 * ReasoningEffortWithMinimal
 *
 * Extends the base reasoning-effort schema with an extra "minimal" level,
 * used by models that accept a minimal reasoning setting (e.g. GPT-5 models).
 */

export const reasoningEffortWithMinimalSchema = z.union([reasoningEffortsSchema, z.literal("minimal")])

export type ReasoningEffortWithMinimal = z.infer<typeof reasoningEffortWithMinimalSchema>
20+
1321
/**
1422
* Verbosity
1523
*/

packages/types/src/provider-settings.ts

Lines changed: 149 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,30 @@
11
import { z } from "zod"
22

3-
import { reasoningEffortsSchema, verbosityLevelsSchema, modelInfoSchema } from "./model.js"
3+
import { modelInfoSchema, reasoningEffortWithMinimalSchema, verbosityLevelsSchema } from "./model.js"
44
import { codebaseIndexProviderSchema } from "./codebase-index.js"
5-
6-
// Bedrock Claude Sonnet 4 model ID that supports 1M context
7-
export const BEDROCK_CLAUDE_SONNET_4_MODEL_ID = "anthropic.claude-sonnet-4-20250514-v1:0"
8-
9-
// Extended schema that includes "minimal" for GPT-5 models
10-
export const extendedReasoningEffortsSchema = z.union([reasoningEffortsSchema, z.literal("minimal")])
11-
12-
export type ReasoningEffortWithMinimal = z.infer<typeof extendedReasoningEffortsSchema>
5+
import {
6+
anthropicModels,
7+
bedrockModels,
8+
cerebrasModels,
9+
chutesModels,
10+
claudeCodeModels,
11+
deepSeekModels,
12+
doubaoModels,
13+
featherlessModels,
14+
fireworksModels,
15+
geminiModels,
16+
groqModels,
17+
ioIntelligenceModels,
18+
mistralModels,
19+
moonshotModels,
20+
openAiNativeModels,
21+
rooModels,
22+
sambaNovaModels,
23+
vertexModels,
24+
vscodeLlmModels,
25+
xaiModels,
26+
internationalZAiModels,
27+
} from "./providers/index.js"
1328

1429
/**
1530
* ProviderName
@@ -46,7 +61,9 @@ export const providerNames = [
4661
"sambanova",
4762
"zai",
4863
"fireworks",
64+
"featherless",
4965
"io-intelligence",
66+
"roo",
5067
] as const
5168

5269
export const providerNamesSchema = z.enum(providerNames)
@@ -85,7 +102,7 @@ const baseProviderSettingsSchema = z.object({
85102

86103
// Model reasoning.
87104
enableReasoningEffort: z.boolean().optional(),
88-
reasoningEffort: extendedReasoningEffortsSchema.optional(),
105+
reasoningEffort: reasoningEffortWithMinimalSchema.optional(),
89106
modelMaxTokens: z.number().optional(),
90107
modelMaxThinkingTokens: z.number().optional(),
91108

@@ -283,11 +300,19 @@ const fireworksSchema = apiModelIdProviderModelSchema.extend({
283300
fireworksApiKey: z.string().optional(),
284301
})
285302

303+
const featherlessSchema = apiModelIdProviderModelSchema.extend({
304+
featherlessApiKey: z.string().optional(),
305+
})
306+
286307
const ioIntelligenceSchema = apiModelIdProviderModelSchema.extend({
287308
ioIntelligenceModelId: z.string().optional(),
288309
ioIntelligenceApiKey: z.string().optional(),
289310
})
290311

312+
const rooSchema = apiModelIdProviderModelSchema.extend({
313+
// No additional fields needed - uses cloud authentication
314+
})
315+
291316
const defaultSchema = z.object({
292317
apiProvider: z.undefined(),
293318
})
@@ -323,7 +348,9 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
323348
sambaNovaSchema.merge(z.object({ apiProvider: z.literal("sambanova") })),
324349
zaiSchema.merge(z.object({ apiProvider: z.literal("zai") })),
325350
fireworksSchema.merge(z.object({ apiProvider: z.literal("fireworks") })),
351+
featherlessSchema.merge(z.object({ apiProvider: z.literal("featherless") })),
326352
ioIntelligenceSchema.merge(z.object({ apiProvider: z.literal("io-intelligence") })),
353+
rooSchema.merge(z.object({ apiProvider: z.literal("roo") })),
327354
defaultSchema,
328355
])
329356

@@ -359,7 +386,9 @@ export const providerSettingsSchema = z.object({
359386
...sambaNovaSchema.shape,
360387
...zaiSchema.shape,
361388
...fireworksSchema.shape,
389+
...featherlessSchema.shape,
362390
...ioIntelligenceSchema.shape,
391+
...rooSchema.shape,
363392
...codebaseIndexProviderSchema.shape,
364393
})
365394

@@ -393,21 +422,126 @@ export const getModelId = (settings: ProviderSettings): string | undefined => {
393422
return modelIdKey ? (settings[modelIdKey] as string) : undefined
394423
}
395424

396-
// Providers that use Anthropic-style API protocol
425+
// Providers that use Anthropic-style API protocol.
397426
export const ANTHROPIC_STYLE_PROVIDERS: ProviderName[] = ["anthropic", "claude-code", "bedrock"]
398427

399-
// Helper function to determine API protocol for a provider and model
400428
export const getApiProtocol = (provider: ProviderName | undefined, modelId?: string): "anthropic" | "openai" => {
401-
// First check if the provider is an Anthropic-style provider
402429
if (provider && ANTHROPIC_STYLE_PROVIDERS.includes(provider)) {
403430
return "anthropic"
404431
}
405432

406-
// For vertex provider, check if the model ID contains "claude" (case-insensitive)
407433
if (provider && provider === "vertex" && modelId && modelId.toLowerCase().includes("claude")) {
408434
return "anthropic"
409435
}
410436

411-
// Default to OpenAI protocol
412437
return "openai"
413438
}
439+
440+
/**
 * Static catalog of providers and their known model IDs, keyed by provider
 * name. Providers whose model lists are fetched at runtime (the
 * `dynamicProviders` listed below) appear with an empty `models` array.
 * Providers with no catalogued models at all ("fake-ai", "human-relay",
 * "gemini-cli", "lmstudio", "openai", "ollama") are excluded from the key set.
 */
export const MODELS_BY_PROVIDER: Record<
	Exclude<ProviderName, "fake-ai" | "human-relay" | "gemini-cli" | "lmstudio" | "openai" | "ollama">,
	{ id: ProviderName; label: string; models: string[] }
> = {
	anthropic: {
		id: "anthropic",
		label: "Anthropic",
		models: Object.keys(anthropicModels),
	},
	bedrock: {
		id: "bedrock",
		label: "Amazon Bedrock",
		models: Object.keys(bedrockModels),
	},
	cerebras: {
		id: "cerebras",
		label: "Cerebras",
		models: Object.keys(cerebrasModels),
	},
	chutes: {
		id: "chutes",
		label: "Chutes AI",
		models: Object.keys(chutesModels),
	},
	"claude-code": { id: "claude-code", label: "Claude Code", models: Object.keys(claudeCodeModels) },
	deepseek: {
		id: "deepseek",
		label: "DeepSeek",
		models: Object.keys(deepSeekModels),
	},
	doubao: { id: "doubao", label: "Doubao", models: Object.keys(doubaoModels) },
	featherless: {
		id: "featherless",
		label: "Featherless",
		models: Object.keys(featherlessModels),
	},
	fireworks: {
		id: "fireworks",
		label: "Fireworks",
		models: Object.keys(fireworksModels),
	},
	gemini: {
		id: "gemini",
		label: "Google Gemini",
		models: Object.keys(geminiModels),
	},
	groq: { id: "groq", label: "Groq", models: Object.keys(groqModels) },
	"io-intelligence": {
		id: "io-intelligence",
		label: "IO Intelligence",
		models: Object.keys(ioIntelligenceModels),
	},
	mistral: {
		id: "mistral",
		label: "Mistral",
		models: Object.keys(mistralModels),
	},
	moonshot: {
		id: "moonshot",
		label: "Moonshot",
		models: Object.keys(moonshotModels),
	},
	"openai-native": {
		id: "openai-native",
		label: "OpenAI",
		models: Object.keys(openAiNativeModels),
	},
	roo: { id: "roo", label: "Roo", models: Object.keys(rooModels) },
	sambanova: {
		id: "sambanova",
		label: "SambaNova",
		models: Object.keys(sambaNovaModels),
	},
	vertex: {
		id: "vertex",
		label: "GCP Vertex AI",
		models: Object.keys(vertexModels),
	},
	"vscode-lm": {
		id: "vscode-lm",
		label: "VS Code LM API",
		models: Object.keys(vscodeLlmModels),
	},
	xai: { id: "xai", label: "xAI (Grok)", models: Object.keys(xaiModels) },
	// NOTE(review): only the international Z AI model set is catalogued here —
	// confirm whether the mainland-China set should also be represented.
	zai: { id: "zai", label: "Zai", models: Object.keys(internationalZAiModels) },

	// Dynamic providers; models pulled from the respective APIs.
	glama: { id: "glama", label: "Glama", models: [] },
	huggingface: { id: "huggingface", label: "Hugging Face", models: [] },
	litellm: { id: "litellm", label: "LiteLLM", models: [] },
	openrouter: { id: "openrouter", label: "OpenRouter", models: [] },
	requesty: { id: "requesty", label: "Requesty", models: [] },
	unbound: { id: "unbound", label: "Unbound", models: [] },
}

/**
 * Providers whose model lists are discovered from their APIs at runtime
 * rather than from the static catalog above.
 */
export const dynamicProviders = [
	"glama",
	"huggingface",
	"litellm",
	"openrouter",
	"requesty",
	"unbound",
] as const satisfies readonly ProviderName[]

export type DynamicProvider = (typeof dynamicProviders)[number]

/** Type guard: narrows an arbitrary string to a known dynamic provider key. */
export const isDynamicProvider = (key: string): key is DynamicProvider =>
	dynamicProviders.includes(key as DynamicProvider)

packages/types/src/providers/bedrock.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -441,3 +441,5 @@ export const BEDROCK_REGIONS = [
441441
{ value: "us-gov-east-1", label: "us-gov-east-1" },
442442
{ value: "us-gov-west-1", label: "us-gov-west-1" },
443443
].sort((a, b) => a.value.localeCompare(b.value))
444+
445+
// Bedrock Claude Sonnet 4 model ID that supports 1M context
export const BEDROCK_CLAUDE_SONNET_4_MODEL_ID = "anthropic.claude-sonnet-4-20250514-v1:0"
Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,58 @@
1+
import type { ModelInfo } from "../model.js"
2+
3+
export type FeatherlessModelId =
4+
| "deepseek-ai/DeepSeek-V3-0324"
5+
| "deepseek-ai/DeepSeek-R1-0528"
6+
| "moonshotai/Kimi-K2-Instruct"
7+
| "openai/gpt-oss-120b"
8+
| "Qwen/Qwen3-Coder-480B-A35B-Instruct"
9+
10+
export const featherlessModels = {
11+
"deepseek-ai/DeepSeek-V3-0324": {
12+
maxTokens: 4096,
13+
contextWindow: 32678,
14+
supportsImages: false,
15+
supportsPromptCache: false,
16+
inputPrice: 0,
17+
outputPrice: 0,
18+
description: "DeepSeek V3 0324 model.",
19+
},
20+
"deepseek-ai/DeepSeek-R1-0528": {
21+
maxTokens: 4096,
22+
contextWindow: 32678,
23+
supportsImages: false,
24+
supportsPromptCache: false,
25+
inputPrice: 0,
26+
outputPrice: 0,
27+
description: "DeepSeek R1 0528 model.",
28+
},
29+
"moonshotai/Kimi-K2-Instruct": {
30+
maxTokens: 4096,
31+
contextWindow: 32678,
32+
supportsImages: false,
33+
supportsPromptCache: false,
34+
inputPrice: 0,
35+
outputPrice: 0,
36+
description: "Kimi K2 Instruct model.",
37+
},
38+
"openai/gpt-oss-120b": {
39+
maxTokens: 4096,
40+
contextWindow: 32678,
41+
supportsImages: false,
42+
supportsPromptCache: false,
43+
inputPrice: 0,
44+
outputPrice: 0,
45+
description: "GPT-OSS 120B model.",
46+
},
47+
"Qwen/Qwen3-Coder-480B-A35B-Instruct": {
48+
maxTokens: 4096,
49+
contextWindow: 32678,
50+
supportsImages: false,
51+
supportsPromptCache: false,
52+
inputPrice: 0,
53+
outputPrice: 0,
54+
description: "Qwen3 Coder 480B A35B Instruct model.",
55+
},
56+
} as const satisfies Record<string, ModelInfo>
57+
58+
export const featherlessDefaultModelId: FeatherlessModelId = "deepseek-ai/DeepSeek-R1-0528"

0 commit comments

Comments
 (0)