
Commit 17f91b3

Merge branch 'RooCodeInc:main' into fix/issue-4446-ssl-validation
2 parents: 2916c97 + 0c481a3

137 files changed: 2625 additions & 854 deletions


CHANGELOG.md

Lines changed: 26 additions & 0 deletions
@@ -1,5 +1,31 @@
 # Roo Code Changelog
 
+## [3.25.23] - 2025-08-22
+
+- feat: add custom base URL support for Requesty provider (thanks @requesty-JohnCosta27!)
+- feat: add DeepSeek V3.1 model to Chutes AI provider (#7294 by @dmarkey, PR by @app/roomote)
+- Revert "feat: enable loading Roo modes from multiple files in .roo/modes directory" temporarily to fix a bug with mode installation
+
+## [3.25.22] - 2025-08-22
+
+- Add prompt caching support for Kimi K2 on Groq (thanks @daniel-lxs and @benank!)
+- Add documentation links for global custom instructions in UI (thanks @app/roomote!)
+
+## [3.25.21] - 2025-08-21
+
+- Ensure subtask results are provided to GPT-5 in OpenAI Responses API
+- Promote the experimental AssistantMessageParser to the default parser
+- Update DeepSeek models context window to 128k (thanks @JuanPerezReal)
+- Enable grounding features for Vertex AI (thanks @anguslees)
+- Allow orchestrator to pass TODO lists to subtasks
+- Improved MDM handling
+- Handle nullish token values in ContextCondenseRow to prevent UI crash (thanks @s97712)
+- Improved context window error handling for OpenAI and other providers
+- Add "installed" filter to Roo Marketplace (thanks @semidark)
+- Improve filesystem access checks (thanks @elianiva)
+- Support for loading Roo modes from multiple YAML files in the `.roo/modes/` directory (thanks @farazoman)
+- Add Featherless provider (thanks @DarinVerheijke)
+
 ## [3.25.20] - 2025-08-19
 
 - Add announcement for Sonic model

apps/web-evals/.env

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-DATABASE_URL=postgres://postgres:password@localhost:5432/evals_development
+DATABASE_URL=postgres://postgres:password@localhost:5433/evals_development

apps/web-evals/scripts/check-services.sh

Lines changed: 2 additions & 2 deletions
@@ -5,13 +5,13 @@ if ! docker info &> /dev/null; then
 	exit 1
 fi
 
-if ! nc -z localhost 5432 2>/dev/null; then
+if ! nc -z postgres 5433 2>/dev/null; then
 	echo "❌ PostgreSQL is not running on port 5432"
 	echo "💡 Start it with: pnpm --filter @roo-code/evals db:up"
 	exit 1
 fi
 
-if ! nc -z localhost 6379 2>/dev/null; then
+if ! nc -z redis 6380 2>/dev/null; then
 	echo "❌ Redis is not running on port 6379"
 	echo "💡 Start it with: pnpm --filter @roo-code/evals redis:up"
 	exit 1
apps/web-evals/src/app/runs/new/new-run.tsx

Lines changed: 5 additions & 30 deletions
@@ -8,7 +8,7 @@ import { useForm, FormProvider } from "react-hook-form"
 import { zodResolver } from "@hookform/resolvers/zod"
 import fuzzysort from "fuzzysort"
 import { toast } from "sonner"
-import { X, Rocket, Check, ChevronsUpDown, SlidersHorizontal, Book, CircleCheck } from "lucide-react"
+import { X, Rocket, Check, ChevronsUpDown, SlidersHorizontal, CircleCheck } from "lucide-react"
 
 import { globalSettingsSchema, providerSettingsSchema, EVALS_SETTINGS, getModelId } from "@roo-code/types"
 
@@ -49,11 +49,8 @@ import {
 	PopoverContent,
 	PopoverTrigger,
 	ScrollArea,
+	ScrollBar,
 	Slider,
-	Dialog,
-	DialogContent,
-	DialogTitle,
-	DialogFooter,
 } from "@/components/ui"
 
 import { SettingsDiff } from "./settings-diff"
@@ -93,24 +90,20 @@ export function NewRun() {
 
 	const [model, suite, settings] = watch(["model", "suite", "settings", "concurrency"])
 
-	const [systemPromptDialogOpen, setSystemPromptDialogOpen] = useState(false)
-	const [systemPrompt, setSystemPrompt] = useState("")
-	const systemPromptRef = useRef<HTMLTextAreaElement>(null)
-
 	const onSubmit = useCallback(
 		async (values: CreateRun) => {
 			try {
 				if (mode === "openrouter") {
 					values.settings = { ...(values.settings || {}), openRouterModelId: model }
 				}
 
-				const { id } = await createRun({ ...values, systemPrompt })
+				const { id } = await createRun(values)
 				router.push(`/runs/${id}`)
 			} catch (e) {
 				toast.error(e instanceof Error ? e.message : "An unknown error occurred.")
 			}
 		},
-		[mode, model, router, systemPrompt],
+		[mode, model, router],
 	)
 
 	const onFilterModels = useCallback(
@@ -269,29 +262,11 @@
 						</div>
 						<SettingsDiff defaultSettings={EVALS_SETTINGS} customSettings={settings} />
 					</>
+					<ScrollBar orientation="horizontal" />
 				</ScrollArea>
 			)}
 			<FormMessage />
 		</FormItem>
-
-		<Button type="button" variant="secondary" onClick={() => setSystemPromptDialogOpen(true)}>
-			<Book />
-			Override System Prompt
-		</Button>
-
-		<Dialog open={systemPromptDialogOpen} onOpenChange={setSystemPromptDialogOpen}>
-			<DialogContent>
-				<DialogTitle>Override System Prompt</DialogTitle>
-				<Textarea
-					ref={systemPromptRef}
-					value={systemPrompt}
-					onChange={(e) => setSystemPrompt(e.target.value)}
-				/>
-				<DialogFooter>
-					<Button onClick={() => setSystemPromptDialogOpen(false)}>Done</Button>
-				</DialogFooter>
-			</DialogContent>
-		</Dialog>
 	</div>
 
 	<FormField

apps/web-evals/src/app/runs/new/settings-diff.tsx

Lines changed: 3 additions & 3 deletions
@@ -52,13 +52,13 @@ type SettingDiffProps = HTMLAttributes<HTMLDivElement> & {
 export function SettingDiff({ name, defaultValue, customValue, ...props }: SettingDiffProps) {
 	return (
 		<Fragment {...props}>
-			<div className="overflow-hidden font-mono" title={name}>
+			<div className="font-mono" title={name}>
 				{name}
 			</div>
-			<pre className="overflow-hidden inline text-rose-500 line-through" title={defaultValue}>
+			<pre className="inline text-rose-500 line-through" title={defaultValue}>
 				{defaultValue}
 			</pre>
-			<pre className="overflow-hidden inline text-teal-500" title={customValue}>
+			<pre className="inline text-teal-500" title={customValue}>
 				{customValue}
 			</pre>
 		</Fragment>

packages/types/src/experiment.ts

Lines changed: 1 addition & 2 deletions
@@ -6,7 +6,7 @@ import type { Keys, Equals, AssertEqual } from "./type-fu.js"
  * ExperimentId
  */
 
-export const experimentIds = ["powerSteering", "multiFileApplyDiff", "preventFocusDisruption", "assistantMessageParser"] as const
+export const experimentIds = ["powerSteering", "multiFileApplyDiff", "preventFocusDisruption"] as const
 
 export const experimentIdsSchema = z.enum(experimentIds)
 
@@ -20,7 +20,6 @@ export const experimentsSchema = z.object({
 	powerSteering: z.boolean().optional(),
 	multiFileApplyDiff: z.boolean().optional(),
 	preventFocusDisruption: z.boolean().optional(),
-	assistantMessageParser: z.boolean().optional(),
 })
 
 export type Experiments = z.infer<typeof experimentsSchema>
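Because `experimentIdsSchema` is `z.enum(experimentIds)`, trimming the array above also narrows what the schema accepts at runtime. A minimal sketch of that effect (zod v3 assumed; the array is copied locally rather than imported from the package):

```ts
import { z } from "zod"

// Local copy of the trimmed experiment id list from the diff above.
const experimentIds = ["powerSteering", "multiFileApplyDiff", "preventFocusDisruption"] as const
const experimentIdsSchema = z.enum(experimentIds)

console.log(experimentIdsSchema.safeParse("powerSteering").success) // true
console.log(experimentIdsSchema.safeParse("assistantMessageParser").success) // false: no longer a valid experiment id
```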

packages/types/src/provider-settings.ts

Lines changed: 2 additions & 0 deletions
@@ -163,6 +163,8 @@ const vertexSchema = apiModelIdProviderModelSchema.extend({
 	vertexJsonCredentials: z.string().optional(),
 	vertexProjectId: z.string().optional(),
 	vertexRegion: z.string().optional(),
+	enableUrlContext: z.boolean().optional(),
+	enableGrounding: z.boolean().optional(),
 })
 
 const openAiSchema = baseProviderSettingsSchema.extend({
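Both new Vertex fields are optional booleans, so provider settings saved before this commit still parse; the flags simply come back `undefined` until a user opts in. A minimal sketch of that behavior (zod v3 assumed; the schema is pared down to a few fields, the real one lives in `provider-settings.ts`):

```ts
import { z } from "zod"

// Pared-down stand-in for vertexSchema with the two new flags.
const vertexSketch = z.object({
	vertexProjectId: z.string().optional(),
	vertexRegion: z.string().optional(),
	enableUrlContext: z.boolean().optional(),
	enableGrounding: z.boolean().optional(),
})

// A settings object written before this commit still validates...
const legacy = vertexSketch.parse({ vertexProjectId: "my-project", vertexRegion: "us-central1" })
console.log(legacy.enableGrounding) // undefined

// ...and opting in is just a matter of setting the flag.
const grounded = vertexSketch.parse({ vertexRegion: "us-central1", enableGrounding: true })
console.log(grounded.enableGrounding) // true
```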

packages/types/src/providers/chutes.ts

Lines changed: 10 additions & 0 deletions
@@ -5,6 +5,7 @@ export type ChutesModelId =
 	| "deepseek-ai/DeepSeek-R1-0528"
 	| "deepseek-ai/DeepSeek-R1"
 	| "deepseek-ai/DeepSeek-V3"
+	| "deepseek-ai/DeepSeek-V3.1"
 	| "unsloth/Llama-3.3-70B-Instruct"
 	| "chutesai/Llama-4-Scout-17B-16E-Instruct"
 	| "unsloth/Mistral-Nemo-Instruct-2407"
@@ -60,6 +61,15 @@ export const chutesModels = {
 		outputPrice: 0,
 		description: "DeepSeek V3 model.",
 	},
+	"deepseek-ai/DeepSeek-V3.1": {
+		maxTokens: 32768,
+		contextWindow: 163840,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0,
+		outputPrice: 0,
+		description: "DeepSeek V3.1 model.",
+	},
 	"unsloth/Llama-3.3-70B-Instruct": {
 		maxTokens: 32768, // From Groq
 		contextWindow: 131072, // From Groq
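A minimal sketch of how an entry like the new DeepSeek-V3.1 record gets consumed: look the model up by id, then size a request from its limits. The `ModelInfo` shape and the model table below are local stand-ins copied from the diff above, not imports from `@roo-code/types`.

```ts
// Stand-in for the ModelInfo fields used below (the real type lives in @roo-code/types).
type ModelInfoSketch = {
	maxTokens: number
	contextWindow: number
	supportsImages: boolean
	supportsPromptCache: boolean
	inputPrice: number
	outputPrice: number
	description: string
}

// Stand-in table containing just the entry added by this commit.
const chutesModelsSketch = {
	"deepseek-ai/DeepSeek-V3.1": {
		maxTokens: 32768,
		contextWindow: 163840,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0,
		outputPrice: 0,
		description: "DeepSeek V3.1 model.",
	},
} as const satisfies Record<string, ModelInfoSketch>

const info = chutesModelsSketch["deepseek-ai/DeepSeek-V3.1"]
// Tokens left for the prompt once the largest possible completion is reserved.
console.log(info.contextWindow - info.maxTokens) // 131072
```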

packages/types/src/providers/deepseek.ts

Lines changed: 5 additions & 5 deletions
@@ -7,8 +7,8 @@ export const deepSeekDefaultModelId: DeepSeekModelId = "deepseek-chat"
 
 export const deepSeekModels = {
 	"deepseek-chat": {
-		maxTokens: 8192,
-		contextWindow: 64_000,
+		maxTokens: 8192, // 8K max output
+		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: true,
 		inputPrice: 0.27, // $0.27 per million tokens (cache miss)
@@ -18,15 +18,15 @@ export const deepSeekModels = {
 		description: `DeepSeek-V3 achieves a significant breakthrough in inference speed over previous models. It tops the leaderboard among open-source models and rivals the most advanced closed-source models globally.`,
 	},
 	"deepseek-reasoner": {
-		maxTokens: 8192,
-		contextWindow: 64_000,
+		maxTokens: 65536, // 64K max output for reasoning mode
+		contextWindow: 128_000,
 		supportsImages: false,
 		supportsPromptCache: true,
 		inputPrice: 0.55, // $0.55 per million tokens (cache miss)
 		outputPrice: 2.19, // $2.19 per million tokens
 		cacheWritesPrice: 0.55, // $0.55 per million tokens (cache miss)
 		cacheReadsPrice: 0.14, // $0.14 per million tokens (cache hit)
-		description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 32K tokens.`,
+		description: `DeepSeek-R1 achieves performance comparable to OpenAI-o1 across math, code, and reasoning tasks. Supports Chain of Thought reasoning with up to 64K output tokens.`,
 	},
 } as const satisfies Record<string, ModelInfo>
 
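Both DeepSeek models now advertise a 128K context window, but the prompt budget left after reserving the maximum output differs sharply because `deepseek-reasoner` can emit up to 64K tokens. A minimal sketch of that arithmetic (numbers copied from the hunk above):

```ts
// Limits copied from the updated deepSeekModels entries.
const deepSeekLimits = {
	"deepseek-chat": { contextWindow: 128_000, maxTokens: 8_192 },
	"deepseek-reasoner": { contextWindow: 128_000, maxTokens: 65_536 },
} as const

for (const [id, { contextWindow, maxTokens }] of Object.entries(deepSeekLimits)) {
	console.log(`${id}: ${contextWindow - maxTokens} tokens left for the prompt`)
}
// deepseek-chat: 119808 tokens left for the prompt
// deepseek-reasoner: 62464 tokens left for the prompt
```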

packages/types/src/providers/fireworks.ts

Lines changed: 11 additions & 0 deletions
@@ -6,6 +6,7 @@ export type FireworksModelId =
 	| "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct"
 	| "accounts/fireworks/models/deepseek-r1-0528"
 	| "accounts/fireworks/models/deepseek-v3"
+	| "accounts/fireworks/models/deepseek-v3p1"
 	| "accounts/fireworks/models/glm-4p5"
 	| "accounts/fireworks/models/glm-4p5-air"
 	| "accounts/fireworks/models/gpt-oss-20b"
@@ -62,6 +63,16 @@ export const fireworksModels = {
 		description:
 			"A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us.",
 	},
+	"accounts/fireworks/models/deepseek-v3p1": {
+		maxTokens: 16384,
+		contextWindow: 163840,
+		supportsImages: false,
+		supportsPromptCache: false,
+		inputPrice: 0.56,
+		outputPrice: 1.68,
+		description:
+			"DeepSeek v3.1 is an improved version of the v3 model with enhanced performance, better reasoning capabilities, and improved code generation. This Mixture-of-Experts (MoE) model maintains the same 671B total parameters with 37B activated per token.",
+	},
 	"accounts/fireworks/models/glm-4p5": {
 		maxTokens: 16384,
 		contextWindow: 128000,
