Skip to content

Commit 1076c2a

Browse files
committed
Merge branch 'main' into add_api_key_env_vars
Signed-off-by: Geoff Wilson <[email protected]>
2 parents 91206b1 + 0c481a3 commit 1076c2a

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

46 files changed

+1108
-741
lines changed

CHANGELOG.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,11 @@
11
# Roo Code Changelog
22

3+
## [3.25.23] - 2025-08-22
4+
5+
- feat: add custom base URL support for Requesty provider (thanks @requesty-JohnCosta27!)
6+
- feat: add DeepSeek V3.1 model to Chutes AI provider (#7294 by @dmarkey, PR by @app/roomote)
7+
- Revert "feat: enable loading Roo modes from multiple files in .roo/modes directory" temporarily to fix a bug with mode installation
8+
39
## [3.25.22] - 2025-08-22
410

511
- Add prompt caching support for Kimi K2 on Groq (thanks @daniel-lxs and @benank!)

packages/types/src/providers/chutes.ts

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ export type ChutesModelId =
55
| "deepseek-ai/DeepSeek-R1-0528"
66
| "deepseek-ai/DeepSeek-R1"
77
| "deepseek-ai/DeepSeek-V3"
8+
| "deepseek-ai/DeepSeek-V3.1"
89
| "unsloth/Llama-3.3-70B-Instruct"
910
| "chutesai/Llama-4-Scout-17B-16E-Instruct"
1011
| "unsloth/Mistral-Nemo-Instruct-2407"
@@ -60,6 +61,15 @@ export const chutesModels = {
6061
outputPrice: 0,
6162
description: "DeepSeek V3 model.",
6263
},
64+
"deepseek-ai/DeepSeek-V3.1": {
65+
maxTokens: 32768,
66+
contextWindow: 163840,
67+
supportsImages: false,
68+
supportsPromptCache: false,
69+
inputPrice: 0,
70+
outputPrice: 0,
71+
description: "DeepSeek V3.1 model.",
72+
},
6373
"unsloth/Llama-3.3-70B-Instruct": {
6474
maxTokens: 32768, // From Groq
6575
contextWindow: 131072, // From Groq

packages/types/src/providers/fireworks.ts

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ export type FireworksModelId =
66
| "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct"
77
| "accounts/fireworks/models/deepseek-r1-0528"
88
| "accounts/fireworks/models/deepseek-v3"
9+
| "accounts/fireworks/models/deepseek-v3p1"
910
| "accounts/fireworks/models/glm-4p5"
1011
| "accounts/fireworks/models/glm-4p5-air"
1112
| "accounts/fireworks/models/gpt-oss-20b"
@@ -62,6 +63,16 @@ export const fireworksModels = {
6263
description:
6364
"A strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token from Deepseek. Note that fine-tuning for this model is only available through contacting fireworks at https://fireworks.ai/company/contact-us.",
6465
},
66+
"accounts/fireworks/models/deepseek-v3p1": {
67+
maxTokens: 16384,
68+
contextWindow: 163840,
69+
supportsImages: false,
70+
supportsPromptCache: false,
71+
inputPrice: 0.56,
72+
outputPrice: 1.68,
73+
description:
74+
"DeepSeek v3.1 is an improved version of the v3 model with enhanced performance, better reasoning capabilities, and improved code generation. This Mixture-of-Experts (MoE) model maintains the same 671B total parameters with 37B activated per token.",
75+
},
6576
"accounts/fireworks/models/glm-4p5": {
6677
maxTokens: 16384,
6778
contextWindow: 128000,

0 commit comments

Comments
 (0)