Skip to content

Commit c4a8f30

Browse files
github-actions[bot] and mrubens
authored and committed
Changeset version bump (RooCodeInc#5039)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Matt Rubens <[email protected]>
2 parents 25f9b0c + 9bf31d3 commit c4a8f30

File tree

143 files changed

+2897
-832
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

143 files changed

+2897
-832
lines changed

CHANGELOG.md

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,36 @@
1515
- Close the local browser when used as fallback for remote (thanks @markijbema!)
1616
- Add Claude Code provider for local CLI integration (thanks @BarreiroT!)
1717

18+
## [3.22.1] - 2025-06-26
19+
20+
- Add Gemini CLI provider (thanks Cline!)
21+
- Fix undefined mcp command (thanks @qdaxb!)
22+
- Use upstream_inference_cost for OpenRouter BYOK cost calculation and show cached token count (thanks @chrarnoldus!)
23+
- Update maxTokens value for qwen/qwen3-32b model on Groq (thanks @KanTakahiro!)
24+
- Standardize tooltip delays to 300ms
25+
26+
## [3.22.0] - 2025-06-25
27+
28+
- Add 1-click task sharing
29+
- Add support for loading rules from a global .roo directory (thanks @samhvw8!)
30+
- Modes selector improvements (thanks @brunobergher!)
31+
- Use safeWriteJson for all JSON file writes to avoid task history corruption (thanks @KJ7LNW!)
32+
- Improve YAML error handling when editing modes
33+
- Register importSettings as VSCode command (thanks @shivamd1810!)
34+
- Add default task names for empty tasks (thanks @daniel-lxs!)
35+
- Improve translation workflow to avoid unnecessary file reads (thanks @KJ7LNW!)
36+
- Allow write_to_file to handle newline-only and empty content (thanks @Githubguy132010!)
37+
- Address multiple memory leaks in CodeBlock component (thanks @kiwina!)
38+
- Memory cleanup (thanks @xyOz-dev!)
39+
- Fix port handling bug in code indexing for HTTPS URLs (thanks @benashby!)
40+
- Improve Bedrock error handling for throttling and streaming contexts
41+
- Handle long Claude code messages (thanks @daniel-lxs!)
42+
- Fixes to Claude Code caching and image upload
43+
- Disable reasoning budget UI controls for Claude Code provider
44+
- Remove temperature parameter for Azure OpenAI reasoning models (thanks @ExactDoug!)
45+
- Allowed commands import/export (thanks @catrielmuller!)
46+
- Add VS Code setting to disable quick fix context actions (thanks @OlegOAndreev!)
47+
1848
## [3.21.5] - 2025-06-23
1949

2050
- Fix Qdrant URL prefix handling for QdrantClient initialization (thanks @CW-B-W!)

packages/types/src/provider-settings.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ export const providerNames = [
1919
"vscode-lm",
2020
"lmstudio",
2121
"gemini",
22+
"gemini-cli",
2223
"openai-native",
2324
"mistral",
2425
"deepseek",
@@ -158,6 +159,11 @@ const geminiSchema = apiModelIdProviderModelSchema.extend({
158159
googleGeminiBaseUrl: z.string().optional(),
159160
})
160161

162+
const geminiCliSchema = apiModelIdProviderModelSchema.extend({
163+
geminiCliOAuthPath: z.string().optional(),
164+
geminiCliProjectId: z.string().optional(),
165+
})
166+
161167
const openAiNativeSchema = apiModelIdProviderModelSchema.extend({
162168
openAiNativeApiKey: z.string().optional(),
163169
openAiNativeBaseUrl: z.string().optional(),
@@ -223,6 +229,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
223229
vsCodeLmSchema.merge(z.object({ apiProvider: z.literal("vscode-lm") })),
224230
lmStudioSchema.merge(z.object({ apiProvider: z.literal("lmstudio") })),
225231
geminiSchema.merge(z.object({ apiProvider: z.literal("gemini") })),
232+
geminiCliSchema.merge(z.object({ apiProvider: z.literal("gemini-cli") })),
226233
openAiNativeSchema.merge(z.object({ apiProvider: z.literal("openai-native") })),
227234
mistralSchema.merge(z.object({ apiProvider: z.literal("mistral") })),
228235
deepSeekSchema.merge(z.object({ apiProvider: z.literal("deepseek") })),
@@ -250,6 +257,7 @@ export const providerSettingsSchema = z.object({
250257
...vsCodeLmSchema.shape,
251258
...lmStudioSchema.shape,
252259
...geminiSchema.shape,
260+
...geminiCliSchema.shape,
253261
...openAiNativeSchema.shape,
254262
...mistralSchema.shape,
255263
...deepSeekSchema.shape,
Lines changed: 110 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,110 @@
1+
import type { ModelInfo } from "../model.js"
2+
3+
// Gemini CLI models with free tier pricing (all $0)
4+
export type GeminiCliModelId = keyof typeof geminiCliModels
5+
6+
export const geminiCliDefaultModelId: GeminiCliModelId = "gemini-2.0-flash-001"
7+
8+
export const geminiCliModels = {
9+
"gemini-2.0-flash-001": {
10+
maxTokens: 8192,
11+
contextWindow: 1_048_576,
12+
supportsImages: true,
13+
supportsPromptCache: false,
14+
inputPrice: 0,
15+
outputPrice: 0,
16+
},
17+
"gemini-2.0-flash-thinking-exp-01-21": {
18+
maxTokens: 65_536,
19+
contextWindow: 1_048_576,
20+
supportsImages: true,
21+
supportsPromptCache: false,
22+
inputPrice: 0,
23+
outputPrice: 0,
24+
},
25+
"gemini-2.0-flash-thinking-exp-1219": {
26+
maxTokens: 8192,
27+
contextWindow: 32_767,
28+
supportsImages: true,
29+
supportsPromptCache: false,
30+
inputPrice: 0,
31+
outputPrice: 0,
32+
},
33+
"gemini-2.0-flash-exp": {
34+
maxTokens: 8192,
35+
contextWindow: 1_048_576,
36+
supportsImages: true,
37+
supportsPromptCache: false,
38+
inputPrice: 0,
39+
outputPrice: 0,
40+
},
41+
"gemini-1.5-flash-002": {
42+
maxTokens: 8192,
43+
contextWindow: 1_048_576,
44+
supportsImages: true,
45+
supportsPromptCache: false,
46+
inputPrice: 0,
47+
outputPrice: 0,
48+
},
49+
"gemini-1.5-flash-exp-0827": {
50+
maxTokens: 8192,
51+
contextWindow: 1_048_576,
52+
supportsImages: true,
53+
supportsPromptCache: false,
54+
inputPrice: 0,
55+
outputPrice: 0,
56+
},
57+
"gemini-1.5-flash-8b-exp-0827": {
58+
maxTokens: 8192,
59+
contextWindow: 1_048_576,
60+
supportsImages: true,
61+
supportsPromptCache: false,
62+
inputPrice: 0,
63+
outputPrice: 0,
64+
},
65+
"gemini-1.5-pro-002": {
66+
maxTokens: 8192,
67+
contextWindow: 2_097_152,
68+
supportsImages: true,
69+
supportsPromptCache: false,
70+
inputPrice: 0,
71+
outputPrice: 0,
72+
},
73+
"gemini-1.5-pro-exp-0827": {
74+
maxTokens: 8192,
75+
contextWindow: 2_097_152,
76+
supportsImages: true,
77+
supportsPromptCache: false,
78+
inputPrice: 0,
79+
outputPrice: 0,
80+
},
81+
"gemini-exp-1206": {
82+
maxTokens: 8192,
83+
contextWindow: 2_097_152,
84+
supportsImages: true,
85+
supportsPromptCache: false,
86+
inputPrice: 0,
87+
outputPrice: 0,
88+
},
89+
"gemini-2.5-flash": {
90+
maxTokens: 64_000,
91+
contextWindow: 1_048_576,
92+
supportsImages: true,
93+
supportsPromptCache: false,
94+
inputPrice: 0,
95+
outputPrice: 0,
96+
maxThinkingTokens: 24_576,
97+
supportsReasoningBudget: true,
98+
},
99+
"gemini-2.5-pro": {
100+
maxTokens: 64_000,
101+
contextWindow: 1_048_576,
102+
supportsImages: true,
103+
supportsPromptCache: false,
104+
inputPrice: 0,
105+
outputPrice: 0,
106+
maxThinkingTokens: 32_768,
107+
supportsReasoningBudget: true,
108+
requiredReasoningBudget: true,
109+
},
110+
} as const satisfies Record<string, ModelInfo>

packages/types/src/providers/groq.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ export const groqModels = {
7070
description: "Alibaba Qwen QwQ 32B model, 128K context.",
7171
},
7272
"qwen/qwen3-32b": {
73-
maxTokens: 131072,
73+
maxTokens: 40960,
7474
contextWindow: 131072,
7575
supportsImages: false,
7676
supportsPromptCache: false,

packages/types/src/providers/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ export * from "./chutes.js"
44
export * from "./claude-code.js"
55
export * from "./deepseek.js"
66
export * from "./gemini.js"
7+
export * from "./gemini-cli.js"
78
export * from "./glama.js"
89
export * from "./groq.js"
910
export * from "./lite-llm.js"

packages/types/src/telemetry.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ export enum TelemetryEventName {
2525
TASK_CONVERSATION_MESSAGE = "Conversation Message",
2626
LLM_COMPLETION = "LLM Completion",
2727
MODE_SWITCH = "Mode Switched",
28+
MODE_SELECTOR_OPENED = "Mode Selector Opened",
2829
TOOL_USED = "Tool Used",
2930

3031
CHECKPOINT_CREATED = "Checkpoint Created",
@@ -126,6 +127,7 @@ export const rooCodeTelemetryEventSchema = z.discriminatedUnion("type", [
126127
TelemetryEventName.TASK_COMPLETED,
127128
TelemetryEventName.TASK_CONVERSATION_MESSAGE,
128129
TelemetryEventName.MODE_SWITCH,
130+
TelemetryEventName.MODE_SELECTOR_OPENED,
129131
TelemetryEventName.TOOL_USED,
130132
TelemetryEventName.CHECKPOINT_CREATED,
131133
TelemetryEventName.CHECKPOINT_RESTORED,

src/activate/registerTerminalActions.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ const registerTerminalAction = (
2020
) => {
2121
context.subscriptions.push(
2222
vscode.commands.registerCommand(getTerminalCommand(command), async (args: any) => {
23-
let content = args.selection
23+
let content = args?.selection
2424

2525
if (!content || content === "") {
2626
content = await Terminal.getTerminalContents(promptType === "TERMINAL_ADD_TO_CONTEXT" ? -1 : 1)

src/api/index.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ import {
1515
OllamaHandler,
1616
LmStudioHandler,
1717
GeminiHandler,
18+
GeminiCliHandler,
1819
OpenAiNativeHandler,
1920
DeepSeekHandler,
2021
MistralHandler,
@@ -85,6 +86,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
8586
return new LmStudioHandler(options)
8687
case "gemini":
8788
return new GeminiHandler(options)
89+
case "gemini-cli":
90+
return new GeminiCliHandler(options)
8891
case "openai-native":
8992
return new OpenAiNativeHandler(options)
9093
case "deepseek":

0 commit comments

Comments
 (0)