From 39715f13e1f7078f11701d52a2a1d23fba8a93ea Mon Sep 17 00:00:00 2001 From: Catalin Lupuleti Date: Mon, 24 Feb 2025 19:05:05 +0000 Subject: [PATCH 001/145] Added support for Claude sonnet 3.7 via VertexAI --- src/shared/api.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/shared/api.ts b/src/shared/api.ts index 9ecb12c1403..5bcf60138ef 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -396,6 +396,15 @@ export const openRouterDefaultModelInfo: ModelInfo = { export type VertexModelId = keyof typeof vertexModels export const vertexDefaultModelId: VertexModelId = "claude-3-5-sonnet-v2@20241022" export const vertexModels = { + "claude-3-7-sonnet@20250219": { + maxTokens: 8192, + contextWindow: 200_000, + supportsImages: true, + supportsComputerUse: true, + supportsPromptCache: false, + inputPrice: 3.0, + outputPrice: 15.0, + }, "claude-3-5-sonnet-v2@20241022": { maxTokens: 8192, contextWindow: 200_000, From 92ac610366ca4c21e3d237325c8bf3239888b30d Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 11:14:08 -0800 Subject: [PATCH 002/145] Add Claude 3.7 --- src/api/providers/anthropic.ts | 2 ++ src/api/providers/openrouter.ts | 1 + src/shared/api.ts | 11 +++++++++++ 3 files changed, 14 insertions(+) diff --git a/src/api/providers/anthropic.ts b/src/api/providers/anthropic.ts index 9a14756f5d2..4c62238f461 100644 --- a/src/api/providers/anthropic.ts +++ b/src/api/providers/anthropic.ts @@ -27,8 +27,10 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { let stream: AnthropicStream const modelId = this.getModel().id + switch (modelId) { // 'latest' alias does not support cache_control + case "claude-3-7-sonnet-20250219": case "claude-3-5-sonnet-20241022": case "claude-3-5-haiku-20241022": case "claude-3-opus-20240229": diff --git a/src/api/providers/openrouter.ts b/src/api/providers/openrouter.ts index 1fcf25260ef..af087226ebd 100644 --- a/src/api/providers/openrouter.ts +++ b/src/api/providers/openrouter.ts @@ -55,6 +55,7 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler { // prompt caching: https://openrouter.ai/docs/prompt-caching // this is specifically for claude models (some models may 'support prompt caching' automatically without this) switch (this.getModel().id) { + case "anthropic/claude-3.7-sonnet": case "anthropic/claude-3.5-sonnet": case "anthropic/claude-3.5-sonnet:beta": case "anthropic/claude-3.5-sonnet-20240620": diff --git a/src/shared/api.ts b/src/shared/api.ts index 9ecb12c1403..3598eb16ac9 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -95,6 +95,17 @@ export interface ModelInfo { export type AnthropicModelId = keyof typeof anthropicModels export const anthropicDefaultModelId: AnthropicModelId = "claude-3-5-sonnet-20241022" export const anthropicModels = { + "claude-3-7-sonnet-20250219": { + maxTokens: 128_000, + contextWindow: 200_000, + supportsImages: true, + supportsComputerUse: true, + supportsPromptCache: true, + inputPrice: 3.0, // $3 per million input tokens + outputPrice: 15.0, // $15 per million output tokens + cacheWritesPrice: 3.75, // $3.75 per million tokens + cacheReadsPrice: 0.3, // $0.30 per million tokens + }, "claude-3-5-sonnet-20241022": { maxTokens: 8192, contextWindow: 200_000, From 08aa911e138a771d54f16a443074ca635f68bf90 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 24 Feb 2025 13:25:33 -0600 Subject: [PATCH 003/145] Revert "Merge pull request 
#1125 from hannesrudolph/change_order_apply_diff" This reverts commit dddac90721719c5e54100e80205e30f3a4738b96, reversing changes made to 1c9dcaddc417b3ce48c2c39bf7ecc8484e1aa063. --- CHANGELOG.md | 2 +- src/core/Cline.ts | 34 +-- src/core/__tests__/mode-validator.test.ts | 40 +-- src/core/assistant-message/index.ts | 6 +- .../parse-assistant-message.ts | 4 +- .../strategies/__tests__/new-unified.test.ts | 2 +- .../__tests__/search-replace.test.ts | 4 +- .../diff/strategies/__tests__/unified.test.ts | 2 +- src/core/diff/strategies/new-unified/index.ts | 6 +- src/core/diff/strategies/search-replace.ts | 6 +- src/core/diff/strategies/unified.ts | 6 +- .../__snapshots__/system.test.ts.snap | 284 +++++++++--------- src/core/prompts/__tests__/sections.test.ts | 16 +- src/core/prompts/__tests__/system.test.ts | 12 +- src/core/prompts/sections/capabilities.ts | 2 +- src/core/prompts/sections/mcp-servers.ts | 2 +- src/core/prompts/sections/modes.ts | 2 +- src/core/prompts/sections/rules.ts | 23 +- src/core/prompts/tools/index.ts | 4 +- src/core/prompts/tools/write-to-file.ts | 10 +- .../webview/__tests__/ClineProvider.test.ts | 2 +- src/shared/ExtensionMessage.ts | 2 +- src/shared/__tests__/modes.test.ts | 66 ++-- src/shared/tool-groups.ts | 6 +- 24 files changed, 269 insertions(+), 274 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ff8cfdc3e85..748f8c78f88 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -479,7 +479,7 @@ Join us at https://www.reddit.com/r/RooCode to share your custom modes and be pa ## [2.1.14] - Fix bug where diffs were not being applied correctly and try Aider's [unified diff prompt](https://github.com/Aider-AI/aider/blob/3995accd0ca71cea90ef76d516837f8c2731b9fe/aider/coders/udiff_prompts.py#L75-L105) -- If diffs are enabled, automatically reject create_file commands that lead to truncated output +- If diffs are enabled, automatically reject write_to_file commands that lead to truncated output ## [2.1.13] diff --git a/src/core/Cline.ts b/src/core/Cline.ts index 522af873274..12cf062406b 100644 --- a/src/core/Cline.ts +++ b/src/core/Cline.ts @@ -733,7 +733,7 @@ export class Cline { text: `[TASK RESUMPTION] This task was interrupted ${agoText}. It may or may not be complete, so please reassess the task context. Be aware that the project state may have changed since then. The current working directory is now '${cwd.toPosix()}'. If the task has not been completed, retry the last step before interruption and proceed with completing the task.\n\nNote: If you previously attempted a tool use that the user did not provide a result for, you should assume the tool use was not successful and assess whether you should retry. If the last tool was a browser_action, the browser has been closed and you must launch a new browser if needed.${ wasRecent - ? "\n\nIMPORTANT: If the last tool use was a create_file that was interrupted, the file was reverted back to its original state before the interrupted edit, and you do NOT need to re-read the file as you already have its up-to-date contents." + ? "\n\nIMPORTANT: If the last tool use was a write_to_file that was interrupted, the file was reverted back to its original state before the interrupted edit, and you do NOT need to re-read the file as you already have its up-to-date contents." 
: "" }` + (responseText @@ -1141,9 +1141,9 @@ export class Cline { return `[${block.name} for '${block.params.command}']` case "read_file": return `[${block.name} for '${block.params.path}']` - case "create_file": + case "write_to_file": return `[${block.name} for '${block.params.path}']` - case "edit_file": + case "apply_diff": return `[${block.name} for '${block.params.path}']` case "search_files": return `[${block.name} for '${block.params.regex}'${ @@ -1295,7 +1295,7 @@ export class Cline { mode ?? defaultModeSlug, customModes ?? [], { - edit_file: this.diffEnabled, + apply_diff: this.diffEnabled, }, block.params, ) @@ -1306,7 +1306,7 @@ export class Cline { } switch (block.name) { - case "create_file": { + case "write_to_file": { const relPath: string | undefined = block.params.path let newContent: string | undefined = block.params.content let predictedLineCount: number | undefined = parseInt(block.params.line_count ?? "0") @@ -1371,20 +1371,20 @@ export class Cline { } else { if (!relPath) { this.consecutiveMistakeCount++ - pushToolResult(await this.sayAndCreateMissingParamError("create_file", "path")) + pushToolResult(await this.sayAndCreateMissingParamError("write_to_file", "path")) await this.diffViewProvider.reset() break } if (!newContent) { this.consecutiveMistakeCount++ - pushToolResult(await this.sayAndCreateMissingParamError("create_file", "content")) + pushToolResult(await this.sayAndCreateMissingParamError("write_to_file", "content")) await this.diffViewProvider.reset() break } if (!predictedLineCount) { this.consecutiveMistakeCount++ pushToolResult( - await this.sayAndCreateMissingParamError("create_file", "line_count"), + await this.sayAndCreateMissingParamError("write_to_file", "line_count"), ) await this.diffViewProvider.reset() break @@ -1421,7 +1421,7 @@ export class Cline { formatResponse.toolError( `Content appears to be truncated (file has ${ newContent.split("\n").length - } lines but was predicted to have ${predictedLineCount} lines), and found comments indicating omitted code (e.g., '// rest of code unchanged', '/* previous code */'). Please provide the complete file content without any omissions if possible, or otherwise use the 'edit_file' tool to apply the diff to the original file.`, + } lines but was predicted to have ${predictedLineCount} lines), and found comments indicating omitted code (e.g., '// rest of code unchanged', '/* previous code */'). Please provide the complete file content without any omissions if possible, or otherwise use the 'apply_diff' tool to apply the diff to the original file.`, ), ) break @@ -1497,7 +1497,7 @@ export class Cline { break } } - case "edit_file": { + case "apply_diff": { const relPath: string | undefined = block.params.path const diffContent: string | undefined = block.params.diff @@ -1515,12 +1515,12 @@ export class Cline { } else { if (!relPath) { this.consecutiveMistakeCount++ - pushToolResult(await this.sayAndCreateMissingParamError("edit_file", "path")) + pushToolResult(await this.sayAndCreateMissingParamError("apply_diff", "path")) break } if (!diffContent) { this.consecutiveMistakeCount++ - pushToolResult(await this.sayAndCreateMissingParamError("edit_file", "diff")) + pushToolResult(await this.sayAndCreateMissingParamError("apply_diff", "diff")) break } @@ -2233,7 +2233,7 @@ export class Cline { formatResponse.toolResult( `The browser action has been executed. 
The console logs and screenshot have been captured for your analysis.\n\nConsole logs:\n${ browserActionResult.logs || "(No new logs)" - }\n\n(REMEMBER: if you need to proceed to using non-\`browser_action\` tools or launch a new browser, you MUST first close this browser. For example, if after analyzing the logs and screenshot you need to edit a file, you must first close the browser before you can use the create_file tool.)`, + }\n\n(REMEMBER: if you need to proceed to using non-\`browser_action\` tools or launch a new browser, you MUST first close this browser. For example, if after analyzing the logs and screenshot you need to edit a file, you must first close the browser before you can use the write_to_file tool.)`, browserActionResult.screenshot ? [browserActionResult.screenshot] : [], ), ) @@ -2750,7 +2750,7 @@ export class Cline { /* Seeing out of bounds is fine, it means that the next too call is being built up and ready to add to assistantMessageContent to present. - When you see the UI inactive during this, it means that a tool is breaking without presenting any UI. For example the create_file tool was breaking when relpath was undefined, and for invalid relpath it never presented UI. + When you see the UI inactive during this, it means that a tool is breaking without presenting any UI. For example the write_to_file tool was breaking when relpath was undefined, and for invalid relpath it never presented UI. */ this.presentAssistantMessageLocked = false // this needs to be placed here, if not then calling this.presentAssistantMessage below would fail (sometimes) since it's locked // NOTE: when tool is rejected, iterator stream is interrupted and it waits for userMessageContentReady to be true. Future calls to present will skip execution since didRejectTool and iterate until contentIndex is set to message length and it sets userMessageContentReady to true itself (instead of preemptively doing it in iterator) @@ -3300,10 +3300,10 @@ export class Cline { // Add warning if not in code mode if ( - !isToolAllowedForMode("create_file", currentMode, customModes ?? [], { - edit_file: this.diffEnabled, + !isToolAllowedForMode("write_to_file", currentMode, customModes ?? [], { + apply_diff: this.diffEnabled, }) && - !isToolAllowedForMode("edit_file", currentMode, customModes ?? [], { edit_file: this.diffEnabled }) + !isToolAllowedForMode("apply_diff", currentMode, customModes ?? [], { apply_diff: this.diffEnabled }) ) { const currentModeName = getModeBySlug(currentMode, customModes)?.name ?? currentMode const defaultModeName = getModeBySlug(defaultModeSlug, customModes)?.name ?? 
defaultModeSlug diff --git a/src/core/__tests__/mode-validator.test.ts b/src/core/__tests__/mode-validator.test.ts index 4efcd06e3e7..632ca8a8ab0 100644 --- a/src/core/__tests__/mode-validator.test.ts +++ b/src/core/__tests__/mode-validator.test.ts @@ -59,7 +59,7 @@ describe("mode-validator", () => { ] // Should allow tools from read and edit groups expect(isToolAllowedForMode("read_file", "custom-mode", customModes)).toBe(true) - expect(isToolAllowedForMode("create_file", "custom-mode", customModes)).toBe(true) + expect(isToolAllowedForMode("write_to_file", "custom-mode", customModes)).toBe(true) // Should not allow tools from other groups expect(isToolAllowedForMode("execute_command", "custom-mode", customModes)).toBe(false) }) @@ -76,7 +76,7 @@ describe("mode-validator", () => { // Should allow tools from read group expect(isToolAllowedForMode("read_file", codeMode, customModes)).toBe(true) // Should not allow tools from other groups - expect(isToolAllowedForMode("create_file", codeMode, customModes)).toBe(false) + expect(isToolAllowedForMode("write_to_file", codeMode, customModes)).toBe(false) }) it("respects tool requirements in custom modes", () => { @@ -88,39 +88,39 @@ describe("mode-validator", () => { groups: ["edit"] as const, }, ] - const requirements = { edit_file: false } + const requirements = { apply_diff: false } // Should respect disabled requirement even if tool group is allowed - expect(isToolAllowedForMode("edit_file", "custom-mode", customModes, requirements)).toBe(false) + expect(isToolAllowedForMode("apply_diff", "custom-mode", customModes, requirements)).toBe(false) // Should allow other edit tools - expect(isToolAllowedForMode("create_file", "custom-mode", customModes, requirements)).toBe(true) + expect(isToolAllowedForMode("write_to_file", "custom-mode", customModes, requirements)).toBe(true) }) }) describe("tool requirements", () => { it("respects tool requirements when provided", () => { - const requirements = { edit_file: false } - expect(isToolAllowedForMode("edit_file", codeMode, [], requirements)).toBe(false) + const requirements = { apply_diff: false } + expect(isToolAllowedForMode("apply_diff", codeMode, [], requirements)).toBe(false) - const enabledRequirements = { edit_file: true } - expect(isToolAllowedForMode("edit_file", codeMode, [], enabledRequirements)).toBe(true) + const enabledRequirements = { apply_diff: true } + expect(isToolAllowedForMode("apply_diff", codeMode, [], enabledRequirements)).toBe(true) }) it("allows tools when their requirements are not specified", () => { const requirements = { some_other_tool: true } - expect(isToolAllowedForMode("edit_file", codeMode, [], requirements)).toBe(true) + expect(isToolAllowedForMode("apply_diff", codeMode, [], requirements)).toBe(true) }) it("handles undefined and empty requirements", () => { - expect(isToolAllowedForMode("edit_file", codeMode, [], undefined)).toBe(true) - expect(isToolAllowedForMode("edit_file", codeMode, [], {})).toBe(true) + expect(isToolAllowedForMode("apply_diff", codeMode, [], undefined)).toBe(true) + expect(isToolAllowedForMode("apply_diff", codeMode, [], {})).toBe(true) }) it("prioritizes requirements over mode configuration", () => { - const requirements = { edit_file: false } + const requirements = { apply_diff: false } // Even in code mode which allows all tools, disabled requirement should take precedence - expect(isToolAllowedForMode("edit_file", codeMode, [], requirements)).toBe(false) + expect(isToolAllowedForMode("apply_diff", codeMode, [], requirements)).toBe(false) 
}) }) }) @@ -137,19 +137,19 @@ describe("mode-validator", () => { }) it("throws error when tool requirement is not met", () => { - const requirements = { edit_file: false } - expect(() => validateToolUse("edit_file", codeMode, [], requirements)).toThrow( - 'Tool "edit_file" is not allowed in code mode.', + const requirements = { apply_diff: false } + expect(() => validateToolUse("apply_diff", codeMode, [], requirements)).toThrow( + 'Tool "apply_diff" is not allowed in code mode.', ) }) it("does not throw when tool requirement is met", () => { - const requirements = { edit_file: true } - expect(() => validateToolUse("edit_file", codeMode, [], requirements)).not.toThrow() + const requirements = { apply_diff: true } + expect(() => validateToolUse("apply_diff", codeMode, [], requirements)).not.toThrow() }) it("handles undefined requirements gracefully", () => { - expect(() => validateToolUse("edit_file", codeMode, [], undefined)).not.toThrow() + expect(() => validateToolUse("apply_diff", codeMode, [], undefined)).not.toThrow() }) }) }) diff --git a/src/core/assistant-message/index.ts b/src/core/assistant-message/index.ts index 46b29a703db..f1c49f85ab7 100644 --- a/src/core/assistant-message/index.ts +++ b/src/core/assistant-message/index.ts @@ -11,8 +11,8 @@ export interface TextContent { export const toolUseNames = [ "execute_command", "read_file", - "create_file", - "edit_file", + "write_to_file", + "apply_diff", "insert_content", "search_and_replace", "search_files", @@ -80,7 +80,7 @@ export interface ReadFileToolUse extends ToolUse { } export interface WriteToFileToolUse extends ToolUse { - name: "create_file" + name: "write_to_file" params: Partial, "path" | "content" | "line_count">> } diff --git a/src/core/assistant-message/parse-assistant-message.ts b/src/core/assistant-message/parse-assistant-message.ts index 9b1cea70a9b..e38e8f6458e 100644 --- a/src/core/assistant-message/parse-assistant-message.ts +++ b/src/core/assistant-message/parse-assistant-message.ts @@ -61,9 +61,9 @@ export function parseAssistantMessage(assistantMessage: string) { // there's no current param, and not starting a new param - // special case for create_file where file contents could contain the closing tag, in which case the param would have closed and we end up with the rest of the file contents here. To work around this, we get the string between the starting content tag and the LAST content tag. + // special case for write_to_file where file contents could contain the closing tag, in which case the param would have closed and we end up with the rest of the file contents here. To work around this, we get the string between the starting content tag and the LAST content tag. 
const contentParamName: ToolParamName = "content" - if (currentToolUse.name === "create_file" && accumulator.endsWith(``)) { + if (currentToolUse.name === "write_to_file" && accumulator.endsWith(``)) { const toolContent = accumulator.slice(currentToolUseStartIndex) const contentStartTag = `<${contentParamName}>` const contentEndTag = `` diff --git a/src/core/diff/strategies/__tests__/new-unified.test.ts b/src/core/diff/strategies/__tests__/new-unified.test.ts index 9d30cece7e4..8832f9e7c08 100644 --- a/src/core/diff/strategies/__tests__/new-unified.test.ts +++ b/src/core/diff/strategies/__tests__/new-unified.test.ts @@ -29,7 +29,7 @@ describe("main", () => { const cwd = "/test/path" const description = strategy.getToolDescription({ cwd }) - expect(description).toContain("edit_file Tool - Generate Precise Code Changes") + expect(description).toContain("apply_diff Tool - Generate Precise Code Changes") expect(description).toContain(cwd) expect(description).toContain("Step-by-Step Instructions") expect(description).toContain("Requirements") diff --git a/src/core/diff/strategies/__tests__/search-replace.test.ts b/src/core/diff/strategies/__tests__/search-replace.test.ts index 723beee23a7..cd71edac475 100644 --- a/src/core/diff/strategies/__tests__/search-replace.test.ts +++ b/src/core/diff/strategies/__tests__/search-replace.test.ts @@ -1544,8 +1544,8 @@ function two() { expect(description).toContain("<<<<<<< SEARCH") expect(description).toContain("=======") expect(description).toContain(">>>>>>> REPLACE") - expect(description).toContain("") - expect(description).toContain("") + expect(description).toContain("") + expect(description).toContain("") }) it("should document start_line and end_line parameters", async () => { diff --git a/src/core/diff/strategies/__tests__/unified.test.ts b/src/core/diff/strategies/__tests__/unified.test.ts index ae7860869bb..1d9847b3c51 100644 --- a/src/core/diff/strategies/__tests__/unified.test.ts +++ b/src/core/diff/strategies/__tests__/unified.test.ts @@ -12,7 +12,7 @@ describe("UnifiedDiffStrategy", () => { const cwd = "/test/path" const description = strategy.getToolDescription({ cwd }) - expect(description).toContain("edit_file") + expect(description).toContain("apply_diff") expect(description).toContain(cwd) expect(description).toContain("Parameters:") expect(description).toContain("Format Requirements:") diff --git a/src/core/diff/strategies/new-unified/index.ts b/src/core/diff/strategies/new-unified/index.ts index df130ffaca6..d82a05a1045 100644 --- a/src/core/diff/strategies/new-unified/index.ts +++ b/src/core/diff/strategies/new-unified/index.ts @@ -108,7 +108,7 @@ export class NewUnifiedDiffStrategy implements DiffStrategy { } getToolDescription(args: { cwd: string; toolOptions?: { [key: string]: string } }): string { - return `# edit_file Tool - Generate Precise Code Changes + return `# apply_diff Tool - Generate Precise Code Changes Generate a unified diff that can be cleanly applied to modify code files. @@ -168,12 +168,12 @@ Parameters: - diff: (required) Unified diff content in unified format to apply to the file. 
Usage: - + path/to/file.ext Your diff here -` +` } // Helper function to split a hunk into smaller hunks based on contiguous changes diff --git a/src/core/diff/strategies/search-replace.ts b/src/core/diff/strategies/search-replace.ts index c8d4f22c8d1..a9bf46758de 100644 --- a/src/core/diff/strategies/search-replace.ts +++ b/src/core/diff/strategies/search-replace.ts @@ -40,7 +40,7 @@ export class SearchReplaceDiffStrategy implements DiffStrategy { } getToolDescription(args: { cwd: string; toolOptions?: { [key: string]: string } }): string { - return `## edit_file + return `## apply_diff Description: Request to replace existing code using a search and replace block. This tool allows for precise, surgical replaces to files by specifying exactly what content to search for and what to replace it with. The tool will maintain proper indentation and formatting while making changes. @@ -91,14 +91,14 @@ def calculate_total(items): \`\`\` Usage: - + File path here Your search/replace content here 1 5 -` +` } async applyDiff( diff --git a/src/core/diff/strategies/unified.ts b/src/core/diff/strategies/unified.ts index 5947391df69..f1cdb3b5849 100644 --- a/src/core/diff/strategies/unified.ts +++ b/src/core/diff/strategies/unified.ts @@ -3,7 +3,7 @@ import { DiffStrategy, DiffResult } from "../types" export class UnifiedDiffStrategy implements DiffStrategy { getToolDescription(args: { cwd: string; toolOptions?: { [key: string]: string } }): string { - return `## edit_file + return `## apply_diff Description: Apply a unified diff to a file at the specified path. This tool is useful when you need to make specific modifications to a file based on a set of changes provided in unified diff format (diff -U3). Parameters: @@ -100,12 +100,12 @@ Best Practices: 4. Verify line numbers match the line numbers you have in the file Usage: - + File path here Your diff here -` +` } async applyDiff(originalContent: string, diffContent: string): Promise { diff --git a/src/core/prompts/__tests__/__snapshots__/system.test.ts.snap b/src/core/prompts/__tests__/__snapshots__/system.test.ts.snap index e4447d31eef..2abc6138619 100644 --- a/src/core/prompts/__tests__/__snapshots__/system.test.ts.snap +++ b/src/core/prompts/__tests__/__snapshots__/system.test.ts.snap @@ -94,23 +94,23 @@ Example: Requesting to list all top level source code definitions in the current . -## create_file +## write_to_file Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. Parameters: - path: (required) The path of the file to write to (relative to the current working directory /test/path) - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. - line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. 
Usage: - + File path here Your file content here total number of lines in the file, including empty lines - + Example: Requesting to write to frontend-config.json - + frontend-config.json { @@ -129,7 +129,7 @@ Example: Requesting to write to frontend-config.json } 14 - + ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Commands will be executed in the current working directory: /test/path @@ -249,7 +249,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the create_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. + - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. 
If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. ==== @@ -266,9 +266,9 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using create_file to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. 
Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. * For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" @@ -417,23 +417,23 @@ Example: Requesting to list all top level source code definitions in the current . -## create_file +## write_to_file Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. Parameters: - path: (required) The path of the file to write to (relative to the current working directory /test/path) - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. - line_count: (required) The number of lines in the file. 
Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. Usage: - + File path here Your file content here total number of lines in the file, including empty lines - + Example: Requesting to write to frontend-config.json - + frontend-config.json { @@ -452,7 +452,7 @@ Example: Requesting to write to frontend-config.json } 14 - + ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Commands will be executed in the current working directory: /test/path @@ -572,7 +572,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the create_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. 
+ - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. ==== @@ -589,9 +589,9 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using create_file to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. 
-- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. * For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" @@ -740,23 +740,23 @@ Example: Requesting to list all top level source code definitions in the current . -## create_file +## write_to_file Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. 
Parameters: - path: (required) The path of the file to write to (relative to the current working directory /test/path) - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. - line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. Usage: - + File path here Your file content here total number of lines in the file, including empty lines - + Example: Requesting to write to frontend-config.json - + frontend-config.json { @@ -775,7 +775,7 @@ Example: Requesting to write to frontend-config.json } 14 - + ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Commands will be executed in the current working directory: /test/path @@ -895,7 +895,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the create_file tool to apply the changes. 
If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. + - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. ==== @@ -912,9 +912,9 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using create_file to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. 
Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. * For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" @@ -1063,23 +1063,23 @@ Example: Requesting to list all top level source code definitions in the current . -## create_file +## write_to_file Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. 
This tool will automatically create any directories needed to write the file. Parameters: - path: (required) The path of the file to write to (relative to the current working directory /test/path) - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. - line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. Usage: - + File path here Your file content here total number of lines in the file, including empty lines - + Example: Requesting to write to frontend-config.json - + frontend-config.json { @@ -1098,7 +1098,7 @@ Example: Requesting to write to frontend-config.json } 14 - + ## browser_action Description: Request to interact with a Puppeteer-controlled browser. Every action, except \`close\`, will be responded to with a screenshot of the browser's current state, along with any new console logs. You may only perform one browser action per message, and wait for the user's response including a screenshot and logs to determine the next action. @@ -1264,7 +1264,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the create_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. 
+ - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. - You can use the browser_action tool to interact with websites (including html files and locally running development servers) through a Puppeteer-controlled browser when you feel it is necessary in accomplishing the user's task. This tool is particularly useful for web development tasks as it allows you to launch a browser, navigate to pages, interact with elements through clicks and keyboard input, and capture the results through screenshots and console logs. This tool may be useful at key stages of web development tasks-such as after implementing new features, making substantial changes, when troubleshooting issues, or to verify the result of your work. You can analyze the provided screenshots to ensure correct rendering or identify errors, and review console logs for runtime issues. - For example, if asked to add a component to a react website, you might create the necessary files, use execute_command to run the site locally, then use browser_action to launch the browser, navigate to the local server, and verify the component renders & functions correctly before closing the browser. @@ -1283,9 +1283,9 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. 
Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using create_file to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. 
If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. * For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" @@ -1435,23 +1435,23 @@ Example: Requesting to list all top level source code definitions in the current . -## create_file +## write_to_file Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. Parameters: - path: (required) The path of the file to write to (relative to the current working directory /test/path) - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. - line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. Usage: - + File path here Your file content here total number of lines in the file, including empty lines - + Example: Requesting to write to frontend-config.json - + frontend-config.json { @@ -1470,7 +1470,7 @@ Example: Requesting to write to frontend-config.json } 14 - + ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Commands will be executed in the current working directory: /test/path @@ -1983,7 +1983,7 @@ IMPORTANT: Regardless of what else you see in the MCP settings file, you must de ## Editing MCP Servers -The user may ask to add tools or resources that may make sense to add to an existing MCP server (listed under 'Connected MCP Servers' above: (None running currently), e.g. if it would use the same API. This would be possible if you can locate the MCP server repository on the user's system by looking at the server arguments for a filepath. You might then use list_files and read_file to explore the files in the repository, and use create_file to make changes to the files. +The user may ask to add tools or resources that may make sense to add to an existing MCP server (listed under 'Connected MCP Servers' above: (None running currently), e.g. if it would use the same API. This would be possible if you can locate the MCP server repository on the user's system by looking at the server arguments for a filepath. 
You might then use list_files and read_file to explore the files in the repository, and use write_to_file to make changes to the files. However some MCP servers may be running from installed packages rather than a local repository, in which case it may make more sense to create a new MCP server. @@ -2001,7 +2001,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the create_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. + - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. 
The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. - You have access to MCP servers that may provide additional tools and resources. Each server may provide different capabilities that you can use to accomplish tasks more effectively. @@ -2020,9 +2020,9 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using create_file to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. 
Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. * For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" @@ -2171,23 +2171,23 @@ Example: Requesting to list all top level source code definitions in the current . -## create_file +## write_to_file Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. Parameters: - path: (required) The path of the file to write to (relative to the current working directory /test/path) - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. - line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. Usage: - + File path here Your file content here total number of lines in the file, including empty lines - + Example: Requesting to write to frontend-config.json - + frontend-config.json { @@ -2206,7 +2206,7 @@ Example: Requesting to write to frontend-config.json } 14 - + ## browser_action Description: Request to interact with a Puppeteer-controlled browser. 
Every action, except \`close\`, will be responded to with a screenshot of the browser's current state, along with any new console logs. You may only perform one browser action per message, and wait for the user's response including a screenshot and logs to determine the next action. @@ -2372,7 +2372,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the create_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. + - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. 
The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. - You can use the browser_action tool to interact with websites (including html files and locally running development servers) through a Puppeteer-controlled browser when you feel it is necessary in accomplishing the user's task. This tool is particularly useful for web development tasks as it allows you to launch a browser, navigate to pages, interact with elements through clicks and keyboard input, and capture the results through screenshots and console logs. This tool may be useful at key stages of web development tasks-such as after implementing new features, making substantial changes, when troubleshooting issues, or to verify the result of your work. You can analyze the provided screenshots to ensure correct rendering or identify errors, and review console logs for runtime issues. - For example, if asked to add a component to a react website, you might create the necessary files, use execute_command to run the site locally, then use browser_action to launch the browser, navigate to the local server, and verify the component renders & functions correctly before closing the browser. @@ -2391,9 +2391,9 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using create_file to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. 
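The search_files rule above asks for regex patterns that balance specificity and flexibility, for example when hunting TODO comments. As a hedged illustration only (the pattern and the sample line are invented for this sketch and are not part of the extension), a pattern in that spirit might look like this in TypeScript:

```typescript
// Illustrative pattern only, not taken from the extension's prompts or code.
// Specific enough to match annotated "// TODO(owner): text" or "# FIXME: text"
// comments, flexible enough to allow an optional owner tag and either comment marker.
const todoPattern = /(?:\/\/|#)\s*(TODO|FIXME)(?:\(([^)]+)\))?:?\s*(.+)/

const line = "// TODO(auth): handle token refresh"
const match = line.match(todoPattern)
if (match) {
	const [, kind, owner, text] = match
	console.log(`${kind} ${owner ?? "unassigned"}: ${text}`)
}
```

A narrower pattern (say, one anchored to a specific function name) would miss variants, while a looser one (just the word "todo") would flood the results with prose matches; that trade-off is what the rule is pointing at.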
-- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. * For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" @@ -2543,7 +2543,44 @@ Example: Requesting to list all top level source code definitions in the current . -## edit_file +## write_to_file +Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. 
+Parameters: +- path: (required) The path of the file to write to (relative to the current working directory /test/path) +- content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. +- line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. +Usage: + +File path here + +Your file content here + +total number of lines in the file, including empty lines + + +Example: Requesting to write to frontend-config.json + +frontend-config.json + +{ + "apiEndpoint": "https://api.example.com", + "theme": { + "primaryColor": "#007bff", + "secondaryColor": "#6c757d", + "fontFamily": "Arial, sans-serif" + }, + "features": { + "darkMode": true, + "notifications": true, + "analytics": false + }, + "version": "1.0.0" +} + +14 + + +## apply_diff Description: Request to replace existing code using a search and replace block. This tool allows for precise, surgical replaces to files by specifying exactly what content to search for and what to replace it with. The tool will maintain proper indentation and formatting while making changes. @@ -2594,51 +2631,14 @@ def calculate_total(items): \`\`\` Usage: - + File path here Your search/replace content here 1 5 - - -## create_file -Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. -Parameters: -- path: (required) The path of the file to write to (relative to the current working directory /test/path) -- content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. -- line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. -Usage: - -File path here - -Your file content here - -total number of lines in the file, including empty lines - - -Example: Requesting to write to frontend-config.json - -frontend-config.json - -{ - "apiEndpoint": "https://api.example.com", - "theme": { - "primaryColor": "#007bff", - "secondaryColor": "#6c757d", - "fontFamily": "Arial, sans-serif" - }, - "features": { - "darkMode": true, - "notifications": true, - "analytics": false - }, - "version": "1.0.0" -} - -14 - + ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. 
Commands will be executed in the current working directory: /test/path @@ -2758,7 +2758,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the edit_file or create_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. + - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file or apply_diff tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. 
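The apply_diff tool described above replaces an exact block of existing code between a start and end line (1 and 5 in the usage shown). Below is a minimal sketch of that mechanic, assuming an exact-match search block and 1-based inclusive line numbers; it is an illustration, not the extension's actual diff strategy.

```typescript
// Sketch only, not the extension's implementation. Assumes the search block must
// match the addressed lines exactly and that line numbers are 1-based and inclusive.
function applySearchReplace(
	fileContent: string,
	search: string,
	replace: string,
	startLine: number,
	endLine: number,
): string {
	const lines = fileContent.split("\n")
	const target = lines.slice(startLine - 1, endLine).join("\n")
	if (target !== search) {
		throw new Error(`Search block does not match lines ${startLine}-${endLine}`)
	}
	// Splice the replacement in; everything outside the range is left untouched.
	lines.splice(startLine - 1, endLine - startLine + 1, ...replace.split("\n"))
	return lines.join("\n")
}
```

This also makes the contrast with write_to_file concrete: write_to_file rewrites the whole file, while a search/replace edit touches only the addressed lines.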
==== @@ -2775,11 +2775,11 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using edit_file or create_file to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: edit_file (for replacing lines in existing files), create_file (for creating new files or complete file rewrites). -- You should always prefer using other editing tools over create_file when making changes to existing files since create_file is much slower and cannot handle large files. -- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. 
Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), apply_diff (for replacing lines in existing files). +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. * For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" @@ -2928,23 +2928,23 @@ Example: Requesting to list all top level source code definitions in the current . -## create_file +## write_to_file Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. Parameters: - path: (required) The path of the file to write to (relative to the current working directory /test/path) - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. - line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. 
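The line_count parameter above must be computed from the actual file content. Here is a small hedged sketch of one way to derive it (the helper and the object literal are hypothetical, invented for this example; the only assumption taken from the text is that every line of the final file counts, empty lines included):

```typescript
// Hypothetical helper, not part of the extension: derives line_count from the
// complete file content by counting newline-separated lines, empty ones included.
function countLines(content: string): number {
	return content.split("\n").length
}

const content = ["{", '  "version": "1.0.0"', "}"].join("\n")

// Mirrors the three required parameters listed above.
const writeToFileParams = {
	path: "frontend-config.json",
	content,
	line_count: countLines(content), // 3
}
console.log(writeToFileParams.line_count)
```

Computing the count from the assembled content, rather than from whatever happens to be pasted into the response, is exactly the distinction the parameter description draws.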
Usage: - + File path here Your file content here total number of lines in the file, including empty lines - + Example: Requesting to write to frontend-config.json - + frontend-config.json { @@ -2963,7 +2963,7 @@ Example: Requesting to write to frontend-config.json } 14 - + ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Commands will be executed in the current working directory: /test/path @@ -3083,7 +3083,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the create_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. + - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. 
If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. ==== @@ -3100,9 +3100,9 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using create_file to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. 
Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. * For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" @@ -3293,23 +3293,23 @@ Example: Requesting to list all top level source code definitions in the current . -## create_file +## write_to_file Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. Parameters: - path: (required) The path of the file to write to (relative to the current working directory /test/path) - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. - line_count: (required) The number of lines in the file. 
Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. Usage: - + File path here Your file content here total number of lines in the file, including empty lines - + Example: Requesting to write to frontend-config.json - + frontend-config.json { @@ -3328,7 +3328,7 @@ Example: Requesting to write to frontend-config.json } 14 - + ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Commands will be executed in the current working directory: /test/path @@ -3505,7 +3505,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the create_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. 
+ - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. - You have access to MCP servers that may provide additional tools and resources. Each server may provide different capabilities that you can use to accomplish tasks more effectively. @@ -3524,9 +3524,9 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using create_file to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. 
Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. * For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" @@ -3690,23 +3690,23 @@ Example: Requesting to list all top level source code definitions in the current . -## create_file +## write_to_file Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. 
This tool will automatically create any directories needed to write the file. Parameters: - path: (required) The path of the file to write to (relative to the current working directory /test/path) - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. - line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. Usage: - + File path here Your file content here total number of lines in the file, including empty lines - + Example: Requesting to write to frontend-config.json - + frontend-config.json { @@ -3725,7 +3725,7 @@ Example: Requesting to write to frontend-config.json } 14 - + ## ask_followup_question Description: Ask the user a question to gather additional information needed to complete the task. This tool should be used when you encounter ambiguities, need clarification, or require more details to proceed effectively. It allows for interactive problem-solving by enabling direct communication with the user. Use this tool judiciously to maintain a balance between gathering necessary information and avoiding excessive back-and-forth. @@ -3831,7 +3831,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the create_file tool to apply the changes. 
If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. + - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. ==== @@ -3848,9 +3848,9 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using create_file to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. 
Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. * For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" @@ -4108,7 +4108,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. 
This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the create_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. + - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. ==== @@ -4125,9 +4125,9 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. 
- Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using create_file to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. 
Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. * For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" @@ -4309,23 +4309,23 @@ Example: Requesting to list all top level source code definitions in the current . -## create_file +## write_to_file Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. Parameters: - path: (required) The path of the file to write to (relative to the current working directory /test/path) - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. - line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. Usage: - + File path here Your file content here total number of lines in the file, including empty lines - + Example: Requesting to write to frontend-config.json - + frontend-config.json { @@ -4344,7 +4344,7 @@ Example: Requesting to write to frontend-config.json } 14 - + ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. 
Commands will be executed in the current working directory: /test/path @@ -4857,7 +4857,7 @@ IMPORTANT: Regardless of what else you see in the MCP settings file, you must de ## Editing MCP Servers -The user may ask to add tools or resources that may make sense to add to an existing MCP server (listed under 'Connected MCP Servers' above: (None running currently), e.g. if it would use the same API. This would be possible if you can locate the MCP server repository on the user's system by looking at the server arguments for a filepath. You might then use list_files and read_file to explore the files in the repository, and use create_file to make changes to the files. +The user may ask to add tools or resources that may make sense to add to an existing MCP server (listed under 'Connected MCP Servers' above: (None running currently), e.g. if it would use the same API. This would be possible if you can locate the MCP server repository on the user's system by looking at the server arguments for a filepath. You might then use list_files and read_file to explore the files in the repository, and use write_to_file to make changes to the files. However some MCP servers may be running from installed packages rather than a local repository, in which case it may make more sense to create a new MCP server. @@ -4875,7 +4875,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the create_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. 
+ - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. - You have access to MCP servers that may provide additional tools and resources. Each server may provide different capabilities that you can use to accomplish tasks more effectively. @@ -4894,9 +4894,9 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using create_file to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. 
Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. 
* For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" diff --git a/src/core/prompts/__tests__/sections.test.ts b/src/core/prompts/__tests__/sections.test.ts index 75af4ce6a6b..2100016e467 100644 --- a/src/core/prompts/__tests__/sections.test.ts +++ b/src/core/prompts/__tests__/sections.test.ts @@ -33,24 +33,24 @@ describe("getCapabilitiesSection", () => { const cwd = "/test/path" const mcpHub = undefined const mockDiffStrategy: DiffStrategy = { - getToolDescription: () => "edit_file tool description", + getToolDescription: () => "apply_diff tool description", applyDiff: async (originalContent: string, diffContent: string): Promise => { return { success: true, content: "mock result" } }, } - test("includes edit_file in capabilities when diffStrategy is provided", () => { + test("includes apply_diff in capabilities when diffStrategy is provided", () => { const result = getCapabilitiesSection(cwd, false, mcpHub, mockDiffStrategy) - expect(result).toContain("or create_file") - expect(result).toContain("then use the edit_file or create_file tool") + expect(result).toContain("or apply_diff") + expect(result).toContain("then use the write_to_file or apply_diff tool") }) - test("excludes edit_file from capabilities when diffStrategy is undefined", () => { + test("excludes apply_diff from capabilities when diffStrategy is undefined", () => { const result = getCapabilitiesSection(cwd, false, mcpHub, undefined) - expect(result).not.toContain("or edit_file") - expect(result).toContain("then use the create_file tool") - expect(result).not.toContain("create_file or edit_file") + expect(result).not.toContain("or apply_diff") + expect(result).toContain("then use the write_to_file tool") + expect(result).not.toContain("write_to_file or apply_diff") }) }) diff --git a/src/core/prompts/__tests__/system.test.ts b/src/core/prompts/__tests__/system.test.ts index 5f936fd4058..2adfa927eb6 100644 --- a/src/core/prompts/__tests__/system.test.ts +++ b/src/core/prompts/__tests__/system.test.ts @@ -288,7 +288,7 @@ describe("SYSTEM_PROMPT", () => { true, // enableMcpServerCreation ) - expect(prompt).toContain("edit_file") + expect(prompt).toContain("apply_diff") expect(prompt).toMatchSnapshot() }) @@ -310,7 +310,7 @@ describe("SYSTEM_PROMPT", () => { true, // enableMcpServerCreation ) - expect(prompt).not.toContain("edit_file") + expect(prompt).not.toContain("apply_diff") expect(prompt).toMatchSnapshot() }) @@ -332,7 +332,7 @@ describe("SYSTEM_PROMPT", () => { true, // enableMcpServerCreation ) - expect(prompt).not.toContain("edit_file") + expect(prompt).not.toContain("apply_diff") expect(prompt).toMatchSnapshot() }) @@ -562,8 +562,8 @@ describe("SYSTEM_PROMPT", () => { ) // Verify base instruction lists all available tools - expect(prompt).toContain("edit_file (for replacing lines in existing files)") - expect(prompt).toContain("create_file (for creating new files or complete file rewrites)") + expect(prompt).toContain("apply_diff (for replacing lines in existing files)") + expect(prompt).toContain("write_to_file (for creating new files or complete file rewrites)") expect(prompt).toContain("insert_content (for adding lines to existing files)") expect(prompt).toContain("search_and_replace (for finding and replacing individual pieces of text)") }) @@ -593,7 +593,7 @@ describe("SYSTEM_PROMPT", () => { // Verify detailed instructions for each tool expect(prompt).toContain( - "You should always prefer using other editing tools over create_file 
when making changes to existing files since create_file is much slower and cannot handle large files.", + "You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files.", ) expect(prompt).toContain("The insert_content tool adds lines of text to files") expect(prompt).toContain("The search_and_replace tool finds and replaces text or regex in files") diff --git a/src/core/prompts/sections/capabilities.ts b/src/core/prompts/sections/capabilities.ts index 9cd39bde580..c292eeffbc3 100644 --- a/src/core/prompts/sections/capabilities.ts +++ b/src/core/prompts/sections/capabilities.ts @@ -17,7 +17,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('${cwd}') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use ${diffStrategy ? "the edit_file or create_file" : "the create_file"} tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. + - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file${diffStrategy ? " or apply_diff" : ""} tool to apply the changes. 
If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance.${ supportsComputerUse ? "\n- You can use the browser_action tool to interact with websites (including html files and locally running development servers) through a Puppeteer-controlled browser when you feel it is necessary in accomplishing the user's task. This tool is particularly useful for web development tasks as it allows you to launch a browser, navigate to pages, interact with elements through clicks and keyboard input, and capture the results through screenshots and console logs. This tool may be useful at key stages of web development tasks-such as after implementing new features, making substantial changes, when troubleshooting issues, or to verify the result of your work. You can analyze the provided screenshots to ensure correct rendering or identify errors, and review console logs for runtime issues.\n - For example, if asked to add a component to a react website, you might create the necessary files, use execute_command to run the site locally, then use browser_action to launch the browser, navigate to the local server, and verify the component renders & functions correctly before closing the browser." diff --git a/src/core/prompts/sections/mcp-servers.ts b/src/core/prompts/sections/mcp-servers.ts index fd7f520ddd9..3f7ec88297c 100644 --- a/src/core/prompts/sections/mcp-servers.ts +++ b/src/core/prompts/sections/mcp-servers.ts @@ -414,7 +414,7 @@ The user may ask to add tools or resources that may make sense to add to an exis .getServers() .map((server) => server.name) .join(", ") || "(None running currently)" - }, e.g. if it would use the same API. This would be possible if you can locate the MCP server repository on the user's system by looking at the server arguments for a filepath. You might then use list_files and read_file to explore the files in the repository, and use create_file${diffStrategy ? " or edit_file" : ""} to make changes to the files. + }, e.g. if it would use the same API. This would be possible if you can locate the MCP server repository on the user's system by looking at the server arguments for a filepath. You might then use list_files and read_file to explore the files in the repository, and use write_to_file${diffStrategy ? " or apply_diff" : ""} to make changes to the files. However some MCP servers may be running from installed packages rather than a local repository, in which case it may make more sense to create a new MCP server. 
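Both the capabilities.ts and mcp-servers.ts hunks above build their tool guidance with the same conditional template, mentioning apply_diff only when a diff strategy is configured. Below is a minimal TypeScript sketch of that pattern, not code from this patch; the simplified DiffStrategy shape and the helper name editingToolsPhrase are illustrative assumptions, shown only to make the `${diffStrategy ? " or apply_diff" : ""}` interpolation concrete.

// Minimal sketch of the conditional tool-name interpolation used above.
// The DiffStrategy shape here is simplified; the real interface lives in
// src/core/diff/ and is not reproduced by this example.
interface DiffStrategy {
	getToolDescription(args: { cwd: string; toolOptions?: Record<string, string> }): string
	applyDiff(originalContent: string, diffContent: string): Promise<{ success: boolean; content?: string }>
}

// Hypothetical helper name, used only for illustration.
function editingToolsPhrase(diffStrategy?: DiffStrategy): string {
	// write_to_file is always listed; apply_diff is mentioned only when a
	// diff strategy is configured (i.e. diffEnabled is true for the provider).
	return `the write_to_file${diffStrategy ? " or apply_diff" : ""} tool`
}

// editingToolsPhrase(undefined)    -> "the write_to_file tool"
// editingToolsPhrase(someStrategy) -> "the write_to_file or apply_diff tool"
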
diff --git a/src/core/prompts/sections/modes.ts b/src/core/prompts/sections/modes.ts index de3cac9c947..eff950c2c2f 100644 --- a/src/core/prompts/sections/modes.ts +++ b/src/core/prompts/sections/modes.ts @@ -45,7 +45,7 @@ Both files should follow this structure: "roleDefinition": "You are Roo, a UI/UX expert specializing in design systems and frontend development. Your expertise includes:\\n- Creating and maintaining design systems\\n- Implementing responsive and accessible web interfaces\\n- Working with CSS, HTML, and modern frontend frameworks\\n- Ensuring consistent user experiences across platforms", // Required: non-empty "groups": [ // Required: array of tool groups (can be empty) "read", // Read files group (read_file, search_files, list_files, list_code_definition_names) - "edit", // Edit files group (edit_file, create_file) - allows editing any file + "edit", // Edit files group (write_to_file, apply_diff) - allows editing any file // Or with file restrictions: // ["edit", { fileRegex: "\\.md$", description: "Markdown files only" }], // Edit group that only allows editing markdown files "browser", // Browser group (browser_action) diff --git a/src/core/prompts/sections/rules.ts b/src/core/prompts/sections/rules.ts index e0d65976182..b6e19eb08c9 100644 --- a/src/core/prompts/sections/rules.ts +++ b/src/core/prompts/sections/rules.ts @@ -5,16 +5,11 @@ import * as path from "path" function getEditingInstructions(diffStrategy?: DiffStrategy, experiments?: Record): string { const instructions: string[] = [] - const availableTools: string[] = [] + const availableTools: string[] = ["write_to_file (for creating new files or complete file rewrites)"] // Collect available editing tools if (diffStrategy) { - availableTools.push( - "edit_file (for replacing lines in existing files)", - "create_file (for creating new files or complete file rewrites)", - ) - } else { - availableTools.push("create_file (for creating new files or complete file rewrites)") + availableTools.push("apply_diff (for replacing lines in existing files)") } if (experiments?.["insert_content"]) { availableTools.push("insert_content (for adding lines to existing files)") @@ -41,16 +36,16 @@ function getEditingInstructions(diffStrategy?: DiffStrategy, experiments?: Recor ) } + instructions.push( + "- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project.", + ) + if (availableTools.length > 1) { instructions.push( - "- You should always prefer using other editing tools over create_file when making changes to existing files since create_file is much slower and cannot handle large files.", + "- You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files.", ) } - instructions.push( - "- When using the create_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. 
Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project.", - ) - return instructions.join("\n") } @@ -68,8 +63,8 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '${cwd.toPosix()}', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '${cwd.toPosix()}', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '${cwd.toPosix()}'). For example, if you needed to run \`npm install\` in a project outside of '${cwd.toPosix()}', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using ${diffStrategy ? "edit_file or create_file" : "create_file"} to make informed changes. -- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the create_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. 
Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. ${getEditingInstructions(diffStrategy, experiments)} - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. diff --git a/src/core/prompts/tools/index.ts b/src/core/prompts/tools/index.ts index 6310620aac9..1b9b9a43d9d 100644 --- a/src/core/prompts/tools/index.ts +++ b/src/core/prompts/tools/index.ts @@ -23,7 +23,7 @@ import { ToolArgs } from "./types" const toolDescriptionMap: Record string | undefined> = { execute_command: (args) => getExecuteCommandDescription(args), read_file: (args) => getReadFileDescription(args), - create_file: (args) => getWriteToFileDescription(args), + write_to_file: (args) => getWriteToFileDescription(args), search_files: (args) => getSearchFilesDescription(args), list_files: (args) => getListFilesDescription(args), list_code_definition_names: (args) => getListCodeDefinitionNamesDescription(args), @@ -36,7 +36,7 @@ const toolDescriptionMap: Record string | undefined> new_task: (args) => getNewTaskDescription(args), insert_content: (args) => getInsertContentDescription(args), search_and_replace: (args) => getSearchAndReplaceDescription(args), - edit_file: (args) => + apply_diff: (args) => args.diffStrategy ? args.diffStrategy.getToolDescription({ cwd: args.cwd, toolOptions: args.toolOptions }) : "", } diff --git a/src/core/prompts/tools/write-to-file.ts b/src/core/prompts/tools/write-to-file.ts index 7a20e9b3f4f..c2a311cf361 100644 --- a/src/core/prompts/tools/write-to-file.ts +++ b/src/core/prompts/tools/write-to-file.ts @@ -1,23 +1,23 @@ import { ToolArgs } from "./types" export function getWriteToFileDescription(args: ToolArgs): string { - return `## create_file + return `## write_to_file Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. Parameters: - path: (required) The path of the file to write to (relative to the current working directory ${args.cwd}) - content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. - line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. 
Usage: - + File path here Your file content here total number of lines in the file, including empty lines - + Example: Requesting to write to frontend-config.json - + frontend-config.json { @@ -36,5 +36,5 @@ Example: Requesting to write to frontend-config.json } 14 -` +` } diff --git a/src/core/webview/__tests__/ClineProvider.test.ts b/src/core/webview/__tests__/ClineProvider.test.ts index f8df84721da..6449cc93bec 100644 --- a/src/core/webview/__tests__/ClineProvider.test.ts +++ b/src/core/webview/__tests__/ClineProvider.test.ts @@ -107,7 +107,7 @@ jest.mock( // Mock DiffStrategy jest.mock("../../diff/DiffStrategy", () => ({ getDiffStrategy: jest.fn().mockImplementation(() => ({ - getToolDescription: jest.fn().mockReturnValue("edit_file tool description"), + getToolDescription: jest.fn().mockReturnValue("apply_diff tool description"), })), })) diff --git a/src/shared/ExtensionMessage.ts b/src/shared/ExtensionMessage.ts index 5d0e16e39cd..fe9fa394270 100644 --- a/src/shared/ExtensionMessage.ts +++ b/src/shared/ExtensionMessage.ts @@ -127,7 +127,7 @@ export interface ExtensionState { experiments: Record // Map of experiment IDs to their enabled state autoApprovalEnabled?: boolean customModes: ModeConfig[] - toolRequirements?: Record // Map of tool names to their requirements (e.g. {"edit_file": true} if diffEnabled) + toolRequirements?: Record // Map of tool names to their requirements (e.g. {"apply_diff": true} if diffEnabled) maxOpenTabsContext: number // Maximum number of VSCode open tabs to include in context (0-500) } diff --git a/src/shared/__tests__/modes.test.ts b/src/shared/__tests__/modes.test.ts index 52d26735a9e..3bd89c4ecb5 100644 --- a/src/shared/__tests__/modes.test.ts +++ b/src/shared/__tests__/modes.test.ts @@ -44,14 +44,14 @@ describe("isToolAllowedForMode", () => { describe("file restrictions", () => { it("allows editing matching files", () => { // Test markdown editor mode - const mdResult = isToolAllowedForMode("create_file", "markdown-editor", customModes, undefined, { + const mdResult = isToolAllowedForMode("write_to_file", "markdown-editor", customModes, undefined, { path: "test.md", content: "# Test", }) expect(mdResult).toBe(true) // Test CSS editor mode - const cssResult = isToolAllowedForMode("create_file", "css-editor", customModes, undefined, { + const cssResult = isToolAllowedForMode("write_to_file", "css-editor", customModes, undefined, { path: "styles.css", content: ".test { color: red; }", }) @@ -61,13 +61,13 @@ describe("isToolAllowedForMode", () => { it("rejects editing non-matching files", () => { // Test markdown editor mode with non-markdown file expect(() => - isToolAllowedForMode("create_file", "markdown-editor", customModes, undefined, { + isToolAllowedForMode("write_to_file", "markdown-editor", customModes, undefined, { path: "test.js", content: "console.log('test')", }), ).toThrow(FileRestrictionError) expect(() => - isToolAllowedForMode("create_file", "markdown-editor", customModes, undefined, { + isToolAllowedForMode("write_to_file", "markdown-editor", customModes, undefined, { path: "test.js", content: "console.log('test')", }), @@ -75,13 +75,13 @@ describe("isToolAllowedForMode", () => { // Test CSS editor mode with non-CSS file expect(() => - isToolAllowedForMode("create_file", "css-editor", customModes, undefined, { + isToolAllowedForMode("write_to_file", "css-editor", customModes, undefined, { path: "test.js", content: "console.log('test')", }), ).toThrow(FileRestrictionError) expect(() => - isToolAllowedForMode("create_file", "css-editor", 
customModes, undefined, { + isToolAllowedForMode("write_to_file", "css-editor", customModes, undefined, { path: "test.js", content: "console.log('test')", }), @@ -91,35 +91,35 @@ describe("isToolAllowedForMode", () => { it("handles partial streaming cases (path only, no content/diff)", () => { // Should allow path-only for matching files (no validation yet since content/diff not provided) expect( - isToolAllowedForMode("create_file", "markdown-editor", customModes, undefined, { + isToolAllowedForMode("write_to_file", "markdown-editor", customModes, undefined, { path: "test.js", }), ).toBe(true) expect( - isToolAllowedForMode("edit_file", "markdown-editor", customModes, undefined, { + isToolAllowedForMode("apply_diff", "markdown-editor", customModes, undefined, { path: "test.js", }), ).toBe(true) // Should allow path-only for architect mode too expect( - isToolAllowedForMode("create_file", "architect", [], undefined, { + isToolAllowedForMode("write_to_file", "architect", [], undefined, { path: "test.js", }), ).toBe(true) }) - it("applies restrictions to both create_file and edit_file", () => { - // Test create_file - const writeResult = isToolAllowedForMode("create_file", "markdown-editor", customModes, undefined, { + it("applies restrictions to both write_to_file and apply_diff", () => { + // Test write_to_file + const writeResult = isToolAllowedForMode("write_to_file", "markdown-editor", customModes, undefined, { path: "test.md", content: "# Test", }) expect(writeResult).toBe(true) - // Test edit_file - const diffResult = isToolAllowedForMode("edit_file", "markdown-editor", customModes, undefined, { + // Test apply_diff + const diffResult = isToolAllowedForMode("apply_diff", "markdown-editor", customModes, undefined, { path: "test.md", diff: "- old\n+ new", }) @@ -127,14 +127,14 @@ describe("isToolAllowedForMode", () => { // Test both with non-matching file expect(() => - isToolAllowedForMode("create_file", "markdown-editor", customModes, undefined, { + isToolAllowedForMode("write_to_file", "markdown-editor", customModes, undefined, { path: "test.js", content: "console.log('test')", }), ).toThrow(FileRestrictionError) expect(() => - isToolAllowedForMode("edit_file", "markdown-editor", customModes, undefined, { + isToolAllowedForMode("apply_diff", "markdown-editor", customModes, undefined, { path: "test.js", diff: "- old\n+ new", }), @@ -155,29 +155,29 @@ describe("isToolAllowedForMode", () => { }, ] - // Test create_file with non-matching file + // Test write_to_file with non-matching file expect(() => - isToolAllowedForMode("create_file", "docs-editor", customModesWithDescription, undefined, { + isToolAllowedForMode("write_to_file", "docs-editor", customModesWithDescription, undefined, { path: "test.js", content: "console.log('test')", }), ).toThrow(FileRestrictionError) expect(() => - isToolAllowedForMode("create_file", "docs-editor", customModesWithDescription, undefined, { + isToolAllowedForMode("write_to_file", "docs-editor", customModesWithDescription, undefined, { path: "test.js", content: "console.log('test')", }), ).toThrow(/Documentation files only/) - // Test edit_file with non-matching file + // Test apply_diff with non-matching file expect(() => - isToolAllowedForMode("edit_file", "docs-editor", customModesWithDescription, undefined, { + isToolAllowedForMode("apply_diff", "docs-editor", customModesWithDescription, undefined, { path: "test.js", diff: "- old\n+ new", }), ).toThrow(FileRestrictionError) expect(() => - isToolAllowedForMode("edit_file", "docs-editor", 
customModesWithDescription, undefined, { + isToolAllowedForMode("apply_diff", "docs-editor", customModesWithDescription, undefined, { path: "test.js", diff: "- old\n+ new", }), @@ -185,14 +185,14 @@ describe("isToolAllowedForMode", () => { // Test that matching files are allowed expect( - isToolAllowedForMode("create_file", "docs-editor", customModesWithDescription, undefined, { + isToolAllowedForMode("write_to_file", "docs-editor", customModesWithDescription, undefined, { path: "test.md", content: "# Test", }), ).toBe(true) expect( - isToolAllowedForMode("create_file", "docs-editor", customModesWithDescription, undefined, { + isToolAllowedForMode("write_to_file", "docs-editor", customModesWithDescription, undefined, { path: "test.txt", content: "Test content", }), @@ -200,7 +200,7 @@ describe("isToolAllowedForMode", () => { // Test partial streaming cases expect( - isToolAllowedForMode("create_file", "docs-editor", customModesWithDescription, undefined, { + isToolAllowedForMode("write_to_file", "docs-editor", customModesWithDescription, undefined, { path: "test.js", }), ).toBe(true) @@ -209,7 +209,7 @@ describe("isToolAllowedForMode", () => { it("allows architect mode to edit markdown files only", () => { // Should allow editing markdown files expect( - isToolAllowedForMode("create_file", "architect", [], undefined, { + isToolAllowedForMode("write_to_file", "architect", [], undefined, { path: "test.md", content: "# Test", }), @@ -217,7 +217,7 @@ describe("isToolAllowedForMode", () => { // Should allow applying diffs to markdown files expect( - isToolAllowedForMode("edit_file", "architect", [], undefined, { + isToolAllowedForMode("apply_diff", "architect", [], undefined, { path: "readme.md", diff: "- old\n+ new", }), @@ -225,13 +225,13 @@ describe("isToolAllowedForMode", () => { // Should reject non-markdown files expect(() => - isToolAllowedForMode("create_file", "architect", [], undefined, { + isToolAllowedForMode("write_to_file", "architect", [], undefined, { path: "test.js", content: "console.log('test')", }), ).toThrow(FileRestrictionError) expect(() => - isToolAllowedForMode("create_file", "architect", [], undefined, { + isToolAllowedForMode("write_to_file", "architect", [], undefined, { path: "test.js", content: "console.log('test')", }), @@ -245,15 +245,15 @@ describe("isToolAllowedForMode", () => { }) it("handles non-existent modes", () => { - expect(isToolAllowedForMode("create_file", "non-existent", customModes)).toBe(false) + expect(isToolAllowedForMode("write_to_file", "non-existent", customModes)).toBe(false) }) it("respects tool requirements", () => { const toolRequirements = { - create_file: false, + write_to_file: false, } - expect(isToolAllowedForMode("create_file", "markdown-editor", customModes, toolRequirements)).toBe(false) + expect(isToolAllowedForMode("write_to_file", "markdown-editor", customModes, toolRequirements)).toBe(false) }) describe("experimental tools", () => { @@ -312,7 +312,7 @@ describe("isToolAllowedForMode", () => { ).toBe(true) expect( isToolAllowedForMode( - "create_file", + "write_to_file", "markdown-editor", customModes, undefined, diff --git a/src/shared/tool-groups.ts b/src/shared/tool-groups.ts index 8a25e1400ee..2728d42319d 100644 --- a/src/shared/tool-groups.ts +++ b/src/shared/tool-groups.ts @@ -8,8 +8,8 @@ export type ToolGroupConfig = { export const TOOL_DISPLAY_NAMES = { execute_command: "run commands", read_file: "read files", - create_file: "write files", - edit_file: "apply changes", + write_to_file: "write files", + apply_diff: "apply 
changes", search_files: "search files", list_files: "list files", list_code_definition_names: "list definitions", @@ -28,7 +28,7 @@ export const TOOL_GROUPS: Record = { tools: ["read_file", "search_files", "list_files", "list_code_definition_names"], }, edit: { - tools: ["edit_file", "create_file", "insert_content", "search_and_replace"], + tools: ["write_to_file", "apply_diff", "insert_content", "search_and_replace"], }, browser: { tools: ["browser_action"], From c677a450d197bb9b7ee856d2c7353e00abe35b2c Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 11:30:15 -0800 Subject: [PATCH 004/145] Fix maxTokens --- src/shared/api.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shared/api.ts b/src/shared/api.ts index 3598eb16ac9..129704ace26 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -96,7 +96,7 @@ export type AnthropicModelId = keyof typeof anthropicModels export const anthropicDefaultModelId: AnthropicModelId = "claude-3-5-sonnet-20241022" export const anthropicModels = { "claude-3-7-sonnet-20250219": { - maxTokens: 128_000, + maxTokens: 64_000, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, From 30b10c4e266c5ff10710fa697f5a4c989b96215a Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 24 Feb 2025 13:39:08 -0600 Subject: [PATCH 005/145] 3.7 --- .changeset/warm-kangaroos-give.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/warm-kangaroos-give.md diff --git a/.changeset/warm-kangaroos-give.md b/.changeset/warm-kangaroos-give.md new file mode 100644 index 00000000000..a95f6a1cf89 --- /dev/null +++ b/.changeset/warm-kangaroos-give.md @@ -0,0 +1,5 @@ +--- +"roo-cline": minor +--- + +3.7 From 158b9d4659fd8d48dccf02c1a81915dff0b8b667 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 24 Feb 2025 19:42:40 +0000 Subject: [PATCH 006/145] changeset version bump --- .changeset/warm-kangaroos-give.md | 5 ----- CHANGELOG.md | 6 ++++++ package-lock.json | 4 ++-- package.json | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) delete mode 100644 .changeset/warm-kangaroos-give.md diff --git a/.changeset/warm-kangaroos-give.md b/.changeset/warm-kangaroos-give.md deleted file mode 100644 index a95f6a1cf89..00000000000 --- a/.changeset/warm-kangaroos-give.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": minor ---- - -3.7 diff --git a/CHANGELOG.md b/CHANGELOG.md index 748f8c78f88..6b85eed8cac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Roo Code Changelog +## 3.4.0 + +### Minor Changes + +- 3.7 + ## [3.3.26] - Adjust the default prompt for Debug mode to focus more on diagnosis and to require user confirmation before moving on to implementation diff --git a/package-lock.json b/package-lock.json index 9822548678d..f5648649b52 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "roo-cline", - "version": "3.3.26", + "version": "3.4.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "3.3.26", + "version": "3.4.0", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.26.0", diff --git a/package.json b/package.json index c4c38e78571..d64f0c7b021 100644 --- a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": "Roo Code (prev. 
Roo Cline)", "description": "An AI-powered autonomous coding agent that lives in your editor.", "publisher": "RooVeterinaryInc", - "version": "3.3.26", + "version": "3.4.0", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From 9a41763dbbde56256102deb691a1039bc040a1dc Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 24 Feb 2025 13:47:55 -0600 Subject: [PATCH 007/145] Update CHANGELOG.md --- CHANGELOG.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b85eed8cac..2d4546a2e14 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,8 @@ # Roo Code Changelog -## 3.4.0 +## [3.7.0] -### Minor Changes - -- 3.7 +- Introducing Roo Code 3.7, with support for the new Claude Sonnet 3.7. Because who cares about skipping version numbers anymore? Thanks @lupuletic and @cte for the PRs! ## [3.3.26] From 8b28189850959d86b961c045fb9fb7ed84e821f2 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 24 Feb 2025 13:48:10 -0600 Subject: [PATCH 008/145] Update package.json --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index d64f0c7b021..0d9e545090c 100644 --- a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": "Roo Code (prev. Roo Cline)", "description": "An AI-powered autonomous coding agent that lives in your editor.", "publisher": "RooVeterinaryInc", - "version": "3.4.0", + "version": "3.7.0", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From 3845338702500630c0ade45ddcee50c65667f412 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 24 Feb 2025 13:48:53 -0600 Subject: [PATCH 009/145] Update package-lock.json --- package-lock.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/package-lock.json b/package-lock.json index f5648649b52..7650f96ea58 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "roo-cline", - "version": "3.4.0", + "version": "3.7.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "3.4.0", + "version": "3.7.0", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.26.0", From 35a56a991854e03f317e93e0390bc5556ef5814c Mon Sep 17 00:00:00 2001 From: Roo Code Date: Sat, 22 Feb 2025 16:38:46 -0700 Subject: [PATCH 010/145] Reorder tool groups and update tool usage instructions to always mention apply_diff before write_to_file --- src/core/prompts/sections/capabilities.ts | 2 +- src/core/prompts/sections/modes.ts | 2 +- src/core/prompts/sections/rules.ts | 19 ++++++++++++------- src/shared/tool-groups.ts | 2 +- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/src/core/prompts/sections/capabilities.ts b/src/core/prompts/sections/capabilities.ts index c292eeffbc3..983d07bf761 100644 --- a/src/core/prompts/sections/capabilities.ts +++ b/src/core/prompts/sections/capabilities.ts @@ -17,7 +17,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('${cwd}') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. 
If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file${diffStrategy ? " or apply_diff" : ""} tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. + - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use ${diffStrategy ? "the apply_diff or write_to_file" : "the write_to_file"} tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance.${ supportsComputerUse ? "\n- You can use the browser_action tool to interact with websites (including html files and locally running development servers) through a Puppeteer-controlled browser when you feel it is necessary in accomplishing the user's task. This tool is particularly useful for web development tasks as it allows you to launch a browser, navigate to pages, interact with elements through clicks and keyboard input, and capture the results through screenshots and console logs. 
This tool may be useful at key stages of web development tasks-such as after implementing new features, making substantial changes, when troubleshooting issues, or to verify the result of your work. You can analyze the provided screenshots to ensure correct rendering or identify errors, and review console logs for runtime issues.\n - For example, if asked to add a component to a react website, you might create the necessary files, use execute_command to run the site locally, then use browser_action to launch the browser, navigate to the local server, and verify the component renders & functions correctly before closing the browser." diff --git a/src/core/prompts/sections/modes.ts b/src/core/prompts/sections/modes.ts index eff950c2c2f..f3863870dbc 100644 --- a/src/core/prompts/sections/modes.ts +++ b/src/core/prompts/sections/modes.ts @@ -45,7 +45,7 @@ Both files should follow this structure: "roleDefinition": "You are Roo, a UI/UX expert specializing in design systems and frontend development. Your expertise includes:\\n- Creating and maintaining design systems\\n- Implementing responsive and accessible web interfaces\\n- Working with CSS, HTML, and modern frontend frameworks\\n- Ensuring consistent user experiences across platforms", // Required: non-empty "groups": [ // Required: array of tool groups (can be empty) "read", // Read files group (read_file, search_files, list_files, list_code_definition_names) - "edit", // Edit files group (write_to_file, apply_diff) - allows editing any file + "edit", // Edit files group (apply_diff, write_to_file) - allows editing any file // Or with file restrictions: // ["edit", { fileRegex: "\\.md$", description: "Markdown files only" }], // Edit group that only allows editing markdown files "browser", // Browser group (browser_action) diff --git a/src/core/prompts/sections/rules.ts b/src/core/prompts/sections/rules.ts index b6e19eb08c9..86e554a157e 100644 --- a/src/core/prompts/sections/rules.ts +++ b/src/core/prompts/sections/rules.ts @@ -5,11 +5,16 @@ import * as path from "path" function getEditingInstructions(diffStrategy?: DiffStrategy, experiments?: Record): string { const instructions: string[] = [] - const availableTools: string[] = ["write_to_file (for creating new files or complete file rewrites)"] + const availableTools: string[] = [] // Collect available editing tools if (diffStrategy) { - availableTools.push("apply_diff (for replacing lines in existing files)") + availableTools.push( + "apply_diff (for replacing lines in existing files)", + "write_to_file (for creating new files or complete file rewrites)", + ) + } else { + availableTools.push("write_to_file (for creating new files or complete file rewrites)") } if (experiments?.["insert_content"]) { availableTools.push("insert_content (for adding lines to existing files)") @@ -36,16 +41,16 @@ function getEditingInstructions(diffStrategy?: DiffStrategy, experiments?: Recor ) } - instructions.push( - "- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. 
Failure to do so will result in incomplete or broken code, severely impacting the user's project.", - ) - if (availableTools.length > 1) { instructions.push( "- You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files.", ) } + instructions.push( + "- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project.", + ) + return instructions.join("\n") } @@ -63,7 +68,7 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '${cwd.toPosix()}', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '${cwd.toPosix()}', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '${cwd.toPosix()}'). For example, if you needed to run \`npm install\` in a project outside of '${cwd.toPosix()}', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using ${diffStrategy ? "apply_diff or write_to_file" : "write_to_file"} to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. 
Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. ${getEditingInstructions(diffStrategy, experiments)} - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. diff --git a/src/shared/tool-groups.ts b/src/shared/tool-groups.ts index 2728d42319d..50c7b80ca9e 100644 --- a/src/shared/tool-groups.ts +++ b/src/shared/tool-groups.ts @@ -28,7 +28,7 @@ export const TOOL_GROUPS: Record = { tools: ["read_file", "search_files", "list_files", "list_code_definition_names"], }, edit: { - tools: ["write_to_file", "apply_diff", "insert_content", "search_and_replace"], + tools: ["apply_diff", "write_to_file", "insert_content", "search_and_replace"], }, browser: { tools: ["browser_action"], From 0e644f102ff0b9e74213f502c9c41cda7e7776e5 Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 12:48:55 -0800 Subject: [PATCH 011/145] Default to Claude 3.7 where appropriate --- .github/ISSUE_TEMPLATE/bug_report.yml | 2 +- src/core/Cline.ts | 2 +- src/core/webview/ClineProvider.ts | 50 +++---------------- src/shared/api.ts | 24 +++++---- src/test/suite/index.ts | 2 +- .../components/settings/GlamaModelPicker.tsx | 2 +- .../settings/OpenRouterModelPicker.tsx | 2 +- .../settings/RequestyModelPicker.tsx | 2 +- .../src/components/settings/SettingsView.tsx | 2 +- 9 files changed, 26 insertions(+), 62 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 501180c3d53..dc66b4f390b 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -33,7 +33,7 @@ body: id: model attributes: label: Which Model are you using? - description: Please specify the model you're using (e.g. Claude 3.5 Sonnet) + description: Please specify the model you're using (e.g. Claude 3.7 Sonnet) validations: required: true - type: textarea diff --git a/src/core/Cline.ts b/src/core/Cline.ts index 12cf062406b..f1f5e41b331 100644 --- a/src/core/Cline.ts +++ b/src/core/Cline.ts @@ -2792,7 +2792,7 @@ export class Cline { "mistake_limit_reached", this.api.getModel().id.includes("claude") ? `This may indicate a failure in his thought process or inability to use a tool properly, which can be mitigated with some user guidance (e.g. "Try breaking down the task into smaller steps").` - : "Roo Code uses complex prompts and iterative task execution that may be challenging for less capable models. For best results, it's recommended to use Claude 3.5 Sonnet for its advanced agentic coding capabilities.", + : "Roo Code uses complex prompts and iterative task execution that may be challenging for less capable models. 
For best results, it's recommended to use Claude 3.7 Sonnet for its advanced agentic coding capabilities.", ) if (response === "messageResponse") { userContent.push( diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index 6790224ecae..b4819d96833 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -1900,23 +1900,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { } const response = await axios.get("https://router.requesty.ai/v1/models", config) - /* - { - "id": "anthropic/claude-3-5-sonnet-20240620", - "object": "model", - "created": 1738243330, - "owned_by": "system", - "input_price": 0.000003, - "caching_price": 0.00000375, - "cached_price": 3E-7, - "output_price": 0.000015, - "max_output_tokens": 8192, - "context_window": 200000, - "supports_caching": true, - "description": "Anthropic's most intelligent model. Highest level of intelligence and capability" - }, - } - */ + if (response.data) { const rawModels = response.data.data const parsePrice = (price: any) => { @@ -2116,34 +2100,10 @@ export class ClineProvider implements vscode.WebviewViewProvider { ) const models: Record = {} + try { const response = await axios.get("https://openrouter.ai/api/v1/models") - /* - { - "id": "anthropic/claude-3.5-sonnet", - "name": "Anthropic: Claude 3.5 Sonnet", - "created": 1718841600, - "description": "Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:\n\n- Coding: Autonomously writes, edits, and runs code with reasoning and troubleshooting\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)\n\n#multimodal", - "context_length": 200000, - "architecture": { - "modality": "text+image-\u003Etext", - "tokenizer": "Claude", - "instruct_type": null - }, - "pricing": { - "prompt": "0.000003", - "completion": "0.000015", - "image": "0.0048", - "request": "0" - }, - "top_provider": { - "context_length": 200000, - "max_completion_tokens": 8192, - "is_moderated": true - }, - "per_request_limits": null - }, - */ + if (response.data?.data) { const rawModels = response.data.data const parsePrice = (price: any) => { @@ -2152,6 +2112,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { } return undefined } + for (const rawModel of rawModels) { const modelInfo: ModelInfo = { maxTokens: rawModel.top_provider?.max_completion_tokens, @@ -2164,9 +2125,10 @@ export class ClineProvider implements vscode.WebviewViewProvider { } switch (rawModel.id) { + case "anthropic/claude-3.7-sonnet": case "anthropic/claude-3.5-sonnet": case "anthropic/claude-3.5-sonnet:beta": - // NOTE: this needs to be synced with api.ts/openrouter default model info + // NOTE: this needs to be synced with api.ts/openrouter default model info. 
modelInfo.supportsComputerUse = true modelInfo.supportsPromptCache = true modelInfo.cacheWritesPrice = 3.75 diff --git a/src/shared/api.ts b/src/shared/api.ts index 23fe60696c9..4619d2930dc 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -93,7 +93,7 @@ export interface ModelInfo { // Anthropic // https://docs.anthropic.com/en/docs/about-claude/models export type AnthropicModelId = keyof typeof anthropicModels -export const anthropicDefaultModelId: AnthropicModelId = "claude-3-5-sonnet-20241022" +export const anthropicDefaultModelId: AnthropicModelId = "claude-3-7-sonnet-20250219" export const anthropicModels = { "claude-3-7-sonnet-20250219": { maxTokens: 64_000, @@ -355,9 +355,9 @@ export const bedrockModels = { // Glama // https://glama.ai/models -export const glamaDefaultModelId = "anthropic/claude-3-5-sonnet" +export const glamaDefaultModelId = "anthropic/claude-3-7-sonnet" export const glamaDefaultModelInfo: ModelInfo = { - maxTokens: 8192, + maxTokens: 64_000, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, @@ -367,11 +367,14 @@ export const glamaDefaultModelInfo: ModelInfo = { cacheWritesPrice: 3.75, cacheReadsPrice: 0.3, description: - "The new Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:\n\n- Coding: New Sonnet scores ~49% on SWE-Bench Verified, higher than the last best score, and without any fancy prompt scaffolding\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)\n\n#multimodal\n\n_This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/anthropic/claude-3.5-sonnet) variant._", + "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)", } +// Requesty +// https://requesty.ai/router-2 +export const requestyDefaultModelId = "anthropic/claude-3-7-sonnet-latest" export const requestyDefaultModelInfo: ModelInfo = { - maxTokens: 8192, + maxTokens: 64_000, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, @@ -381,15 +384,14 @@ export const requestyDefaultModelInfo: ModelInfo = { cacheWritesPrice: 3.75, cacheReadsPrice: 0.3, description: - "The new Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. 
Sonnet is particularly good at:\n\n- Coding: New Sonnet scores ~49% on SWE-Bench Verified, higher than the last best score, and without any fancy prompt scaffolding\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)\n\n#multimodal\n\n_This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/anthropic/claude-3.5-sonnet) variant._", + "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)", } -export const requestyDefaultModelId = "anthropic/claude-3-5-sonnet" // OpenRouter // https://openrouter.ai/models?order=newest&supported_parameters=tools -export const openRouterDefaultModelId = "anthropic/claude-3.5-sonnet:beta" // will always exist in openRouterModels +export const openRouterDefaultModelId = "anthropic/claude-3.7-sonnet" export const openRouterDefaultModelInfo: ModelInfo = { - maxTokens: 8192, + maxTokens: 64_000, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, @@ -399,13 +401,13 @@ export const openRouterDefaultModelInfo: ModelInfo = { cacheWritesPrice: 3.75, cacheReadsPrice: 0.3, description: - "The new Claude 3.5 Sonnet delivers better-than-Opus capabilities, faster-than-Sonnet speeds, at the same Sonnet prices. Sonnet is particularly good at:\n\n- Coding: New Sonnet scores ~49% on SWE-Bench Verified, higher than the last best score, and without any fancy prompt scaffolding\n- Data science: Augments human data science expertise; navigates unstructured data while using multiple tools for insights\n- Visual processing: excelling at interpreting charts, graphs, and images, accurately transcribing text to derive insights beyond just the text alone\n- Agentic tasks: exceptional tool use, making it great at agentic tasks (i.e. complex, multi-step problem solving tasks that require engaging with other systems)\n\n#multimodal\n\n_This is a faster endpoint, made available in collaboration with Anthropic, that is self-moderated: response moderation happens on the provider's side instead of OpenRouter's. For requests that pass moderation, it's identical to the [Standard](/anthropic/claude-3.5-sonnet) variant._", + "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. 
It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)", } // Vertex AI // https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude export type VertexModelId = keyof typeof vertexModels -export const vertexDefaultModelId: VertexModelId = "claude-3-5-sonnet-v2@20241022" +export const vertexDefaultModelId: VertexModelId = "claude-3-7-sonnet@20250219" export const vertexModels = { "claude-3-7-sonnet@20250219": { maxTokens: 8192, diff --git a/src/test/suite/index.ts b/src/test/suite/index.ts index ffb8de7473e..540be7cef83 100644 --- a/src/test/suite/index.ts +++ b/src/test/suite/index.ts @@ -39,7 +39,7 @@ export async function run(): Promise { : await globalThis.extension.activate() globalThis.provider = globalThis.api.sidebarProvider await globalThis.provider.updateGlobalState("apiProvider", "openrouter") - await globalThis.provider.updateGlobalState("openRouterModelId", "anthropic/claude-3.5-sonnet") + await globalThis.provider.updateGlobalState("openRouterModelId", "anthropic/claude-3.7-sonnet") await globalThis.provider.storeSecret( "openRouterApiKey", process.env.OPENROUTER_API_KEY || "sk-or-v1-fake-api-key", diff --git a/webview-ui/src/components/settings/GlamaModelPicker.tsx b/webview-ui/src/components/settings/GlamaModelPicker.tsx index cb813a0d058..37e326d8f87 100644 --- a/webview-ui/src/components/settings/GlamaModelPicker.tsx +++ b/webview-ui/src/components/settings/GlamaModelPicker.tsx @@ -10,6 +10,6 @@ export const GlamaModelPicker = () => ( refreshMessageType="refreshGlamaModels" serviceName="Glama" serviceUrl="https://glama.ai/models" - recommendedModel="anthropic/claude-3-5-sonnet" + recommendedModel="anthropic/claude-3-7-sonnet" /> ) diff --git a/webview-ui/src/components/settings/OpenRouterModelPicker.tsx b/webview-ui/src/components/settings/OpenRouterModelPicker.tsx index 9111407cd61..c773478e542 100644 --- a/webview-ui/src/components/settings/OpenRouterModelPicker.tsx +++ b/webview-ui/src/components/settings/OpenRouterModelPicker.tsx @@ -10,6 +10,6 @@ export const OpenRouterModelPicker = () => ( refreshMessageType="refreshOpenRouterModels" serviceName="OpenRouter" serviceUrl="https://openrouter.ai/models" - recommendedModel="anthropic/claude-3.5-sonnet:beta" + recommendedModel="anthropic/claude-3.7-sonnet" /> ) diff --git a/webview-ui/src/components/settings/RequestyModelPicker.tsx b/webview-ui/src/components/settings/RequestyModelPicker.tsx index e0759a43ba1..c65067068aa 100644 --- a/webview-ui/src/components/settings/RequestyModelPicker.tsx +++ b/webview-ui/src/components/settings/RequestyModelPicker.tsx @@ -16,7 +16,7 @@ export const RequestyModelPicker = () => { }} serviceName="Requesty" serviceUrl="https://requesty.ai" - recommendedModel="anthropic/claude-3-5-sonnet-latest" + recommendedModel="anthropic/claude-3-7-sonnet-latest" /> ) } diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index 
495bf49bd77..0d80580b491 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -765,7 +765,7 @@ const SettingsView = forwardRef(({ onDone }, color: "var(--vscode-descriptionForeground)", }}> When enabled, Roo will be able to edit files more quickly and will automatically reject - truncated full-file writes. Works best with the latest Claude 3.5 Sonnet model. + truncated full-file writes. Works best with the latest Claude 3.7 Sonnet model.
</p>
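The setting described in this paragraph is the same diff toggle that feeds the toolRequirements map in ExtensionMessage.ts earlier in this series, whose comment now reads {"apply_diff": true} if diffEnabled. A rough sketch of that wiring, assuming a simple settings shape (illustrative only, not code from these patches):

```typescript
// Illustrative sketch only — assumes diffEnabled is the sole input to the map.
interface EditorSettings {
	diffEnabled: boolean
}

function buildToolRequirements(settings: EditorSettings): Record<string, boolean> {
	// Mirrors the ExtensionMessage.ts comment: {"apply_diff": true} if diffEnabled.
	return { apply_diff: settings.diffEnabled }
}

// A `false` entry behaves like the "respects tool requirements" test above,
// where { write_to_file: false } makes isToolAllowedForMode return false.
const toolRequirements = buildToolRequirements({ diffEnabled: false }) // { apply_diff: false }
```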
{diffEnabled && ( From d94067aba8b375fa14b76b04a0bb2e5774a43a06 Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 12:54:05 -0800 Subject: [PATCH 012/145] Revert maxTokens change for now --- src/api/providers/openrouter.ts | 1 + src/shared/api.ts | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/api/providers/openrouter.ts b/src/api/providers/openrouter.ts index af087226ebd..eb9e819d772 100644 --- a/src/api/providers/openrouter.ts +++ b/src/api/providers/openrouter.ts @@ -107,6 +107,7 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler { // (models usually default to max tokens allowed) let maxTokens: number | undefined switch (this.getModel().id) { + case "anthropic/claude-3.7-sonnet": case "anthropic/claude-3.5-sonnet": case "anthropic/claude-3.5-sonnet:beta": case "anthropic/claude-3.5-sonnet-20240620": diff --git a/src/shared/api.ts b/src/shared/api.ts index 4619d2930dc..056a40c49ef 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -96,7 +96,7 @@ export type AnthropicModelId = keyof typeof anthropicModels export const anthropicDefaultModelId: AnthropicModelId = "claude-3-7-sonnet-20250219" export const anthropicModels = { "claude-3-7-sonnet-20250219": { - maxTokens: 64_000, + maxTokens: 8192, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, @@ -357,7 +357,7 @@ export const bedrockModels = { // https://glama.ai/models export const glamaDefaultModelId = "anthropic/claude-3-7-sonnet" export const glamaDefaultModelInfo: ModelInfo = { - maxTokens: 64_000, + maxTokens: 8192, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, @@ -374,7 +374,7 @@ export const glamaDefaultModelInfo: ModelInfo = { // https://requesty.ai/router-2 export const requestyDefaultModelId = "anthropic/claude-3-7-sonnet-latest" export const requestyDefaultModelInfo: ModelInfo = { - maxTokens: 64_000, + maxTokens: 8192, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, @@ -391,7 +391,7 @@ export const requestyDefaultModelInfo: ModelInfo = { // https://openrouter.ai/models?order=newest&supported_parameters=tools export const openRouterDefaultModelId = "anthropic/claude-3.7-sonnet" export const openRouterDefaultModelInfo: ModelInfo = { - maxTokens: 64_000, + maxTokens: 8192, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, From a130e656f1ecf7e04cb5af2352b844d2f80e4908 Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 13:07:00 -0800 Subject: [PATCH 013/145] Fix tests --- src/api/providers/__tests__/glama.test.ts | 12 +++++++----- src/api/providers/__tests__/openrouter.test.ts | 4 +++- src/api/providers/__tests__/vertex.test.ts | 7 +++++-- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/api/providers/__tests__/glama.test.ts b/src/api/providers/__tests__/glama.test.ts index c3fc90e32b4..5e017ccd0ad 100644 --- a/src/api/providers/__tests__/glama.test.ts +++ b/src/api/providers/__tests__/glama.test.ts @@ -1,9 +1,11 @@ -import { GlamaHandler } from "../glama" -import { ApiHandlerOptions } from "../../../shared/api" -import OpenAI from "openai" +// npx jest src/api/providers/__tests__/glama.test.ts + import { Anthropic } from "@anthropic-ai/sdk" import axios from "axios" +import { GlamaHandler } from "../glama" +import { ApiHandlerOptions } from "../../../shared/api" + // Mock OpenAI client const mockCreate = jest.fn() const mockWithResponse = jest.fn() @@ -71,8 +73,8 @@ describe("GlamaHandler", () => { beforeEach(() => { mockOptions = { - 
apiModelId: "anthropic/claude-3-5-sonnet", - glamaModelId: "anthropic/claude-3-5-sonnet", + apiModelId: "anthropic/claude-3-7-sonnet", + glamaModelId: "anthropic/claude-3-7-sonnet", glamaApiKey: "test-api-key", } handler = new GlamaHandler(mockOptions) diff --git a/src/api/providers/__tests__/openrouter.test.ts b/src/api/providers/__tests__/openrouter.test.ts index 18f81ce2fdf..aabd7f71a84 100644 --- a/src/api/providers/__tests__/openrouter.test.ts +++ b/src/api/providers/__tests__/openrouter.test.ts @@ -1,3 +1,5 @@ +// npx jest src/api/providers/__tests__/openrouter.test.ts + import { OpenRouterHandler } from "../openrouter" import { ApiHandlerOptions, ModelInfo } from "../../../shared/api" import OpenAI from "openai" @@ -55,7 +57,7 @@ describe("OpenRouterHandler", () => { const handler = new OpenRouterHandler({}) const result = handler.getModel() - expect(result.id).toBe("anthropic/claude-3.5-sonnet:beta") + expect(result.id).toBe("anthropic/claude-3.7-sonnet") expect(result.info.supportsPromptCache).toBe(true) }) diff --git a/src/api/providers/__tests__/vertex.test.ts b/src/api/providers/__tests__/vertex.test.ts index a51033af2d6..ebe60ba0c68 100644 --- a/src/api/providers/__tests__/vertex.test.ts +++ b/src/api/providers/__tests__/vertex.test.ts @@ -1,7 +1,10 @@ -import { VertexHandler } from "../vertex" +// npx jest src/api/providers/__tests__/vertex.test.ts + import { Anthropic } from "@anthropic-ai/sdk" import { AnthropicVertex } from "@anthropic-ai/vertex-sdk" +import { VertexHandler } from "../vertex" + // Mock Vertex SDK jest.mock("@anthropic-ai/vertex-sdk", () => ({ AnthropicVertex: jest.fn().mockImplementation(() => ({ @@ -289,7 +292,7 @@ describe("VertexHandler", () => { vertexRegion: "us-central1", }) const modelInfo = invalidHandler.getModel() - expect(modelInfo.id).toBe("claude-3-5-sonnet-v2@20241022") // Default model + expect(modelInfo.id).toBe("claude-3-7-sonnet@20250219") // Default model }) }) }) From 92131adc6d4abed404e0292db1e12a1761e733a2 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 24 Feb 2025 15:35:00 -0600 Subject: [PATCH 014/145] Add Claude 3.7 to Bedrock --- src/shared/api.ts | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/src/shared/api.ts b/src/shared/api.ts index 23fe60696c9..4d68ea2b87b 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -173,7 +173,7 @@ export interface MessageContent { } export type BedrockModelId = keyof typeof bedrockModels -export const bedrockDefaultModelId: BedrockModelId = "anthropic.claude-3-5-sonnet-20241022-v2:0" +export const bedrockDefaultModelId: BedrockModelId = "anthropic.claude-3-7-sonnet-20250219-v1:0" export const bedrockModels = { "amazon.nova-pro-v1:0": { maxTokens: 5000, @@ -208,6 +208,17 @@ export const bedrockModels = { cacheWritesPrice: 0.035, // per million tokens cacheReadsPrice: 0.00875, // per million tokens }, + "anthropic.claude-3-7-sonnet-20250219-v1:0": { + maxTokens: 8192, + contextWindow: 200_000, + supportsImages: true, + supportsComputerUse: true, + supportsPromptCache: true, + inputPrice: 3.0, + outputPrice: 15.0, + cacheWritesPrice: 3.75, + cacheReadsPrice: 0.3, + }, "anthropic.claude-3-5-sonnet-20241022-v2:0": { maxTokens: 8192, contextWindow: 200_000, @@ -216,8 +227,8 @@ export const bedrockModels = { supportsPromptCache: false, inputPrice: 3.0, outputPrice: 15.0, - cacheWritesPrice: 3.75, // per million tokens - cacheReadsPrice: 0.3, // per million tokens + cacheWritesPrice: 3.75, + cacheReadsPrice: 0.3, }, 
"anthropic.claude-3-5-haiku-20241022-v1:0": { maxTokens: 8192, From 1d1f5c9c3b4e5523f483ef16187c92df48bdb254 Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 13:43:47 -0800 Subject: [PATCH 015/145] Integration test cleanup --- src/test/suite/index.ts | 17 ++-- src/test/suite/modes.test.ts | 152 ++++++++++++++++++----------------- src/test/suite/task.test.ts | 58 ++++++------- 3 files changed, 114 insertions(+), 113 deletions(-) diff --git a/src/test/suite/index.ts b/src/test/suite/index.ts index 540be7cef83..cc487b0bf78 100644 --- a/src/test/suite/index.ts +++ b/src/test/suite/index.ts @@ -13,23 +13,23 @@ declare global { } export async function run(): Promise { - // Create the mocha test const mocha = new Mocha({ ui: "tdd", - timeout: 600000, // 10 minutes to compensate for time communicating with LLM while running in GHA + timeout: 600000, // 10 minutes to compensate for time communicating with LLM while running in GHA. }) const testsRoot = path.resolve(__dirname, "..") try { - // Find all test files + // Find all test files. const files = await glob("**/**.test.js", { cwd: testsRoot }) - // Add files to the test suite + // Add files to the test suite. files.forEach((f: string) => mocha.addFile(path.resolve(testsRoot, f))) - //Set up global extension, api, provider, and panel + // Set up global extension, api, provider, and panel. globalThis.extension = vscode.extensions.getExtension("RooVeterinaryInc.roo-cline") + if (!globalThis.extension) { throw new Error("Extension not found") } @@ -37,9 +37,12 @@ export async function run(): Promise { globalThis.api = globalThis.extension.isActive ? globalThis.extension.exports : await globalThis.extension.activate() + globalThis.provider = globalThis.api.sidebarProvider + await globalThis.provider.updateGlobalState("apiProvider", "openrouter") - await globalThis.provider.updateGlobalState("openRouterModelId", "anthropic/claude-3.7-sonnet") + await globalThis.provider.updateGlobalState("openRouterModelId", "anthropic/claude-3.5-sonnet") + await globalThis.provider.storeSecret( "openRouterApiKey", process.env.OPENROUTER_API_KEY || "sk-or-v1-fake-api-key", @@ -71,7 +74,7 @@ export async function run(): Promise { await new Promise((resolve) => setTimeout(resolve, interval)) } - // Run the mocha test + // Run the mocha test. return new Promise((resolve, reject) => { try { mocha.run((failures: number) => { diff --git a/src/test/suite/modes.test.ts b/src/test/suite/modes.test.ts index 2fe0eaa597f..b94e71d1106 100644 --- a/src/test/suite/modes.test.ts +++ b/src/test/suite/modes.test.ts @@ -1,101 +1,105 @@ import * as assert from "assert" -import * as vscode from "vscode" suite("Roo Code Modes", () => { test("Should handle switching modes correctly", async function () { const timeout = 30000 const interval = 1000 + const testPrompt = "For each mode (Code, Architect, Ask) respond with the mode name and what it specializes in after switching to that mode, do not start with the current mode, be sure to say 'I AM DONE' after the task is complete" + if (!globalThis.extension) { assert.fail("Extension not found") } - try { - let startTime = Date.now() - - // Ensure the webview is launched. - while (Date.now() - startTime < timeout) { - if (globalThis.provider.viewLaunched) { - break - } + let startTime = Date.now() - await new Promise((resolve) => setTimeout(resolve, interval)) + // Ensure the webview is launched. 
+ while (Date.now() - startTime < timeout) { + if (globalThis.provider.viewLaunched) { + break } - await globalThis.provider.updateGlobalState("mode", "Ask") - await globalThis.provider.updateGlobalState("alwaysAllowModeSwitch", true) - await globalThis.provider.updateGlobalState("autoApprovalEnabled", true) + await new Promise((resolve) => setTimeout(resolve, interval)) + } - // Start a new task. - await globalThis.api.startNewTask(testPrompt) + await globalThis.provider.updateGlobalState("mode", "Ask") + await globalThis.provider.updateGlobalState("alwaysAllowModeSwitch", true) + await globalThis.provider.updateGlobalState("autoApprovalEnabled", true) - // Wait for task to appear in history with tokens. - startTime = Date.now() + // Start a new task. + await globalThis.api.startNewTask(testPrompt) - while (Date.now() - startTime < timeout) { - const messages = globalThis.provider.messages + // Wait for task to appear in history with tokens. + startTime = Date.now() - if ( - messages.some( - ({ type, text }) => - type === "say" && text?.includes("I AM DONE") && !text?.includes("be sure to say"), - ) - ) { - break - } + while (Date.now() - startTime < timeout) { + const messages = globalThis.provider.messages - await new Promise((resolve) => setTimeout(resolve, interval)) - } - if (globalThis.provider.messages.length === 0) { - assert.fail("No messages received") + if ( + messages.some( + ({ type, text }) => + type === "say" && text?.includes("I AM DONE") && !text?.includes("be sure to say"), + ) + ) { + break } - //Log the messages to the console - globalThis.provider.messages.forEach(({ type, text }) => { - if (type === "say") { - console.log(text) - } - }) - - //Start Grading Portion of test to grade the response from 1 to 10 - await globalThis.provider.updateGlobalState("mode", "Ask") - let output = globalThis.provider.messages.map(({ type, text }) => (type === "say" ? text : "")).join("\n") - await globalThis.api.startNewTask( - `Given this prompt: ${testPrompt} grade the response from 1 to 10 in the format of "Grade: (1-10)": ${output} \n Be sure to say 'I AM DONE GRADING' after the task is complete`, - ) - - startTime = Date.now() - - while (Date.now() - startTime < timeout) { - const messages = globalThis.provider.messages - - if ( - messages.some( - ({ type, text }) => - type === "say" && text?.includes("I AM DONE GRADING") && !text?.includes("be sure to say"), - ) - ) { - break - } - - await new Promise((resolve) => setTimeout(resolve, interval)) + await new Promise((resolve) => setTimeout(resolve, interval)) + } + + if (globalThis.provider.messages.length === 0) { + assert.fail("No messages received") + } + + // Log the messages to the console. + globalThis.provider.messages.forEach(({ type, text }) => { + if (type === "say") { + console.log(text) } - if (globalThis.provider.messages.length === 0) { - assert.fail("No messages received") + }) + + // Start Grading Portion of test to grade the response from 1 to 10. + await globalThis.provider.updateGlobalState("mode", "Ask") + let output = globalThis.provider.messages.map(({ type, text }) => (type === "say" ? 
text : "")).join("\n") + + await globalThis.api.startNewTask( + `Given this prompt: ${testPrompt} grade the response from 1 to 10 in the format of "Grade: (1-10)": ${output} \n Be sure to say 'I AM DONE GRADING' after the task is complete`, + ) + + startTime = Date.now() + + while (Date.now() - startTime < timeout) { + const messages = globalThis.provider.messages + + if ( + messages.some( + ({ type, text }) => + type === "say" && text?.includes("I AM DONE GRADING") && !text?.includes("be sure to say"), + ) + ) { + break } - globalThis.provider.messages.forEach(({ type, text }) => { - if (type === "say" && text?.includes("Grade:")) { - console.log(text) - } - }) - const gradeMessage = globalThis.provider.messages.find( - ({ type, text }) => type === "say" && !text?.includes("Grade: (1-10)") && text?.includes("Grade:"), - )?.text - const gradeMatch = gradeMessage?.match(/Grade: (\d+)/) - const gradeNum = gradeMatch ? parseInt(gradeMatch[1]) : undefined - assert.ok(gradeNum !== undefined && gradeNum >= 7 && gradeNum <= 10, "Grade must be between 7 and 10") - } finally { + + await new Promise((resolve) => setTimeout(resolve, interval)) + } + + if (globalThis.provider.messages.length === 0) { + assert.fail("No messages received") } + + globalThis.provider.messages.forEach(({ type, text }) => { + if (type === "say" && text?.includes("Grade:")) { + console.log(text) + } + }) + + const gradeMessage = globalThis.provider.messages.find( + ({ type, text }) => type === "say" && !text?.includes("Grade: (1-10)") && text?.includes("Grade:"), + )?.text + + const gradeMatch = gradeMessage?.match(/Grade: (\d+)/) + const gradeNum = gradeMatch ? parseInt(gradeMatch[1]) : undefined + assert.ok(gradeNum !== undefined && gradeNum >= 7 && gradeNum <= 10, "Grade must be between 7 and 10") }) }) diff --git a/src/test/suite/task.test.ts b/src/test/suite/task.test.ts index 2d34bc78ff3..6bdedcde002 100644 --- a/src/test/suite/task.test.ts +++ b/src/test/suite/task.test.ts @@ -1,5 +1,4 @@ import * as assert from "assert" -import * as vscode from "vscode" suite("Roo Code Task", () => { test("Should handle prompt and response correctly", async function () { @@ -10,48 +9,43 @@ suite("Roo Code Task", () => { assert.fail("Extension not found") } - try { - // Ensure the webview is launched. - let startTime = Date.now() + // Ensure the webview is launched. + let startTime = Date.now() - while (Date.now() - startTime < timeout) { - if (globalThis.provider.viewLaunched) { - break - } - - await new Promise((resolve) => setTimeout(resolve, interval)) + while (Date.now() - startTime < timeout) { + if (globalThis.provider.viewLaunched) { + break } - await globalThis.provider.updateGlobalState("mode", "Code") - await globalThis.provider.updateGlobalState("alwaysAllowModeSwitch", true) - await globalThis.provider.updateGlobalState("autoApprovalEnabled", true) + await new Promise((resolve) => setTimeout(resolve, interval)) + } - await globalThis.api.startNewTask("Hello world, what is your name? Respond with 'My name is ...'") + await globalThis.provider.updateGlobalState("mode", "Code") + await globalThis.provider.updateGlobalState("alwaysAllowModeSwitch", true) + await globalThis.provider.updateGlobalState("autoApprovalEnabled", true) - // Wait for task to appear in history with tokens. - startTime = Date.now() + await globalThis.api.startNewTask("Hello world, what is your name? 
Respond with 'My name is ...'") - while (Date.now() - startTime < timeout) { - const messages = globalThis.provider.messages + // Wait for task to appear in history with tokens. + startTime = Date.now() - if (messages.some(({ type, text }) => type === "say" && text?.includes("My name is Roo"))) { - break - } + while (Date.now() - startTime < timeout) { + const messages = globalThis.provider.messages - await new Promise((resolve) => setTimeout(resolve, interval)) + if (messages.some(({ type, text }) => type === "say" && text?.includes("My name is Roo"))) { + break } - if (globalThis.provider.messages.length === 0) { - assert.fail("No messages received") - } + await new Promise((resolve) => setTimeout(resolve, interval)) + } - assert.ok( - globalThis.provider.messages.some( - ({ type, text }) => type === "say" && text?.includes("My name is Roo"), - ), - "Did not receive expected response containing 'My name is Roo'", - ) - } finally { + if (globalThis.provider.messages.length === 0) { + assert.fail("No messages received") } + + assert.ok( + globalThis.provider.messages.some(({ type, text }) => type === "say" && text?.includes("My name is Roo")), + "Did not receive expected response containing 'My name is Roo'", + ) }) }) From b24a3355036f6f6e25bdfe01c26133110ede725e Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 13:48:47 -0800 Subject: [PATCH 016/145] Set maxTokens to 64K for Anthropic / 3.7 Sonnet --- src/shared/api.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shared/api.ts b/src/shared/api.ts index 056a40c49ef..3a550891f52 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -96,7 +96,7 @@ export type AnthropicModelId = keyof typeof anthropicModels export const anthropicDefaultModelId: AnthropicModelId = "claude-3-7-sonnet-20250219" export const anthropicModels = { "claude-3-7-sonnet-20250219": { - maxTokens: 8192, + maxTokens: 64_000, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, From cb9077f7dc851a9b4102f1c28e054d89acf08e40 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 24 Feb 2025 15:47:54 -0600 Subject: [PATCH 017/145] README updates --- README.md | 220 ++++++++++++------------------------------------------ 1 file changed, 47 insertions(+), 173 deletions(-) diff --git a/README.md b/README.md index 53f29e07259..60161110e26 100644 --- a/README.md +++ b/README.md @@ -34,204 +34,78 @@ Check out the [CHANGELOG](CHANGELOG.md) for detailed updates and fixes. --- -## New in 3.3: Code Actions, More Powerful Modes, and a new Discord! 🚀 +## New in 3.7: Claude 3.7 Sonnet Support 🚀 -This release brings significant improvements to how you interact with Roo Code: +We're excited to announce support for Anthropic's latest model, Claude 3.7 Sonnet! The model shows notable improvements in: -### Code Actions +- Front-end development and full-stack updates +- Agentic workflows for multi-step processes +- More accurate math, coding, and instruction-following -Roo Code now integrates directly with VS Code's native code actions system, providing quick fixes and refactoring options right in your editor. Look for the lightbulb 💡 to access Roo Code's capabilities without switching context. - -### Enhanced Mode Capabilities - -- **Markdown Editing**: Addressing one of the most requested features, Ask and Architect modes can now create and edit markdown files! -- **Custom File Restrictions**: In general, custom modes can now be restricted to specific file patterns (for example, a technical writer who can only edit markdown files 👋). 
There's no UI for this yet, but who needs that when you can just ask Roo to set it up for you? -- **Self-Initiated Mode Switching**: Modes can intelligently request to switch between each other based on the task at hand. For instance, Code mode might request to switch to Test Engineer mode once it's ready to write tests. - -### Join Our Discord! - -We've launched a new Discord community! Join us at [https://roocode.com/discord](https://roocode.com/discord) to: - -- Share your custom modes -- Get help and support -- Connect with other Roo Code users -- Stay updated on the latest features - -## New in 3.2: Introducing Custom Modes, plus rebranding from Roo Cline → Roo Code! 🚀 - -### Introducing Roo Code - -Our biggest update yet is here - we're officially changing our name from Roo Cline to Roo Code! After growing beyond 50,000 installations across VS Marketplace and Open VSX, we're ready to chart our own course. Our heartfelt thanks to everyone in the Cline community who helped us reach this milestone. - -### Custom Modes - -To mark this new chapter, we're introducing the power to shape Roo Code into any role you need. You can now create an entire team of agents with deeply customized prompts: - -- QA Engineers who write thorough test cases and catch edge cases -- Product Managers who excel at user stories and feature prioritization -- UI/UX Designers who craft beautiful, accessible interfaces -- Code Reviewers who ensure quality and maintainability - -The best part is that Roo can help you create these new modes! Just type "Create a new mode for " in the chat to get started, and go into the Prompts tab or (carefully) edit the JSON representation to customize the prompt and allowed tools to your liking. - -We can't wait to hear more about what you build and how we can continue to evolve the Roo Code platform to support you. Please join us in our new https://www.reddit.com/r/RooCode subreddit to share your custom modes and be part of our next chapter. 🚀 - -## New in 3.1: Chat Mode Prompt Customization & Prompt Enhancements - -Hot off the heels of **v3.0** introducing Code, Architect, and Ask chat modes, one of the most requested features has arrived: **customizable prompts for each mode**! 🎉 - -You can now tailor the **role definition** and **custom instructions** for every chat mode to perfectly fit your workflow. Want to adjust Architect mode to focus more on system scalability? Or tweak Ask mode for deeper research queries? Done. Plus, you can define these via **mode-specific `.clinerules-[mode]` files**. You’ll find all of this in the new **Prompts** tab in the top menu. - -The second big feature in this release is a complete revamp of **prompt enhancements**. This feature helps you craft messages to get even better results from Cline. Here’s what’s new: - -- Works with **any provider** and API configuration, not just OpenRouter. -- Fully customizable prompts to match your unique needs. -- Same simple workflow: just hit the ✨ **Enhance Prompt** button in the chat input to try it out. - -Whether you’re using GPT-4, other APIs, or switching configurations, this gives you total control over how your prompts are optimized. - -As always, we’d love to hear your thoughts and ideas! What features do you want to see in **v3.2**? Drop by https://www.reddit.com/r/roocline and join the discussion - we're building Roo Cline together. 🚀 - -## New in 3.0 - Chat Modes! - -You can now choose between different prompts for Roo Cline to better suit your workflow. 
Here’s what’s available: - -- **Code:** (existing behavior) The default mode where Cline helps you write code and execute tasks. - -- **Architect:** "You are Cline, a software architecture expert..." Ideal for thinking through high-level technical design and system architecture. Can’t write code or run commands. - -- **Ask:** "You are Cline, a knowledgeable technical assistant..." Perfect for asking questions about the codebase or digging into concepts. Also can’t write code or run commands. - -**Switching Modes:** -It’s super simple! There’s a dropdown in the bottom left of the chat input to switch modes. Right next to it, you’ll find a way to switch between the API configuration profiles associated with the current mode (configured on the settings screen). - -**Why Add This?** - -- It keeps Cline from being overly eager to jump into solving problems when you just want to think or ask questions. -- Each mode remembers the API configuration you last used with it. For example, you can use more thoughtful models like OpenAI o1 for Architect and Ask, while sticking with Sonnet or DeepSeek for coding tasks. -- It builds on research suggesting better results when separating "thinking" from "coding," explained well in this very thoughtful [article](https://aider.chat/2024/09/26/architect.html) from aider. - -Right now, switching modes is a manual process. In the future, we’d love to give Cline the ability to suggest mode switches based on context. For now, we’d really appreciate your feedback on this feature. +Try it today in your provider of choice! --- -## Key Features - -### Adaptive Autonomy +## What Can Roo Code Do? -Roo Code communicates in **natural language** and proposes actions—file edits, terminal commands, browser tests, etc. You choose how it behaves: +- 🚀 **Generate Code** from natural language descriptions +- 🔧 **Refactor & Debug** existing code +- 📝 **Write & Update** documentation +- 🤔 **Answer Questions** about your codebase +- 🔄 **Automate** repetitive tasks +- 🏗️ **Create** new files and projects -- **Manual Approval**: Review and approve every step to keep total control. -- **Autonomous/Auto-Approve**: Grant Roo Code the ability to run tasks without interruption, speeding up routine workflows. -- **Hybrid**: Auto-approve specific actions (e.g., file writes) but require confirmation for riskier tasks (like deploying code). +## Quick Start -No matter your preference, you always have the final say on what Roo Code does. - ---- +1. [Install Roo Code](https://docs.roocode.com/getting-started/installing) +2. [Connect Your AI Provider](https://docs.roocode.com/getting-started/connecting-api-provider) +3. [Try Your First Task](https://docs.roocode.com/getting-started/your-first-task) -### Supports Any API or Model - -Use Roo Code with: - -- **OpenRouter**, Anthropic, Glama, OpenAI, Google Gemini, AWS Bedrock, Azure, GCP Vertex, or local models (LM Studio/Ollama)—anything **OpenAI-compatible**. -- Different models per mode. For instance, an advanced model for architecture vs. a cheaper model for daily coding tasks. -- **Usage Tracking**: Roo Code monitors token and cost usage for each session. 
- ---- - -### Custom Modes - -**Custom Modes** let you shape Roo Code’s persona, instructions, and permissions: - -- **Built-in**: - - **Code** – Default, multi-purpose coding assistant - - **Architect** – High-level system and design insights - - **Ask** – Research and Q&A for deeper exploration -- **User-Created**: Type `Create a new mode for ` and Roo Code generates a brand-new persona for that role—complete with tailored prompts and optional tool restrictions. - -Modes can each have unique instructions and skill sets. Manage them in the **Prompts** tab. - -**Advanced Mode Features:** - -- **File Restrictions**: Modes can be restricted to specific file types (e.g., Ask and Architect modes can edit markdown files) -- **Custom File Rules**: Define your own file access patterns (e.g., `.test.ts` for test files only) -- **Direct Mode Switching**: Modes can request to switch to other modes when needed (e.g., switching to Code mode for implementation) -- **Self-Creation**: Roo Code can help create new modes, complete with role definitions and file restrictions - ---- - -### File & Editor Operations - -Roo Code can: - -- **Create and edit** files in your project (showing you diffs). -- **React** to linting or compile-time errors automatically (missing imports, syntax errors, etc.). -- **Track changes** via your editor’s timeline so you can review or revert if needed. - ---- - -### Command Line Integration - -Easily run commands in your terminal—Roo Code: - -- Installs packages, runs builds, or executes tests. -- Monitors output and adapts if it detects errors. -- Lets you keep dev servers running in the background while continuing to work. - -You approve or decline each command, or set auto-approval for routine operations. - ---- - -### Browser Automation - -Roo Code can also open a **browser** session to: - -- Launch your local or remote web app. -- Click, type, scroll, and capture screenshots. -- Collect console logs to debug runtime or UI/UX issues. - -Ideal for **end-to-end testing** or visually verifying changes without constant copy-pasting. - ---- +## Key Features -### Adding Tools with MCP +### Multiple Modes -Extend Roo Code with the **Model Context Protocol (MCP)**: +Roo Code adapts to your needs with specialized [modes](https://docs.roocode.com/basic-usage/modes): -- “Add a tool that manages AWS EC2 resources.” -- “Add a tool that queries the company Jira.” -- “Add a tool that pulls the latest PagerDuty incidents.” +- **Code Mode:** For general-purpose coding tasks +- **Architect Mode:** For planning and technical leadership +- **Ask Mode:** For answering questions and providing information +- **Debug Mode:** For systematic problem diagnosis +- **[Custom Modes](https://docs.roocode.com/advanced-usage/custom-modes):** Create unlimited specialized personas for security auditing, performance optimization, documentation, or any other task -Roo Code can build and configure new tools autonomously (with your approval) to expand its capabilities instantly. +### Smart Tools ---- +Roo Code comes with powerful [tools](https://docs.roocode.com/basic-usage/using-tools) that can: -### Context Mentions +- Read and write files in your project +- Execute commands in your VS Code terminal +- Control a web browser +- Use external tools via [MCP (Model Context Protocol)](https://docs.roocode.com/advanced-usage/mcp) -When you need to provide extra context: +MCP extends Roo Code's capabilities by allowing you to add unlimited custom tools. 
Integrate with external APIs, connect to databases, or create specialized development tools - MCP provides the framework to expand Roo Code's functionality to meet your specific needs. -- **@file** – Embed a file’s contents in the conversation. -- **@folder** – Include entire folder structures. -- **@problems** – Pull in workspace errors/warnings for Roo Code to fix. -- **@url** – Fetch docs from a URL, converting them to markdown. -- **@git** – Supply a list of Git commits or diffs for Roo Code to analyze code history. +### Customization -Help Roo Code focus on the most relevant details without blowing the token budget. +Make Roo Code work your way with: ---- +- [Custom Instructions](https://docs.roocode.com/advanced-usage/custom-instructions) for personalized behavior +- [Custom Modes](https://docs.roocode.com/advanced-usage/custom-modes) for specialized tasks +- [Local Models](https://docs.roocode.com/advanced-usage/local-models) for offline use +- [Auto-Approval Settings](https://docs.roocode.com/advanced-usage/auto-approving-actions) for faster workflows -## Installation +## Resources -Roo Code is available on: +### Documentation -- **[VSCode Marketplace](https://marketplace.visualstudio.com/items?itemName=RooVeterinaryInc.roo-cline)** -- **[Open-VSX](https://open-vsx.org/extension/RooVeterinaryInc/roo-cline)** +- [Basic Usage Guide](https://docs.roocode.com/basic-usage/the-chat-interface) +- [Advanced Features](https://docs.roocode.com/advanced-usage/auto-approving-actions) +- [Frequently Asked Questions](https://docs.roocode.com/faq) -1. **Search “Roo Code”** in your editor’s Extensions panel to install directly. -2. Or grab the `.vsix` file from Marketplace / Open-VSX and **drag-and-drop** into your editor. -3. **Open** Roo Code from the Activity Bar or Command Palette to start chatting. +### Community -> **Tip**: Use `Cmd/Ctrl + Shift + P` → “Roo Code: Open in New Tab” to dock the AI assistant alongside your file explorer. 
+- **Discord:** [Join our Discord server](https://discord.gg/roocode) for real-time help and discussions +- **Reddit:** [Visit our subreddit](https://www.reddit.com/r/RooCode) to share experiences and tips +- **GitHub:** Report [issues](https://github.com/RooVetGit/Roo-Code/issues) or request [features](https://github.com/RooVetGit/Roo-Code/discussions/categories/feature-requests?discussions_q=is%3Aopen+category%3A%22Feature+Requests%22+sort%3Atop) --- From 3f68a9c3c6554538c9b715611d143028a44988f0 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 24 Feb 2025 15:56:29 -0600 Subject: [PATCH 018/145] v3.7.1 --- .changeset/large-ladybugs-behave.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/large-ladybugs-behave.md diff --git a/.changeset/large-ladybugs-behave.md b/.changeset/large-ladybugs-behave.md new file mode 100644 index 00000000000..330255b6a37 --- /dev/null +++ b/.changeset/large-ladybugs-behave.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +v3.7.1 From a890d3d131ee481bef5cdf4406592a92fb42c0c4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 24 Feb 2025 21:58:10 +0000 Subject: [PATCH 019/145] changeset version bump --- .changeset/large-ladybugs-behave.md | 5 ----- CHANGELOG.md | 6 ++++++ package-lock.json | 4 ++-- package.json | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) delete mode 100644 .changeset/large-ladybugs-behave.md diff --git a/.changeset/large-ladybugs-behave.md b/.changeset/large-ladybugs-behave.md deleted file mode 100644 index 330255b6a37..00000000000 --- a/.changeset/large-ladybugs-behave.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -v3.7.1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d4546a2e14..0acca15cb46 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Roo Code Changelog +## 3.7.1 + +### Patch Changes + +- v3.7.1 + ## [3.7.0] - Introducing Roo Code 3.7, with support for the new Claude Sonnet 3.7. Because who cares about skipping version numbers anymore? Thanks @lupuletic and @cte for the PRs! diff --git a/package-lock.json b/package-lock.json index 7650f96ea58..b8c65fd8e8d 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "roo-cline", - "version": "3.7.0", + "version": "3.7.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "3.7.0", + "version": "3.7.1", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.26.0", diff --git a/package.json b/package.json index 0d9e545090c..76d1273e5c8 100644 --- a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": "Roo Code (prev. 
Roo Cline)", "description": "An AI-powered autonomous coding agent that lives in your editor.", "publisher": "RooVeterinaryInc", - "version": "3.7.0", + "version": "3.7.1", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From d01370370e3c1efcaf9332c9a91d854e9fe595ce Mon Sep 17 00:00:00 2001 From: R00-B0T Date: Mon, 24 Feb 2025 21:58:34 +0000 Subject: [PATCH 020/145] Updating CHANGELOG.md format --- CHANGELOG.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0acca15cb46..7081d366d57 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,6 @@ # Roo Code Changelog -## 3.7.1 - -### Patch Changes +## [3.7.1] - v3.7.1 From 076861f8de6a81d03f19379c0a22bd4358b5cae0 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 24 Feb 2025 15:59:22 -0600 Subject: [PATCH 021/145] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7081d366d57..83b84e1d413 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ## [3.7.1] -- v3.7.1 +- Add AWS Bedrock support for Sonnet 3.7 and update some defaults to Sonnet 3.7 instead of 3.5 ## [3.7.0] From de54a55d2245065611c82724a64e9caa70e510d4 Mon Sep 17 00:00:00 2001 From: Roo Code Date: Mon, 24 Feb 2025 15:15:48 -0700 Subject: [PATCH 022/145] Update test cases for capabilities section to match new tool order --- .../__snapshots__/system.test.ts.snap | 82 +++++++++---------- src/core/prompts/__tests__/sections.test.ts | 8 +- 2 files changed, 45 insertions(+), 45 deletions(-) diff --git a/src/core/prompts/__tests__/__snapshots__/system.test.ts.snap b/src/core/prompts/__tests__/__snapshots__/system.test.ts.snap index 2abc6138619..b8050adb77a 100644 --- a/src/core/prompts/__tests__/__snapshots__/system.test.ts.snap +++ b/src/core/prompts/__tests__/__snapshots__/system.test.ts.snap @@ -2543,43 +2543,6 @@ Example: Requesting to list all top level source code definitions in the current . -## write_to_file -Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. -Parameters: -- path: (required) The path of the file to write to (relative to the current working directory /test/path) -- content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. -- line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. -Usage: - -File path here - -Your file content here - -total number of lines in the file, including empty lines - - -Example: Requesting to write to frontend-config.json - -frontend-config.json - -{ - "apiEndpoint": "https://api.example.com", - "theme": { - "primaryColor": "#007bff", - "secondaryColor": "#6c757d", - "fontFamily": "Arial, sans-serif" - }, - "features": { - "darkMode": true, - "notifications": true, - "analytics": false - }, - "version": "1.0.0" -} - -14 - - ## apply_diff Description: Request to replace existing code using a search and replace block. 
This tool allows for precise, surgical replaces to files by specifying exactly what content to search for and what to replace it with. @@ -2640,6 +2603,43 @@ Your search/replace content here 5 +## write_to_file +Description: Request to write full content to a file at the specified path. If the file exists, it will be overwritten with the provided content. If the file doesn't exist, it will be created. This tool will automatically create any directories needed to write the file. +Parameters: +- path: (required) The path of the file to write to (relative to the current working directory /test/path) +- content: (required) The content to write to the file. ALWAYS provide the COMPLETE intended content of the file, without any truncation or omissions. You MUST include ALL parts of the file, even if they haven't been modified. Do NOT include the line numbers in the content though, just the actual content of the file. +- line_count: (required) The number of lines in the file. Make sure to compute this based on the actual content of the file, not the number of lines in the content you're providing. +Usage: + +File path here + +Your file content here + +total number of lines in the file, including empty lines + + +Example: Requesting to write to frontend-config.json + +frontend-config.json + +{ + "apiEndpoint": "https://api.example.com", + "theme": { + "primaryColor": "#007bff", + "secondaryColor": "#6c757d", + "fontFamily": "Arial, sans-serif" + }, + "features": { + "darkMode": true, + "notifications": true, + "analytics": false + }, + "version": "1.0.0" +} + +14 + + ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Commands will be executed in the current working directory: /test/path Parameters: @@ -2758,7 +2758,7 @@ CAPABILITIES - When the user initially gives you a task, a recursive list of all filepaths in the current working directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current working directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. 
This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. - - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file or apply_diff tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. + - For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the apply_diff or write_to_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. ==== @@ -2775,11 +2775,11 @@ RULES - You cannot \`cd\` into a different directory to complete a task. You are stuck operating from '/test/path', so be sure to pass in the correct 'path' parameter when using tools that require a path. - Do not use the ~ character or $HOME to refer to the home directory. - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with \`cd\`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run \`npm install\` in a project outside of '/test/path', you would need to prepend with a \`cd\` i.e. pseudocode for this would be \`cd (path to project) && (command, in this case npm install)\`. -- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. 
The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. +- When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using apply_diff or write_to_file to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), apply_diff (for replacing lines in existing files). -- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. +- For editing files, you have access to these tools: apply_diff (for replacing lines in existing files), write_to_file (for creating new files or complete file rewrites). - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. +- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. - Be sure to consider the type of project (e.g. Python, JavaScript, web application) when determining the appropriate structure and files to include. 
Also consider what files may be most relevant to accomplishing the task, for example looking at a project's manifest file would help you understand the project's dependencies, which you could incorporate into any code you write. * For example, in architect mode trying to edit app.js would be rejected because architect mode can only edit files matching "\\.md$" diff --git a/src/core/prompts/__tests__/sections.test.ts b/src/core/prompts/__tests__/sections.test.ts index 2100016e467..fe92e63f92e 100644 --- a/src/core/prompts/__tests__/sections.test.ts +++ b/src/core/prompts/__tests__/sections.test.ts @@ -42,15 +42,15 @@ describe("getCapabilitiesSection", () => { test("includes apply_diff in capabilities when diffStrategy is provided", () => { const result = getCapabilitiesSection(cwd, false, mcpHub, mockDiffStrategy) - expect(result).toContain("or apply_diff") - expect(result).toContain("then use the write_to_file or apply_diff tool") + expect(result).toContain("apply_diff or") + expect(result).toContain("then use the apply_diff or write_to_file tool") }) test("excludes apply_diff from capabilities when diffStrategy is undefined", () => { const result = getCapabilitiesSection(cwd, false, mcpHub, undefined) - expect(result).not.toContain("or apply_diff") + expect(result).not.toContain("apply_diff or") expect(result).toContain("then use the write_to_file tool") - expect(result).not.toContain("write_to_file or apply_diff") + expect(result).not.toContain("apply_diff or write_to_file") }) }) From 1d67f88aba2ccd1ab77343202c57799353a6f082 Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 15:09:46 -0800 Subject: [PATCH 023/145] Sliding window fix --- src/core/sliding-window/index.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/core/sliding-window/index.ts b/src/core/sliding-window/index.ts index ee4a1543e77..c84f308387c 100644 --- a/src/core/sliding-window/index.ts +++ b/src/core/sliding-window/index.ts @@ -55,13 +55,15 @@ export function truncateConversationIfNeeded( /** * Calculates the maximum allowed tokens for models that support prompt caching. * - * The maximum is computed as the greater of (contextWindow - 40000) and 80% of the contextWindow. + * The maximum is computed as the greater of (contextWindow - buffer) and 80% of the contextWindow. * * @param {ModelInfo} modelInfo - The model information containing the context window size. * @returns {number} The maximum number of tokens allowed for prompt caching models. */ function getMaxTokensForPromptCachingModels(modelInfo: ModelInfo): number { - return Math.max(modelInfo.contextWindow - 40_000, modelInfo.contextWindow * 0.8) + // The buffer needs to be at least as large as `modelInfo.maxTokens`. + const buffer = modelInfo.maxTokens ? 
Math.max(40_000, modelInfo.maxTokens) : 40_000 + return Math.max(modelInfo.contextWindow - buffer, modelInfo.contextWindow * 0.8) } /** From c9713ea40a64b761d91e610fdf37212c3ef3211a Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 15:10:53 -0800 Subject: [PATCH 024/145] Fix labels for anthropic/claude-3.7-sonnet:beta --- src/core/webview/ClineProvider.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index b4819d96833..12beecb9765 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -2126,6 +2126,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { switch (rawModel.id) { case "anthropic/claude-3.7-sonnet": + case "anthropic/claude-3.7-sonnet:beta": case "anthropic/claude-3.5-sonnet": case "anthropic/claude-3.5-sonnet:beta": // NOTE: this needs to be synced with api.ts/openrouter default model info. From 6245aa7629abe365b50d4550287b1d79853b4b5d Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 15:40:31 -0800 Subject: [PATCH 025/145] Add tests --- .../__tests__/sliding-window.test.ts | 130 ++++++++++++++++++ src/core/sliding-window/index.ts | 5 +- 2 files changed, 134 insertions(+), 1 deletion(-) create mode 100644 src/core/sliding-window/__tests__/sliding-window.test.ts diff --git a/src/core/sliding-window/__tests__/sliding-window.test.ts b/src/core/sliding-window/__tests__/sliding-window.test.ts new file mode 100644 index 00000000000..182dea67f5d --- /dev/null +++ b/src/core/sliding-window/__tests__/sliding-window.test.ts @@ -0,0 +1,130 @@ +// npx jest src/core/sliding-window/__tests__/sliding-window.test.ts + +import { Anthropic } from "@anthropic-ai/sdk" + +import { ModelInfo } from "../../../shared/api" +import { truncateConversation, truncateConversationIfNeeded } from "../index" + +describe("truncateConversation", () => { + it("should retain the first message", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { role: "user", content: "First message" }, + { role: "assistant", content: "Second message" }, + { role: "user", content: "Third message" }, + ] + + const result = truncateConversation(messages, 0.5) + + // With 2 messages after the first, 0.5 fraction means remove 1 message + // But 1 is odd, so it rounds down to 0 (to make it even) + expect(result.length).toBe(3) // First message + 2 remaining messages + expect(result[0]).toEqual(messages[0]) + expect(result[1]).toEqual(messages[1]) + expect(result[2]).toEqual(messages[2]) + }) + + it("should remove the specified fraction of messages (rounded to even number)", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { role: "user", content: "First message" }, + { role: "assistant", content: "Second message" }, + { role: "user", content: "Third message" }, + { role: "assistant", content: "Fourth message" }, + { role: "user", content: "Fifth message" }, + ] + + // 4 messages excluding first, 0.5 fraction = 2 messages to remove + // 2 is already even, so no rounding needed + const result = truncateConversation(messages, 0.5) + + expect(result.length).toBe(3) + expect(result[0]).toEqual(messages[0]) + expect(result[1]).toEqual(messages[3]) + expect(result[2]).toEqual(messages[4]) + }) + + it("should round to an even number of messages to remove", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { role: "user", content: "First message" }, + { role: "assistant", content: "Second message" }, + { role: "user", content: "Third message" }, + { role: 
"assistant", content: "Fourth message" }, + { role: "user", content: "Fifth message" }, + { role: "assistant", content: "Sixth message" }, + { role: "user", content: "Seventh message" }, + ] + + // 6 messages excluding first, 0.3 fraction = 1.8 messages to remove + // 1.8 rounds down to 1, then to 0 to make it even + const result = truncateConversation(messages, 0.3) + + expect(result.length).toBe(7) // No messages removed + expect(result).toEqual(messages) + }) + + it("should handle edge case with fracToRemove = 0", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { role: "user", content: "First message" }, + { role: "assistant", content: "Second message" }, + { role: "user", content: "Third message" }, + ] + + const result = truncateConversation(messages, 0) + + expect(result).toEqual(messages) + }) + + it("should handle edge case with fracToRemove = 1", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { role: "user", content: "First message" }, + { role: "assistant", content: "Second message" }, + { role: "user", content: "Third message" }, + { role: "assistant", content: "Fourth message" }, + ] + + // 3 messages excluding first, 1.0 fraction = 3 messages to remove + // But 3 is odd, so it rounds down to 2 to make it even + const result = truncateConversation(messages, 1) + + expect(result.length).toBe(2) + expect(result[0]).toEqual(messages[0]) + expect(result[1]).toEqual(messages[3]) + }) +}) + +describe("truncateConversationIfNeeded", () => { + const createModelInfo = (contextWindow: number, supportsPromptCache: boolean, maxTokens?: number): ModelInfo => ({ + contextWindow, + supportsPromptCache, + maxTokens, + }) + + const messages: Anthropic.Messages.MessageParam[] = [ + { role: "user", content: "First message" }, + { role: "assistant", content: "Second message" }, + { role: "user", content: "Third message" }, + { role: "assistant", content: "Fourth message" }, + { role: "user", content: "Fifth message" }, + ] + + it("should not truncate if tokens are below threshold for prompt caching models", () => { + const modelInfo = createModelInfo(200000, true, 50000) + const totalTokens = 100000 // Below threshold + const result = truncateConversationIfNeeded(messages, totalTokens, modelInfo) + expect(result).toEqual(messages) + }) + + it("should not truncate if tokens are below threshold for non-prompt caching models", () => { + const modelInfo = createModelInfo(200000, false) + const totalTokens = 100000 // Below threshold + const result = truncateConversationIfNeeded(messages, totalTokens, modelInfo) + expect(result).toEqual(messages) + }) + + it("should use 80% of context window as threshold if it's greater than (contextWindow - buffer)", () => { + const modelInfo = createModelInfo(50000, true) // Small context window + const totalTokens = 40001 // Above 80% threshold (40000) + const mockResult = [messages[0], messages[3], messages[4]] + const result = truncateConversationIfNeeded(messages, totalTokens, modelInfo) + expect(result).toEqual(mockResult) + }) +}) diff --git a/src/core/sliding-window/index.ts b/src/core/sliding-window/index.ts index c84f308387c..d213f069f18 100644 --- a/src/core/sliding-window/index.ts +++ b/src/core/sliding-window/index.ts @@ -1,4 +1,5 @@ import { Anthropic } from "@anthropic-ai/sdk" + import { ModelInfo } from "../../shared/api" /** @@ -85,7 +86,9 @@ function getTruncFractionForPromptCachingModels(modelInfo: ModelInfo): number { * @returns {number} The maximum number of tokens allowed for non-prompt caching models. 
*/ function getMaxTokensForNonPromptCachingModels(modelInfo: ModelInfo): number { - return Math.max(modelInfo.contextWindow - 40_000, modelInfo.contextWindow * 0.8) + // The buffer needs to be at least as large as `modelInfo.maxTokens`. + const buffer = modelInfo.maxTokens ? Math.max(40_000, modelInfo.maxTokens) : 40_000 + return Math.max(modelInfo.contextWindow - buffer, modelInfo.contextWindow * 0.8) } /** From aa98734cba4e8000ef54d32dd49803acc3d2af3f Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 24 Feb 2025 17:44:45 -0600 Subject: [PATCH 026/145] v3.7.2 --- .changeset/fifty-files-jog.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/fifty-files-jog.md diff --git a/.changeset/fifty-files-jog.md b/.changeset/fifty-files-jog.md new file mode 100644 index 00000000000..6a166a5b611 --- /dev/null +++ b/.changeset/fifty-files-jog.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +v3.7.2 From b2b135c05c47b46cb223c1e448741a67a262a04e Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 15:45:26 -0800 Subject: [PATCH 027/145] [WIP] Claude 3.7 Sonnet (Thinking) --- package-lock.json | 9 ++--- package.json | 2 +- src/api/providers/anthropic.ts | 65 +++++++++++++++++++++------------- src/shared/api.ts | 1 + 4 files changed, 47 insertions(+), 30 deletions(-) diff --git a/package-lock.json b/package-lock.json index b8c65fd8e8d..52b1e0bcb46 100644 --- a/package-lock.json +++ b/package-lock.json @@ -9,7 +9,7 @@ "version": "3.7.1", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", - "@anthropic-ai/sdk": "^0.26.0", + "@anthropic-ai/sdk": "^0.37.0", "@anthropic-ai/vertex-sdk": "^0.4.1", "@aws-sdk/client-bedrock-runtime": "^3.706.0", "@google/generative-ai": "^0.18.0", @@ -122,9 +122,10 @@ } }, "node_modules/@anthropic-ai/sdk": { - "version": "0.26.1", - "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.26.1.tgz", - "integrity": "sha512-HeMJP1bDFfQPQS3XTJAmfXkFBdZ88wvfkE05+vsoA9zGn5dHqEaHOPsqkazf/i0gXYg2XlLxxZrf6rUAarSqzw==", + "version": "0.37.0", + "resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.37.0.tgz", + "integrity": "sha512-tHjX2YbkUBwEgg0JZU3EFSSAQPoK4qQR/NFYa8Vtzd5UAyXzZksCw2In69Rml4R/TyHPBfRYaLK35XiOe33pjw==", + "license": "MIT", "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", diff --git a/package.json b/package.json index 76d1273e5c8..ced42da9f0b 100644 --- a/package.json +++ b/package.json @@ -304,7 +304,7 @@ }, "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", - "@anthropic-ai/sdk": "^0.26.0", + "@anthropic-ai/sdk": "^0.37.0", "@anthropic-ai/vertex-sdk": "^0.4.1", "@aws-sdk/client-bedrock-runtime": "^3.706.0", "@google/generative-ai": "^0.18.0", diff --git a/src/api/providers/anthropic.ts b/src/api/providers/anthropic.ts index 4c62238f461..4bde475e80d 100644 --- a/src/api/providers/anthropic.ts +++ b/src/api/providers/anthropic.ts @@ -1,5 +1,7 @@ import { Anthropic } from "@anthropic-ai/sdk" import { Stream as AnthropicStream } from "@anthropic-ai/sdk/streaming" +import { CacheControlEphemeral } from "@anthropic-ai/sdk/resources" +import { BetaThinkingConfigParam } from "@anthropic-ai/sdk/resources/beta" import { anthropicDefaultModelId, AnthropicModelId, @@ -12,12 +14,15 @@ import { ApiStream } from "../transform/stream" const ANTHROPIC_DEFAULT_TEMPERATURE = 0 +const THINKING_MODELS = ["claude-3-7-sonnet-20250219"] + export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { private options: ApiHandlerOptions private client: Anthropic constructor(options: 
ApiHandlerOptions) { this.options = options + this.client = new Anthropic({ apiKey: this.options.apiKey, baseURL: this.options.anthropicBaseUrl || undefined, @@ -25,26 +30,36 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { } async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { - let stream: AnthropicStream + let stream: AnthropicStream + const cacheControl: CacheControlEphemeral = { type: "ephemeral" } const modelId = this.getModel().id + let thinking: BetaThinkingConfigParam | undefined = undefined + + if (THINKING_MODELS.includes(modelId)) { + thinking = this.options.anthropicThinking + ? { type: "enabled", budget_tokens: this.options.anthropicThinking } + : { type: "disabled" } + } switch (modelId) { - // 'latest' alias does not support cache_control case "claude-3-7-sonnet-20250219": case "claude-3-5-sonnet-20241022": case "claude-3-5-haiku-20241022": case "claude-3-opus-20240229": case "claude-3-haiku-20240307": { - /* - The latest message will be the new user message, one before will be the assistant message from a previous request, and the user message before that will be a previously cached user message. So we need to mark the latest user message as ephemeral to cache it for the next request, and mark the second to last user message as ephemeral to let the server know the last message to retrieve from the cache for the current request.. - */ + /** + * The latest message will be the new user message, one before will + * be the assistant message from a previous request, and the user message before that will be a previously cached user message. So we need to mark the latest user message as ephemeral to cache it for the next request, and mark the second to last user message as ephemeral to let the server know the last message to retrieve from the cache for the current request.. + */ const userMsgIndices = messages.reduce( (acc, msg, index) => (msg.role === "user" ? [...acc, index] : acc), [] as number[], ) + const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1 const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1 - stream = await this.client.beta.promptCaching.messages.create( + + stream = await this.client.messages.create( { model: modelId, max_tokens: this.getModel().info.maxTokens || 8192, @@ -60,12 +75,12 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { { type: "text", text: message.content, - cache_control: { type: "ephemeral" }, + cache_control: cacheControl, }, ] : message.content.map((content, contentIndex) => contentIndex === message.content.length - 1 - ? { ...content, cache_control: { type: "ephemeral" } } + ? { ...content, cache_control: cacheControl } : content, ), } @@ -76,6 +91,7 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { // tool_choice: { type: "auto" }, // tools: tools, stream: true, + thinking, }, (() => { // prompt caching: https://x.com/alexalbert__/status/1823751995901272068 @@ -114,8 +130,9 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { for await (const chunk of stream) { switch (chunk.type) { case "message_start": - // tells us cache reads/writes/input/output + // Tells us cache reads/writes/input/output. 
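					// Note the split: input and cache token counts arrive once here on
					// message_start, while output tokens keep arriving on message_delta
					// below ("along the way and at the end of the message"), so usage
					// consumers presumably accumulate these chunks rather than treat
					// any single one as a final total.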
const usage = chunk.message.usage + yield { type: "usage", inputTokens: usage.input_tokens || 0, @@ -123,43 +140,41 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { cacheWriteTokens: usage.cache_creation_input_tokens || undefined, cacheReadTokens: usage.cache_read_input_tokens || undefined, } + break case "message_delta": - // tells us stop_reason, stop_sequence, and output tokens along the way and at the end of the message - + // Tells us stop_reason, stop_sequence, and output tokens + // along the way and at the end of the message. yield { type: "usage", inputTokens: 0, outputTokens: chunk.usage.output_tokens || 0, } + break case "message_stop": - // no usage data, just an indicator that the message is done + // No usage data, just an indicator that the message is done. break case "content_block_start": switch (chunk.content_block.type) { + case "thinking": + yield { type: "reasoning", text: chunk.content_block.thinking } + break case "text": - // we may receive multiple text blocks, in which case just insert a line break between them + // We may receive multiple text blocks, in which + // case just insert a line break between them. if (chunk.index > 0) { - yield { - type: "text", - text: "\n", - } - } - yield { - type: "text", - text: chunk.content_block.text, + yield { type: "text", text: "\n" } } + + yield { type: "text", text: chunk.content_block.text } break } break case "content_block_delta": switch (chunk.delta.type) { case "text_delta": - yield { - type: "text", - text: chunk.delta.text, - } + yield { type: "text", text: chunk.delta.text } break } break diff --git a/src/shared/api.ts b/src/shared/api.ts index 9c91f8b76dd..cea760c7760 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -21,6 +21,7 @@ export interface ApiHandlerOptions { apiModelId?: string apiKey?: string // anthropic anthropicBaseUrl?: string + anthropicThinking?: number vsCodeLmModelSelector?: vscode.LanguageModelChatSelector glamaModelId?: string glamaModelInfo?: ModelInfo From 972b5424ec9d3bd5ead7557cae81e1a54eaa7e18 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 24 Feb 2025 23:53:22 +0000 Subject: [PATCH 028/145] changeset version bump --- .changeset/fifty-files-jog.md | 5 ----- CHANGELOG.md | 6 ++++++ package-lock.json | 4 ++-- package.json | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) delete mode 100644 .changeset/fifty-files-jog.md diff --git a/.changeset/fifty-files-jog.md b/.changeset/fifty-files-jog.md deleted file mode 100644 index 6a166a5b611..00000000000 --- a/.changeset/fifty-files-jog.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -v3.7.2 diff --git a/CHANGELOG.md b/CHANGELOG.md index 83b84e1d413..2c688be8cb8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Roo Code Changelog +## 3.7.2 + +### Patch Changes + +- v3.7.2 + ## [3.7.1] - Add AWS Bedrock support for Sonnet 3.7 and update some defaults to Sonnet 3.7 instead of 3.5 diff --git a/package-lock.json b/package-lock.json index b8c65fd8e8d..f2c9c75aad4 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "roo-cline", - "version": "3.7.1", + "version": "3.7.2", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "3.7.1", + "version": "3.7.2", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.26.0", diff --git a/package.json b/package.json index 76d1273e5c8..5b4eef2813a 100644 --- a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": 
"Roo Code (prev. Roo Cline)", "description": "An AI-powered autonomous coding agent that lives in your editor.", "publisher": "RooVeterinaryInc", - "version": "3.7.1", + "version": "3.7.2", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From 6a4e4ec8b2aefc07f428236b34b2a7e59103fb51 Mon Sep 17 00:00:00 2001 From: R00-B0T Date: Mon, 24 Feb 2025 23:53:47 +0000 Subject: [PATCH 029/145] Updating CHANGELOG.md format --- CHANGELOG.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c688be8cb8..372c15181e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,6 @@ # Roo Code Changelog -## 3.7.2 - -### Patch Changes +## [3.7.2] - v3.7.2 From 22391e5011c3dfbcfaa4dc1d5620e63fa9b2bdd3 Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 15:55:04 -0800 Subject: [PATCH 030/145] Fix type errors --- src/api/transform/bedrock-converse-format.ts | 4 +++- src/api/transform/gemini-format.ts | 15 ++++----------- src/api/transform/openai-format.ts | 3 +++ src/api/transform/simple-format.ts | 11 +---------- src/api/transform/vscode-lm-format.ts | 3 +++ src/core/Cline.ts | 4 +--- src/integrations/misc/export-markdown.ts | 9 +-------- 7 files changed, 16 insertions(+), 33 deletions(-) diff --git a/src/api/transform/bedrock-converse-format.ts b/src/api/transform/bedrock-converse-format.ts index 07529db1bc0..e4dc9eecc85 100644 --- a/src/api/transform/bedrock-converse-format.ts +++ b/src/api/transform/bedrock-converse-format.ts @@ -193,6 +193,8 @@ export function convertToAnthropicMessage( usage: { input_tokens: streamEvent.metadata.usage.inputTokens || 0, output_tokens: streamEvent.metadata.usage.outputTokens || 0, + cache_creation_input_tokens: null, + cache_read_input_tokens: null, }, } } @@ -203,7 +205,7 @@ export function convertToAnthropicMessage( return { type: "message", role: "assistant", - content: [{ type: "text", text: text }], + content: [{ type: "text", text: text, citations: null }], model: modelId, } } diff --git a/src/api/transform/gemini-format.ts b/src/api/transform/gemini-format.ts index 935e47147aa..3a149dc149b 100644 --- a/src/api/transform/gemini-format.ts +++ b/src/api/transform/gemini-format.ts @@ -11,16 +11,7 @@ import { TextPart, } from "@google/generative-ai" -export function convertAnthropicContentToGemini( - content: - | string - | Array< - | Anthropic.Messages.TextBlockParam - | Anthropic.Messages.ImageBlockParam - | Anthropic.Messages.ToolUseBlockParam - | Anthropic.Messages.ToolResultBlockParam - >, -): Part[] { +export function convertAnthropicContentToGemini(content: Anthropic.Messages.MessageParam["content"]): Part[] { if (typeof content === "string") { return [{ text: content } as TextPart] } @@ -140,7 +131,7 @@ export function convertGeminiResponseToAnthropic( // Add the main text response const text = response.text() if (text) { - content.push({ type: "text", text }) + content.push({ type: "text", text, citations: null }) } // Add function calls as tool_use blocks @@ -190,6 +181,8 @@ export function convertGeminiResponseToAnthropic( usage: { input_tokens: response.usageMetadata?.promptTokenCount ?? 0, output_tokens: response.usageMetadata?.candidatesTokenCount ?? 
0, + cache_creation_input_tokens: null, + cache_read_input_tokens: null, }, } } diff --git a/src/api/transform/openai-format.ts b/src/api/transform/openai-format.ts index fe23b9b2ff4..f421769054f 100644 --- a/src/api/transform/openai-format.ts +++ b/src/api/transform/openai-format.ts @@ -158,6 +158,7 @@ export function convertToAnthropicMessage( { type: "text", text: openAiMessage.content || "", + citations: null, }, ], model: completion.model, @@ -178,6 +179,8 @@ export function convertToAnthropicMessage( usage: { input_tokens: completion.usage?.prompt_tokens || 0, output_tokens: completion.usage?.completion_tokens || 0, + cache_creation_input_tokens: null, + cache_read_input_tokens: null, }, } diff --git a/src/api/transform/simple-format.ts b/src/api/transform/simple-format.ts index c1e4895bba9..39049f76c27 100644 --- a/src/api/transform/simple-format.ts +++ b/src/api/transform/simple-format.ts @@ -3,16 +3,7 @@ import { Anthropic } from "@anthropic-ai/sdk" /** * Convert complex content blocks to simple string content */ -export function convertToSimpleContent( - content: - | string - | Array< - | Anthropic.Messages.TextBlockParam - | Anthropic.Messages.ImageBlockParam - | Anthropic.Messages.ToolUseBlockParam - | Anthropic.Messages.ToolResultBlockParam - >, -): string { +export function convertToSimpleContent(content: Anthropic.Messages.MessageParam["content"]): string { if (typeof content === "string") { return content } diff --git a/src/api/transform/vscode-lm-format.ts b/src/api/transform/vscode-lm-format.ts index 6d7bea92bad..f258dd17328 100644 --- a/src/api/transform/vscode-lm-format.ts +++ b/src/api/transform/vscode-lm-format.ts @@ -175,6 +175,7 @@ export async function convertToAnthropicMessage( return { type: "text", text: part.value, + citations: null, } } @@ -195,6 +196,8 @@ export async function convertToAnthropicMessage( usage: { input_tokens: 0, output_tokens: 0, + cache_creation_input_tokens: null, + cache_read_input_tokens: null, }, } } diff --git a/src/core/Cline.ts b/src/core/Cline.ts index f1f5e41b331..549cec5eea9 100644 --- a/src/core/Cline.ts +++ b/src/core/Cline.ts @@ -69,9 +69,7 @@ const cwd = vscode.workspace.workspaceFolders?.map((folder) => folder.uri.fsPath).at(0) ?? 
path.join(os.homedir(), "Desktop") // may or may not exist but fs checking existence would immediately ask for permission which would be bad UX, need to come up with a better solution type ToolResponse = string | Array -type UserContent = Array< - Anthropic.TextBlockParam | Anthropic.ImageBlockParam | Anthropic.ToolUseBlockParam | Anthropic.ToolResultBlockParam -> +type UserContent = Array export type ClineOptions = { provider: ClineProvider diff --git a/src/integrations/misc/export-markdown.ts b/src/integrations/misc/export-markdown.ts index 2aa9d7b6edc..05b31671d85 100644 --- a/src/integrations/misc/export-markdown.ts +++ b/src/integrations/misc/export-markdown.ts @@ -41,14 +41,7 @@ export async function downloadTask(dateTs: number, conversationHistory: Anthropi } } -export function formatContentBlockToMarkdown( - block: - | Anthropic.TextBlockParam - | Anthropic.ImageBlockParam - | Anthropic.ToolUseBlockParam - | Anthropic.ToolResultBlockParam, - // messages: Anthropic.MessageParam[] -): string { +export function formatContentBlockToMarkdown(block: Anthropic.Messages.ContentBlockParam): string { switch (block.type) { case "text": return block.text From f69e9c16b3252d7637c8ee0e26a3ceb9308dea13 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 24 Feb 2025 17:58:48 -0600 Subject: [PATCH 031/145] Update CHANGELOG.md --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 372c15181e7..b30e77534e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,9 @@ ## [3.7.2] -- v3.7.2 +- Fix computer use and prompt caching for OpenRouter's `anthropic:claude-3.7-sonnet:beta` (thanks @cte!) +- Fix sliding window calculations for Sonnet 3.7 that were causing a context window overflow (thanks @cte!) +- Encourage diff editing more strongly in the system prompt (thanks @hannesrudolph!) ## [3.7.1] From 83a909713657b65f0e203ce552dd2bdf006f2e4f Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Mon, 24 Feb 2025 18:00:03 -0600 Subject: [PATCH 032/145] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b30e77534e3..381e0907eb1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ## [3.7.2] -- Fix computer use and prompt caching for OpenRouter's `anthropic:claude-3.7-sonnet:beta` (thanks @cte!) +- Fix computer use and prompt caching for OpenRouter's `anthropic/claude-3.7-sonnet:beta` (thanks @cte!) - Fix sliding window calculations for Sonnet 3.7 that were causing a context window overflow (thanks @cte!) - Encourage diff editing more strongly in the system prompt (thanks @hannesrudolph!) 
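The 3.7.2 notes above mention the prompt-caching fix for Claude 3.7 on OpenRouter. As a rough illustration of the cache-breakpoint pattern the Anthropic provider patches in this series rely on (ephemeral markers on the system prompt and on the last two user messages), here is a minimal hedged sketch; the helper name `withCacheBreakpoints` and its standalone shape are invented for illustration, only the breakpoint placement comes from the diffs, and the system-prompt breakpoint is omitted here.

```typescript
import { Anthropic } from "@anthropic-ai/sdk"

type CacheControl = { type: "ephemeral" }

// Attach an ephemeral cache_control marker to the last two user messages so the
// stable prefix before them can be reused from Anthropic's prompt cache.
// Assumes an SDK version whose content-block types accept `cache_control`.
function withCacheBreakpoints(
	messages: Anthropic.Messages.MessageParam[],
	cacheControl: CacheControl = { type: "ephemeral" },
): Anthropic.Messages.MessageParam[] {
	const userIndices = messages.map((m, i) => (m.role === "user" ? i : -1)).filter((i) => i !== -1)
	const lastUserMsgIndex = userIndices[userIndices.length - 1] ?? -1
	const secondLastUserMsgIndex = userIndices[userIndices.length - 2] ?? -1

	return messages.map((message, index) => {
		if (index !== lastUserMsgIndex && index !== secondLastUserMsgIndex) {
			return message
		}

		const content =
			typeof message.content === "string"
				? [{ type: "text" as const, text: message.content, cache_control: cacheControl }]
				: message.content.map((block, blockIndex) =>
						blockIndex === message.content.length - 1 ? { ...block, cache_control: cacheControl } : block,
					)

		return { ...message, content } as Anthropic.Messages.MessageParam
	})
}
```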
From 543b68f785f98f62976655d5f4510bccfd0ffb42 Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 16:09:31 -0800 Subject: [PATCH 033/145] Fix tests --- src/api/providers/__tests__/anthropic.test.ts | 76 +++++-------------- .../__tests__/bedrock-converse-format.test.ts | 8 +- .../transform/__tests__/openai-format.test.ts | 6 ++ .../__tests__/vscode-lm-format.test.ts | 3 + 4 files changed, 33 insertions(+), 60 deletions(-) diff --git a/src/api/providers/__tests__/anthropic.test.ts b/src/api/providers/__tests__/anthropic.test.ts index df0050ab9cd..ff7bdb40549 100644 --- a/src/api/providers/__tests__/anthropic.test.ts +++ b/src/api/providers/__tests__/anthropic.test.ts @@ -1,50 +1,13 @@ +// npx jest src/api/providers/__tests__/anthropic.test.ts + import { AnthropicHandler } from "../anthropic" import { ApiHandlerOptions } from "../../../shared/api" -import { ApiStream } from "../../transform/stream" -import { Anthropic } from "@anthropic-ai/sdk" -// Mock Anthropic client -const mockBetaCreate = jest.fn() const mockCreate = jest.fn() + jest.mock("@anthropic-ai/sdk", () => { return { Anthropic: jest.fn().mockImplementation(() => ({ - beta: { - promptCaching: { - messages: { - create: mockBetaCreate.mockImplementation(async () => ({ - async *[Symbol.asyncIterator]() { - yield { - type: "message_start", - message: { - usage: { - input_tokens: 100, - output_tokens: 50, - cache_creation_input_tokens: 20, - cache_read_input_tokens: 10, - }, - }, - } - yield { - type: "content_block_start", - index: 0, - content_block: { - type: "text", - text: "Hello", - }, - } - yield { - type: "content_block_delta", - delta: { - type: "text_delta", - text: " world", - }, - } - }, - })), - }, - }, - }, messages: { create: mockCreate.mockImplementation(async (options) => { if (!options.stream) { @@ -65,16 +28,26 @@ jest.mock("@anthropic-ai/sdk", () => { type: "message_start", message: { usage: { - input_tokens: 10, - output_tokens: 5, + input_tokens: 100, + output_tokens: 50, + cache_creation_input_tokens: 20, + cache_read_input_tokens: 10, }, }, } yield { type: "content_block_start", + index: 0, content_block: { type: "text", - text: "Test response", + text: "Hello", + }, + } + yield { + type: "content_block_delta", + delta: { + type: "text_delta", + text: " world", }, } }, @@ -95,7 +68,6 @@ describe("AnthropicHandler", () => { apiModelId: "claude-3-5-sonnet-20241022", } handler = new AnthropicHandler(mockOptions) - mockBetaCreate.mockClear() mockCreate.mockClear() }) @@ -126,17 +98,6 @@ describe("AnthropicHandler", () => { describe("createMessage", () => { const systemPrompt = "You are a helpful assistant." 
- const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { - type: "text" as const, - text: "Hello!", - }, - ], - }, - ] it("should handle prompt caching for supported models", async () => { const stream = handler.createMessage(systemPrompt, [ @@ -173,9 +134,8 @@ describe("AnthropicHandler", () => { expect(textChunks[0].text).toBe("Hello") expect(textChunks[1].text).toBe(" world") - // Verify beta API was used - expect(mockBetaCreate).toHaveBeenCalled() - expect(mockCreate).not.toHaveBeenCalled() + // Verify API + expect(mockCreate).toHaveBeenCalled() }) }) diff --git a/src/api/transform/__tests__/bedrock-converse-format.test.ts b/src/api/transform/__tests__/bedrock-converse-format.test.ts index c46eb94a2e0..fdd29c75bf2 100644 --- a/src/api/transform/__tests__/bedrock-converse-format.test.ts +++ b/src/api/transform/__tests__/bedrock-converse-format.test.ts @@ -1,3 +1,5 @@ +// npx jest src/api/transform/__tests__/bedrock-converse-format.test.ts + import { convertToBedrockConverseMessages, convertToAnthropicMessage } from "../bedrock-converse-format" import { Anthropic } from "@anthropic-ai/sdk" import { ContentBlock, ToolResultContentBlock } from "@aws-sdk/client-bedrock-runtime" @@ -187,6 +189,8 @@ describe("bedrock-converse-format", () => { usage: { input_tokens: 10, output_tokens: 20, + cache_creation_input_tokens: null, + cache_read_input_tokens: null, }, }) }) @@ -205,7 +209,7 @@ describe("bedrock-converse-format", () => { expect(result).toEqual({ type: "message", role: "assistant", - content: [{ type: "text", text: "Hello" }], + content: [{ type: "text", text: "Hello", citations: null }], model: "test-model", }) }) @@ -224,7 +228,7 @@ describe("bedrock-converse-format", () => { expect(result).toEqual({ type: "message", role: "assistant", - content: [{ type: "text", text: " world" }], + content: [{ type: "text", text: " world", citations: null }], model: "test-model", }) }) diff --git a/src/api/transform/__tests__/openai-format.test.ts b/src/api/transform/__tests__/openai-format.test.ts index f37d369d701..812208acd1e 100644 --- a/src/api/transform/__tests__/openai-format.test.ts +++ b/src/api/transform/__tests__/openai-format.test.ts @@ -1,3 +1,5 @@ +// npx jest src/api/transform/__tests__/openai-format.test.ts + import { convertToOpenAiMessages, convertToAnthropicMessage } from "../openai-format" import { Anthropic } from "@anthropic-ai/sdk" import OpenAI from "openai" @@ -172,11 +174,14 @@ describe("OpenAI Format Transformations", () => { expect(anthropicMessage.content[0]).toEqual({ type: "text", text: "Hello there!", + citations: null, }) expect(anthropicMessage.stop_reason).toBe("end_turn") expect(anthropicMessage.usage).toEqual({ input_tokens: 10, output_tokens: 5, + cache_creation_input_tokens: null, + cache_read_input_tokens: null, }) }) @@ -221,6 +226,7 @@ describe("OpenAI Format Transformations", () => { expect(anthropicMessage.content[0]).toEqual({ type: "text", text: "Let me check the weather.", + citations: null, }) expect(anthropicMessage.content[1]).toEqual({ type: "tool_use", diff --git a/src/api/transform/__tests__/vscode-lm-format.test.ts b/src/api/transform/__tests__/vscode-lm-format.test.ts index b27097fd17e..eb800e2b7a8 100644 --- a/src/api/transform/__tests__/vscode-lm-format.test.ts +++ b/src/api/transform/__tests__/vscode-lm-format.test.ts @@ -1,3 +1,5 @@ +// npx jest src/api/transform/__tests__/vscode-lm-format.test.ts + import { Anthropic } from "@anthropic-ai/sdk" import * as vscode from "vscode" import { 
convertToVsCodeLmMessages, convertToAnthropicRole, convertToAnthropicMessage } from "../vscode-lm-format" @@ -216,6 +218,7 @@ describe("vscode-lm-format", () => { expect(result.content[0]).toEqual({ type: "text", text: "Hello", + citations: null, }) expect(result.id).toBe("test-uuid") }) From bf1aa4c7b266325d7dee86dd290dbff39f32f608 Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 16:47:27 -0800 Subject: [PATCH 034/145] Add settings --- src/api/providers/anthropic.ts | 37 +++++++++++++------ .../src/components/settings/ApiOptions.tsx | 31 +++++++++++++++- 2 files changed, 55 insertions(+), 13 deletions(-) diff --git a/src/api/providers/anthropic.ts b/src/api/providers/anthropic.ts index 4bde475e80d..0d4b67b0678 100644 --- a/src/api/providers/anthropic.ts +++ b/src/api/providers/anthropic.ts @@ -33,12 +33,16 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { let stream: AnthropicStream const cacheControl: CacheControlEphemeral = { type: "ephemeral" } const modelId = this.getModel().id + const maxTokens = this.getModel().info.maxTokens || 8192 + let temperature = this.options.modelTemperature ?? ANTHROPIC_DEFAULT_TEMPERATURE let thinking: BetaThinkingConfigParam | undefined = undefined if (THINKING_MODELS.includes(modelId)) { thinking = this.options.anthropicThinking ? { type: "enabled", budget_tokens: this.options.anthropicThinking } : { type: "disabled" } + + temperature = 1.0 } switch (modelId) { @@ -62,22 +66,18 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { stream = await this.client.messages.create( { model: modelId, - max_tokens: this.getModel().info.maxTokens || 8192, - temperature: this.options.modelTemperature ?? ANTHROPIC_DEFAULT_TEMPERATURE, - system: [{ text: systemPrompt, type: "text", cache_control: { type: "ephemeral" } }], // setting cache breakpoint for system prompt so new tasks can reuse it + max_tokens: maxTokens, + temperature, + thinking, + // Setting cache breakpoint for system prompt so new tasks can reuse it. + system: [{ text: systemPrompt, type: "text", cache_control: cacheControl }], messages: messages.map((message, index) => { if (index === lastUserMsgIndex || index === secondLastMsgUserIndex) { return { ...message, content: typeof message.content === "string" - ? [ - { - type: "text", - text: message.content, - cache_control: cacheControl, - }, - ] + ? [{ type: "text", text: message.content, cache_control: cacheControl }] : message.content.map((content, contentIndex) => contentIndex === message.content.length - 1 ? { ...content, cache_control: cacheControl } @@ -91,7 +91,6 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { // tool_choice: { type: "auto" }, // tools: tools, stream: true, - thinking, }, (() => { // prompt caching: https://x.com/alexalbert__/status/1823751995901272068 @@ -132,6 +131,7 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { case "message_start": // Tells us cache reads/writes/input/output. const usage = chunk.message.usage + console.log("usage", usage) yield { type: "usage", @@ -158,6 +158,12 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { case "content_block_start": switch (chunk.content_block.type) { case "thinking": + // We may receive multiple text blocks, in which + // case just insert a line break between them. 
+ if (chunk.index > 0) { + yield { type: "reasoning", text: "\n" } + } + yield { type: "reasoning", text: chunk.content_block.thinking } break case "text": @@ -173,10 +179,14 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { break case "content_block_delta": switch (chunk.delta.type) { + case "thinking_delta": + yield { type: "reasoning", text: chunk.delta.thinking } + break case "text_delta": yield { type: "text", text: chunk.delta.text } break } + break case "content_block_stop": break @@ -186,10 +196,12 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { getModel(): { id: AnthropicModelId; info: ModelInfo } { const modelId = this.options.apiModelId + if (modelId && modelId in anthropicModels) { const id = modelId as AnthropicModelId return { id, info: anthropicModels[id] } } + return { id: anthropicDefaultModelId, info: anthropicModels[anthropicDefaultModelId] } } @@ -204,14 +216,17 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { }) const content = response.content[0] + if (content.type === "text") { return content.text } + return "" } catch (error) { if (error instanceof Error) { throw new Error(`Anthropic completion error: ${error.message}`) } + throw error } } diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 1303e79c7ab..48529d48527 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -43,6 +43,7 @@ import { UnboundModelPicker } from "./UnboundModelPicker" import { ModelInfoView } from "./ModelInfoView" import { DROPDOWN_Z_INDEX } from "./styles" import { RequestyModelPicker } from "./RequestyModelPicker" +import { Slider } from "../ui" interface ApiOptionsProps { uriScheme: string | undefined @@ -65,6 +66,8 @@ const ApiOptions = ({ const [lmStudioModels, setLmStudioModels] = useState([]) const [vsCodeLmModels, setVsCodeLmModels] = useState([]) const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl) + const [anthropicThinkingEnabled, setAnthropicThinkingEnabled] = useState(!!apiConfiguration?.anthropicThinking) + const [anthropicThinkingBudget, setAnthropicThinkingBudget] = useState(apiConfiguration?.anthropicThinking ?? 1024) const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion) const [openRouterBaseUrlSelected, setOpenRouterBaseUrlSelected] = useState(!!apiConfiguration?.openRouterBaseUrl) const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false) @@ -1224,7 +1227,6 @@ const ApiOptions = ({ )} {selectedProvider === "glama" && } - {selectedProvider === "openrouter" && } {selectedProvider === "requesty" && } @@ -1258,8 +1260,33 @@ const ApiOptions = ({ )} + {selectedProvider === "anthropic" && ( +
+ + Thinking? + + {anthropicThinkingEnabled && ( + <> +
+ Number of tokens Claude is allowed to use for its internal reasoning process +
+
+ setAnthropicThinkingBudget(value[0])} + /> +
{anthropicThinkingBudget}
+
+ + )} +
+ )} + {!fromWelcomeView && ( -
+
Date: Mon, 24 Feb 2025 21:08:08 -0800 Subject: [PATCH 035/145] Add tests, remove unused code --- .../transform/__tests__/gemini-format.test.ts | 338 ++++++++++++++++++ src/api/transform/gemini-format.ts | 111 +----- src/api/transform/vscode-lm-format.ts | 1 + 3 files changed, 342 insertions(+), 108 deletions(-) create mode 100644 src/api/transform/__tests__/gemini-format.test.ts diff --git a/src/api/transform/__tests__/gemini-format.test.ts b/src/api/transform/__tests__/gemini-format.test.ts new file mode 100644 index 00000000000..fe6b2564047 --- /dev/null +++ b/src/api/transform/__tests__/gemini-format.test.ts @@ -0,0 +1,338 @@ +// npx jest src/api/transform/__tests__/gemini-format.test.ts + +import { Anthropic } from "@anthropic-ai/sdk" + +import { convertAnthropicMessageToGemini } from "../gemini-format" + +describe("convertAnthropicMessageToGemini", () => { + it("should convert a simple text message", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: "Hello, world!", + } + + const result = convertAnthropicMessageToGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [{ text: "Hello, world!" }], + }) + }) + + it("should convert assistant role to model role", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "assistant", + content: "I'm an assistant", + } + + const result = convertAnthropicMessageToGemini(anthropicMessage) + + expect(result).toEqual({ + role: "model", + parts: [{ text: "I'm an assistant" }], + }) + }) + + it("should convert a message with text blocks", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { type: "text", text: "First paragraph" }, + { type: "text", text: "Second paragraph" }, + ], + } + + const result = convertAnthropicMessageToGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [{ text: "First paragraph" }, { text: "Second paragraph" }], + }) + }) + + it("should convert a message with an image", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { type: "text", text: "Check out this image:" }, + { + type: "image", + source: { + type: "base64", + media_type: "image/jpeg", + data: "base64encodeddata", + }, + }, + ], + } + + const result = convertAnthropicMessageToGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [ + { text: "Check out this image:" }, + { + inlineData: { + data: "base64encodeddata", + mimeType: "image/jpeg", + }, + }, + ], + }) + }) + + it("should throw an error for unsupported image source type", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { + type: "image", + source: { + type: "url", // Not supported + url: "https://example.com/image.jpg", + } as any, + }, + ], + } + + expect(() => convertAnthropicMessageToGemini(anthropicMessage)).toThrow("Unsupported image source type") + }) + + it("should convert a message with tool use", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "assistant", + content: [ + { type: "text", text: "Let me calculate that for you." }, + { + type: "tool_use", + id: "calc-123", + name: "calculator", + input: { operation: "add", numbers: [2, 3] }, + }, + ], + } + + const result = convertAnthropicMessageToGemini(anthropicMessage) + + expect(result).toEqual({ + role: "model", + parts: [ + { text: "Let me calculate that for you." 
}, + { + functionCall: { + name: "calculator", + args: { operation: "add", numbers: [2, 3] }, + }, + }, + ], + }) + }) + + it("should convert a message with tool result as string", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { type: "text", text: "Here's the result:" }, + { + type: "tool_result", + tool_use_id: "calculator-123", + content: "The result is 5", + }, + ], + } + + const result = convertAnthropicMessageToGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [ + { text: "Here's the result:" }, + { + functionResponse: { + name: "calculator", + response: { + name: "calculator", + content: "The result is 5", + }, + }, + }, + ], + }) + }) + + it("should handle empty tool result content", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "calculator-123", + content: null as any, // Empty content + }, + ], + } + + const result = convertAnthropicMessageToGemini(anthropicMessage) + + // Should skip the empty tool result + expect(result).toEqual({ + role: "user", + parts: [], + }) + }) + + it("should convert a message with tool result as array with text only", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "search-123", + content: [ + { type: "text", text: "First result" }, + { type: "text", text: "Second result" }, + ], + }, + ], + } + + const result = convertAnthropicMessageToGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [ + { + functionResponse: { + name: "search", + response: { + name: "search", + content: "First result\n\nSecond result", + }, + }, + }, + ], + }) + }) + + it("should convert a message with tool result as array with text and images", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "search-123", + content: [ + { type: "text", text: "Search results:" }, + { + type: "image", + source: { + type: "base64", + media_type: "image/png", + data: "image1data", + }, + }, + { + type: "image", + source: { + type: "base64", + media_type: "image/jpeg", + data: "image2data", + }, + }, + ], + }, + ], + } + + const result = convertAnthropicMessageToGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [ + { + functionResponse: { + name: "search", + response: { + name: "search", + content: "Search results:\n\n(See next part for image)", + }, + }, + }, + { + inlineData: { + data: "image1data", + mimeType: "image/png", + }, + }, + { + inlineData: { + data: "image2data", + mimeType: "image/jpeg", + }, + }, + ], + }) + }) + + it("should convert a message with tool result containing only images", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "imagesearch-123", + content: [ + { + type: "image", + source: { + type: "base64", + media_type: "image/png", + data: "onlyimagedata", + }, + }, + ], + }, + ], + } + + const result = convertAnthropicMessageToGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [ + { + functionResponse: { + name: "imagesearch", + response: { + name: "imagesearch", + content: "\n\n(See next part for image)", + }, + }, + }, + { + inlineData: { + data: "onlyimagedata", + mimeType: "image/png", + }, + }, + ], + }) + }) + + it("should throw an error 
for unsupported content block type", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { + type: "unknown_type", // Unsupported type + data: "some data", + } as any, + ], + } + + expect(() => convertAnthropicMessageToGemini(anthropicMessage)).toThrow( + "Unsupported content block type: unknown_type", + ) + }) +}) diff --git a/src/api/transform/gemini-format.ts b/src/api/transform/gemini-format.ts index 3a149dc149b..c8fc80d769d 100644 --- a/src/api/transform/gemini-format.ts +++ b/src/api/transform/gemini-format.ts @@ -1,20 +1,11 @@ import { Anthropic } from "@anthropic-ai/sdk" -import { - Content, - EnhancedGenerateContentResponse, - FunctionCallPart, - FunctionDeclaration, - FunctionResponsePart, - InlineDataPart, - Part, - SchemaType, - TextPart, -} from "@google/generative-ai" +import { Content, FunctionCallPart, FunctionResponsePart, InlineDataPart, Part, TextPart } from "@google/generative-ai" -export function convertAnthropicContentToGemini(content: Anthropic.Messages.MessageParam["content"]): Part[] { +function convertAnthropicContentToGemini(content: Anthropic.Messages.MessageParam["content"]): Part[] { if (typeof content === "string") { return [{ text: content } as TextPart] } + return content.flatMap((block) => { switch (block.type) { case "text": @@ -90,99 +81,3 @@ export function convertAnthropicMessageToGemini(message: Anthropic.Messages.Mess parts: convertAnthropicContentToGemini(message.content), } } - -export function convertAnthropicToolToGemini(tool: Anthropic.Messages.Tool): FunctionDeclaration { - return { - name: tool.name, - description: tool.description || "", - parameters: { - type: SchemaType.OBJECT, - properties: Object.fromEntries( - Object.entries(tool.input_schema.properties || {}).map(([key, value]) => [ - key, - { - type: (value as any).type.toUpperCase(), - description: (value as any).description || "", - }, - ]), - ), - required: (tool.input_schema.required as string[]) || [], - }, - } -} - -/* -It looks like gemini likes to double escape certain characters when writing file contents: https://discuss.ai.google.dev/t/function-call-string-property-is-double-escaped/37867 -*/ -export function unescapeGeminiContent(content: string) { - return content - .replace(/\\n/g, "\n") - .replace(/\\'/g, "'") - .replace(/\\"/g, '"') - .replace(/\\r/g, "\r") - .replace(/\\t/g, "\t") -} - -export function convertGeminiResponseToAnthropic( - response: EnhancedGenerateContentResponse, -): Anthropic.Messages.Message { - const content: Anthropic.Messages.ContentBlock[] = [] - - // Add the main text response - const text = response.text() - if (text) { - content.push({ type: "text", text, citations: null }) - } - - // Add function calls as tool_use blocks - const functionCalls = response.functionCalls() - if (functionCalls) { - functionCalls.forEach((call, index) => { - if ("content" in call.args && typeof call.args.content === "string") { - call.args.content = unescapeGeminiContent(call.args.content) - } - content.push({ - type: "tool_use", - id: `${call.name}-${index}-${Date.now()}`, - name: call.name, - input: call.args, - }) - }) - } - - // Determine stop reason - let stop_reason: Anthropic.Messages.Message["stop_reason"] = null - const finishReason = response.candidates?.[0]?.finishReason - if (finishReason) { - switch (finishReason) { - case "STOP": - stop_reason = "end_turn" - break - case "MAX_TOKENS": - stop_reason = "max_tokens" - break - case "SAFETY": - case "RECITATION": - case "OTHER": - stop_reason = 
"stop_sequence" - break - // Add more cases if needed - } - } - - return { - id: `msg_${Date.now()}`, // Generate a unique ID - type: "message", - role: "assistant", - content, - model: "", - stop_reason, - stop_sequence: null, // Gemini doesn't provide this information - usage: { - input_tokens: response.usageMetadata?.promptTokenCount ?? 0, - output_tokens: response.usageMetadata?.candidatesTokenCount ?? 0, - cache_creation_input_tokens: null, - cache_read_input_tokens: null, - }, - } -} diff --git a/src/api/transform/vscode-lm-format.ts b/src/api/transform/vscode-lm-format.ts index f258dd17328..85c2fc7ba5b 100644 --- a/src/api/transform/vscode-lm-format.ts +++ b/src/api/transform/vscode-lm-format.ts @@ -160,6 +160,7 @@ export async function convertToAnthropicMessage( vsCodeLmMessage: vscode.LanguageModelChatMessage, ): Promise { const anthropicRole: string | null = convertToAnthropicRole(vsCodeLmMessage.role) + if (anthropicRole !== "assistant") { throw new Error("Roo Code : Only assistant messages are supported.") } From 849f8bf8e814d9321324c7d19f3b6063b880b67a Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 21:13:23 -0800 Subject: [PATCH 036/145] Add tests --- .../__tests__/mistral-format.test.ts | 301 ++++++++++++++++++ src/api/transform/mistral-format.ts | 2 +- 2 files changed, 302 insertions(+), 1 deletion(-) create mode 100644 src/api/transform/__tests__/mistral-format.test.ts diff --git a/src/api/transform/__tests__/mistral-format.test.ts b/src/api/transform/__tests__/mistral-format.test.ts new file mode 100644 index 00000000000..b8e9412edaf --- /dev/null +++ b/src/api/transform/__tests__/mistral-format.test.ts @@ -0,0 +1,301 @@ +// npx jest src/api/transform/__tests__/mistral-format.test.ts + +import { Anthropic } from "@anthropic-ai/sdk" + +import { convertToMistralMessages } from "../mistral-format" + +describe("convertToMistralMessages", () => { + it("should convert simple text messages for user and assistant roles", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: "Hello", + }, + { + role: "assistant", + content: "Hi there!", + }, + ] + + const mistralMessages = convertToMistralMessages(anthropicMessages) + expect(mistralMessages).toHaveLength(2) + expect(mistralMessages[0]).toEqual({ + role: "user", + content: "Hello", + }) + expect(mistralMessages[1]).toEqual({ + role: "assistant", + content: "Hi there!", + }) + }) + + it("should handle user messages with image content", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: [ + { + type: "text", + text: "What is in this image?", + }, + { + type: "image", + source: { + type: "base64", + media_type: "image/jpeg", + data: "base64data", + }, + }, + ], + }, + ] + + const mistralMessages = convertToMistralMessages(anthropicMessages) + expect(mistralMessages).toHaveLength(1) + expect(mistralMessages[0].role).toBe("user") + + const content = mistralMessages[0].content as Array<{ + type: string + text?: string + imageUrl?: { url: string } + }> + + expect(Array.isArray(content)).toBe(true) + expect(content).toHaveLength(2) + expect(content[0]).toEqual({ type: "text", text: "What is in this image?" 
}) + expect(content[1]).toEqual({ + type: "image_url", + imageUrl: { url: "data:image/jpeg;base64,base64data" }, + }) + }) + + it("should handle user messages with only tool results", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "weather-123", + content: "Current temperature in London: 20°C", + }, + ], + }, + ] + + // Based on the implementation, tool results without accompanying text/image + // don't generate any messages + const mistralMessages = convertToMistralMessages(anthropicMessages) + expect(mistralMessages).toHaveLength(0) + }) + + it("should handle user messages with mixed content (text, image, and tool results)", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: [ + { + type: "text", + text: "Here's the weather data and an image:", + }, + { + type: "image", + source: { + type: "base64", + media_type: "image/png", + data: "imagedata123", + }, + }, + { + type: "tool_result", + tool_use_id: "weather-123", + content: "Current temperature in London: 20°C", + }, + ], + }, + ] + + const mistralMessages = convertToMistralMessages(anthropicMessages) + // Based on the implementation, only the text and image content is included + // Tool results are not converted to separate messages + expect(mistralMessages).toHaveLength(1) + + // Message should be the user message with text and image + expect(mistralMessages[0].role).toBe("user") + const userContent = mistralMessages[0].content as Array<{ + type: string + text?: string + imageUrl?: { url: string } + }> + expect(Array.isArray(userContent)).toBe(true) + expect(userContent).toHaveLength(2) + expect(userContent[0]).toEqual({ type: "text", text: "Here's the weather data and an image:" }) + expect(userContent[1]).toEqual({ + type: "image_url", + imageUrl: { url: "data:image/png;base64,imagedata123" }, + }) + }) + + it("should handle assistant messages with text content", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "assistant", + content: [ + { + type: "text", + text: "I'll help you with that question.", + }, + ], + }, + ] + + const mistralMessages = convertToMistralMessages(anthropicMessages) + expect(mistralMessages).toHaveLength(1) + expect(mistralMessages[0].role).toBe("assistant") + expect(mistralMessages[0].content).toBe("I'll help you with that question.") + }) + + it("should handle assistant messages with tool use", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "assistant", + content: [ + { + type: "text", + text: "Let me check the weather for you.", + }, + { + type: "tool_use", + id: "weather-123", + name: "get_weather", + input: { city: "London" }, + }, + ], + }, + ] + + const mistralMessages = convertToMistralMessages(anthropicMessages) + expect(mistralMessages).toHaveLength(1) + expect(mistralMessages[0].role).toBe("assistant") + expect(mistralMessages[0].content).toBe("Let me check the weather for you.") + }) + + it("should handle multiple text blocks in assistant messages", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "assistant", + content: [ + { + type: "text", + text: "First paragraph of information.", + }, + { + type: "text", + text: "Second paragraph with more details.", + }, + ], + }, + ] + + const mistralMessages = convertToMistralMessages(anthropicMessages) + expect(mistralMessages).toHaveLength(1) + expect(mistralMessages[0].role).toBe("assistant") 
+ expect(mistralMessages[0].content).toBe("First paragraph of information.\nSecond paragraph with more details.") + }) + + it("should handle a conversation with mixed message types", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: [ + { + type: "text", + text: "What's in this image?", + }, + { + type: "image", + source: { + type: "base64", + media_type: "image/jpeg", + data: "imagedata", + }, + }, + ], + }, + { + role: "assistant", + content: [ + { + type: "text", + text: "This image shows a landscape with mountains.", + }, + { + type: "tool_use", + id: "search-123", + name: "search_info", + input: { query: "mountain types" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "search-123", + content: "Found information about different mountain types.", + }, + ], + }, + { + role: "assistant", + content: "Based on the search results, I can tell you more about the mountains in the image.", + }, + ] + + const mistralMessages = convertToMistralMessages(anthropicMessages) + // Based on the implementation, user messages with only tool results don't generate messages + expect(mistralMessages).toHaveLength(3) + + // User message with image + expect(mistralMessages[0].role).toBe("user") + const userContent = mistralMessages[0].content as Array<{ + type: string + text?: string + imageUrl?: { url: string } + }> + expect(Array.isArray(userContent)).toBe(true) + expect(userContent).toHaveLength(2) + + // Assistant message with text (tool_use is not included in Mistral format) + expect(mistralMessages[1].role).toBe("assistant") + expect(mistralMessages[1].content).toBe("This image shows a landscape with mountains.") + + // Final assistant message + expect(mistralMessages[2]).toEqual({ + role: "assistant", + content: "Based on the search results, I can tell you more about the mountains in the image.", + }) + }) + + it("should handle empty content in assistant messages", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "search-123", + name: "search_info", + input: { query: "test query" }, + }, + ], + }, + ] + + const mistralMessages = convertToMistralMessages(anthropicMessages) + expect(mistralMessages).toHaveLength(1) + expect(mistralMessages[0].role).toBe("assistant") + expect(mistralMessages[0].content).toBeUndefined() + }) +}) diff --git a/src/api/transform/mistral-format.ts b/src/api/transform/mistral-format.ts index 16c6aaf2384..baf81ef24d2 100644 --- a/src/api/transform/mistral-format.ts +++ b/src/api/transform/mistral-format.ts @@ -1,5 +1,4 @@ import { Anthropic } from "@anthropic-ai/sdk" -import { Mistral } from "@mistralai/mistralai" import { AssistantMessage } from "@mistralai/mistralai/models/components/assistantmessage" import { SystemMessage } from "@mistralai/mistralai/models/components/systemmessage" import { ToolMessage } from "@mistralai/mistralai/models/components/toolmessage" @@ -13,6 +12,7 @@ export type MistralMessage = export function convertToMistralMessages(anthropicMessages: Anthropic.Messages.MessageParam[]): MistralMessage[] { const mistralMessages: MistralMessage[] = [] + for (const anthropicMessage of anthropicMessages) { if (typeof anthropicMessage.content === "string") { mistralMessages.push({ From 09902904f371a89b980d68c757023808f363be7c Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 22:08:14 -0800 Subject: [PATCH 037/145] Persist anthropicThinking --- 
src/core/webview/ClineProvider.ts | 6 +++ .../__tests__/checkExistApiConfig.test.ts | 1 + .../src/components/settings/ApiOptions.tsx | 38 +++++++++++----- .../src/components/settings/SettingsView.tsx | 44 +++++++++---------- webview-ui/src/components/ui/index.ts | 1 + 5 files changed, 55 insertions(+), 35 deletions(-) diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index b4819d96833..99caceffc29 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -89,6 +89,7 @@ type GlobalStateKey = | "lmStudioModelId" | "lmStudioBaseUrl" | "anthropicBaseUrl" + | "anthropicThinking" | "azureApiVersion" | "openAiStreamingEnabled" | "openRouterModelId" @@ -1654,6 +1655,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { lmStudioModelId, lmStudioBaseUrl, anthropicBaseUrl, + anthropicThinking, geminiApiKey, openAiNativeApiKey, deepSeekApiKey, @@ -1701,6 +1703,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { this.updateGlobalState("lmStudioModelId", lmStudioModelId), this.updateGlobalState("lmStudioBaseUrl", lmStudioBaseUrl), this.updateGlobalState("anthropicBaseUrl", anthropicBaseUrl), + this.updateGlobalState("anthropicThinking", anthropicThinking), this.storeSecret("geminiApiKey", geminiApiKey), this.storeSecret("openAiNativeApiKey", openAiNativeApiKey), this.storeSecret("deepSeekApiKey", deepSeekApiKey), @@ -2510,6 +2513,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { lmStudioModelId, lmStudioBaseUrl, anthropicBaseUrl, + anthropicThinking, geminiApiKey, openAiNativeApiKey, deepSeekApiKey, @@ -2592,6 +2596,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { this.getGlobalState("lmStudioModelId") as Promise, this.getGlobalState("lmStudioBaseUrl") as Promise, this.getGlobalState("anthropicBaseUrl") as Promise, + this.getGlobalState("anthropicThinking") as Promise, this.getSecret("geminiApiKey") as Promise, this.getSecret("openAiNativeApiKey") as Promise, this.getSecret("deepSeekApiKey") as Promise, @@ -2691,6 +2696,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { lmStudioModelId, lmStudioBaseUrl, anthropicBaseUrl, + anthropicThinking, geminiApiKey, openAiNativeApiKey, deepSeekApiKey, diff --git a/src/shared/__tests__/checkExistApiConfig.test.ts b/src/shared/__tests__/checkExistApiConfig.test.ts index 914f4933d62..62517d69584 100644 --- a/src/shared/__tests__/checkExistApiConfig.test.ts +++ b/src/shared/__tests__/checkExistApiConfig.test.ts @@ -32,6 +32,7 @@ describe("checkExistKey", () => { apiKey: "test-key", apiProvider: undefined, anthropicBaseUrl: undefined, + anthropicThinking: undefined, } expect(checkExistKey(config)).toBe(true) }) diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 48529d48527..3761ddce500 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -2,9 +2,10 @@ import { memo, useCallback, useMemo, useState } from "react" import { useDebounce, useEvent } from "react-use" import { Checkbox, Dropdown, Pane, type DropdownOption } from "vscrui" import { VSCodeLink, VSCodeRadio, VSCodeRadioGroup, VSCodeTextField } from "@vscode/webview-ui-toolkit/react" -import { TemperatureControl } from "./TemperatureControl" import * as vscodemodels from "vscode" +import { Slider } from "@/components/ui" + import { ApiConfiguration, ModelInfo, @@ -34,6 +35,7 @@ import { requestyDefaultModelInfo, 
} from "../../../../src/shared/api" import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage" + import { vscode } from "../../utils/vscode" import VSCodeButtonLink from "../common/VSCodeButtonLink" import { OpenRouterModelPicker } from "./OpenRouterModelPicker" @@ -43,7 +45,7 @@ import { UnboundModelPicker } from "./UnboundModelPicker" import { ModelInfoView } from "./ModelInfoView" import { DROPDOWN_Z_INDEX } from "./styles" import { RequestyModelPicker } from "./RequestyModelPicker" -import { Slider } from "../ui" +import { TemperatureControl } from "./TemperatureControl" interface ApiOptionsProps { uriScheme: string | undefined @@ -66,8 +68,7 @@ const ApiOptions = ({ const [lmStudioModels, setLmStudioModels] = useState([]) const [vsCodeLmModels, setVsCodeLmModels] = useState([]) const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl) - const [anthropicThinkingEnabled, setAnthropicThinkingEnabled] = useState(!!apiConfiguration?.anthropicThinking) - const [anthropicThinkingBudget, setAnthropicThinkingBudget] = useState(apiConfiguration?.anthropicThinking ?? 1024) + const [anthropicThinkingBudget, setAnthropicThinkingBudget] = useState(apiConfiguration?.anthropicThinking) const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion) const [openRouterBaseUrlSelected, setOpenRouterBaseUrlSelected] = useState(!!apiConfiguration?.openRouterBaseUrl) const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false) @@ -188,6 +189,7 @@ const ApiOptions = ({ checked={anthropicBaseUrlSelected} onChange={(checked: boolean) => { setAnthropicBaseUrlSelected(checked) + if (!checked) { setApiConfigurationField("anthropicBaseUrl", "") } @@ -387,6 +389,7 @@ const ApiOptions = ({ checked={openRouterBaseUrlSelected} onChange={(checked: boolean) => { setOpenRouterBaseUrlSelected(checked) + if (!checked) { setApiConfigurationField("openRouterBaseUrl", "") } @@ -510,7 +513,7 @@ const ApiOptions = ({
)} - {apiConfiguration?.apiProvider === "vertex" && ( + {selectedProvider === "vertex" && (
{ setAzureApiVersionSelected(checked) + if (!checked) { setApiConfigurationField("azureApiVersion", "") } @@ -1260,23 +1264,33 @@ const ApiOptions = ({ )} - {selectedProvider === "anthropic" && ( + {selectedProvider === "anthropic" && selectedModelId === "claude-3-7-sonnet-20250219" && (
- + { + const budget = checked ? 16_384 : undefined + setAnthropicThinkingBudget(budget) + setApiConfigurationField("anthropicThinking", budget) + }}> Thinking? - {anthropicThinkingEnabled && ( + {anthropicThinkingBudget && ( <>
- Number of tokens Claude is allowed to use for its internal reasoning process + Number of tokens Claude is allowed to use for its internal reasoning process.
setAnthropicThinkingBudget(value[0])} + onValueChange={(value) => { + const budget = value[0] + setAnthropicThinkingBudget(budget) + setApiConfigurationField("anthropicThinking", budget) + }} />
{anthropicThinkingBudget}
diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index 0d80580b491..29384826acf 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -1,15 +1,7 @@ -import { VSCodeButton, VSCodeCheckbox, VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react" import { forwardRef, memo, useCallback, useEffect, useImperativeHandle, useRef, useState } from "react" -import { ExtensionStateContextType, useExtensionState } from "../../context/ExtensionStateContext" -import { validateApiConfiguration, validateModelId } from "../../utils/validate" -import { vscode } from "../../utils/vscode" -import ApiOptions from "./ApiOptions" -import ExperimentalFeature from "./ExperimentalFeature" -import { EXPERIMENT_IDS, experimentConfigsMap, ExperimentId } from "../../../../src/shared/experiments" -import ApiConfigManager from "./ApiConfigManager" -import { Dropdown } from "vscrui" -import type { DropdownOption } from "vscrui" -import { ApiConfiguration } from "../../../../src/shared/api" +import { VSCodeButton, VSCodeCheckbox, VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react" +import { Dropdown, type DropdownOption } from "vscrui" + import { AlertDialog, AlertDialogContent, @@ -19,7 +11,17 @@ import { AlertDialogAction, AlertDialogHeader, AlertDialogFooter, -} from "../ui/alert-dialog" +} from "@/components/ui" + +import { vscode } from "../../utils/vscode" +import { validateApiConfiguration, validateModelId } from "../../utils/validate" +import { ExtensionStateContextType, useExtensionState } from "../../context/ExtensionStateContext" +import { EXPERIMENT_IDS, experimentConfigsMap, ExperimentId } from "../../../../src/shared/experiments" +import { ApiConfiguration } from "../../../../src/shared/api" + +import ExperimentalFeature from "./ExperimentalFeature" +import ApiConfigManager from "./ApiConfigManager" +import ApiOptions from "./ApiOptions" type SettingsViewProps = { onDone: () => void @@ -104,7 +106,9 @@ const SettingsView = forwardRef(({ onDone }, if (prevState.apiConfiguration?.[field] === value) { return prevState } + setChangeDetected(true) + return { ...prevState, apiConfiguration: { @@ -131,7 +135,9 @@ const SettingsView = forwardRef(({ onDone }, }, []) const handleSubmit = () => { + console.log("handleSubmit", apiConfiguration) const apiValidationResult = validateApiConfiguration(apiConfiguration) + const modelIdValidationResult = validateModelId( apiConfiguration, extensionState.glamaModels, @@ -140,6 +146,7 @@ const SettingsView = forwardRef(({ onDone }, setApiErrorMessage(apiValidationResult) setModelIdErrorMessage(modelIdValidationResult) + if (!apiValidationResult && !modelIdValidationResult) { vscode.postMessage({ type: "alwaysAllowReadOnly", bool: alwaysAllowReadOnly }) vscode.postMessage({ type: "alwaysAllowWrite", bool: alwaysAllowWrite }) @@ -162,18 +169,9 @@ const SettingsView = forwardRef(({ onDone }, vscode.postMessage({ type: "rateLimitSeconds", value: rateLimitSeconds }) vscode.postMessage({ type: "maxOpenTabsContext", value: maxOpenTabsContext }) vscode.postMessage({ type: "currentApiConfigName", text: currentApiConfigName }) - vscode.postMessage({ - type: "updateExperimental", - values: experiments, - }) + vscode.postMessage({ type: "updateExperimental", values: experiments }) vscode.postMessage({ type: "alwaysAllowModeSwitch", bool: alwaysAllowModeSwitch }) - - vscode.postMessage({ - type: "upsertApiConfiguration", - 
text: currentApiConfigName, - apiConfiguration, - }) - // onDone() + vscode.postMessage({ type: "upsertApiConfiguration", text: currentApiConfigName, apiConfiguration }) setChangeDetected(false) } } diff --git a/webview-ui/src/components/ui/index.ts b/webview-ui/src/components/ui/index.ts index bf00aa64425..6eb8dd25ba9 100644 --- a/webview-ui/src/components/ui/index.ts +++ b/webview-ui/src/components/ui/index.ts @@ -1,3 +1,4 @@ +export * from "./alert-dialog" export * from "./autosize-textarea" export * from "./badge" export * from "./button" From 52030b12e1bee2fbf6164fdc2c6009a5346b599d Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 22:13:45 -0800 Subject: [PATCH 038/145] Remove unused code --- src/api/providers/bedrock.ts | 2 +- .../__tests__/bedrock-converse-format.test.ts | 381 +++++++----------- .../transform/__tests__/openai-format.test.ts | 348 +++++----------- .../__tests__/vscode-lm-format.test.ts | 259 +++++------- src/api/transform/bedrock-converse-format.ts | 52 +-- src/api/transform/openai-format.ts | 60 --- src/api/transform/vscode-lm-format.ts | 47 --- 7 files changed, 344 insertions(+), 805 deletions(-) diff --git a/src/api/providers/bedrock.ts b/src/api/providers/bedrock.ts index 8f897fda2a7..3bca70338df 100644 --- a/src/api/providers/bedrock.ts +++ b/src/api/providers/bedrock.ts @@ -9,7 +9,7 @@ import { Anthropic } from "@anthropic-ai/sdk" import { ApiHandler, SingleCompletionHandler } from "../" import { ApiHandlerOptions, BedrockModelId, ModelInfo, bedrockDefaultModelId, bedrockModels } from "../../shared/api" import { ApiStream } from "../transform/stream" -import { convertToBedrockConverseMessages, convertToAnthropicMessage } from "../transform/bedrock-converse-format" +import { convertToBedrockConverseMessages } from "../transform/bedrock-converse-format" const BEDROCK_DEFAULT_TEMPERATURE = 0.3 diff --git a/src/api/transform/__tests__/bedrock-converse-format.test.ts b/src/api/transform/__tests__/bedrock-converse-format.test.ts index fdd29c75bf2..c56b8a07fc4 100644 --- a/src/api/transform/__tests__/bedrock-converse-format.test.ts +++ b/src/api/transform/__tests__/bedrock-converse-format.test.ts @@ -1,254 +1,167 @@ // npx jest src/api/transform/__tests__/bedrock-converse-format.test.ts -import { convertToBedrockConverseMessages, convertToAnthropicMessage } from "../bedrock-converse-format" +import { convertToBedrockConverseMessages } from "../bedrock-converse-format" import { Anthropic } from "@anthropic-ai/sdk" import { ContentBlock, ToolResultContentBlock } from "@aws-sdk/client-bedrock-runtime" -import { StreamEvent } from "../../providers/bedrock" - -describe("bedrock-converse-format", () => { - describe("convertToBedrockConverseMessages", () => { - test("converts simple text messages correctly", () => { - const messages: Anthropic.Messages.MessageParam[] = [ - { role: "user", content: "Hello" }, - { role: "assistant", content: "Hi there" }, - ] - - const result = convertToBedrockConverseMessages(messages) - - expect(result).toEqual([ - { - role: "user", - content: [{ text: "Hello" }], - }, - { - role: "assistant", - content: [{ text: "Hi there" }], - }, - ]) - }) - - test("converts messages with images correctly", () => { - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { - type: "text", - text: "Look at this image:", - }, - { - type: "image", - source: { - type: "base64", - data: "SGVsbG8=", // "Hello" in base64 - media_type: "image/jpeg" as const, - }, - }, - ], - }, - ] - - const result = 
convertToBedrockConverseMessages(messages) - - if (!result[0] || !result[0].content) { - fail("Expected result to have content") - return - } - - expect(result[0].role).toBe("user") - expect(result[0].content).toHaveLength(2) - expect(result[0].content[0]).toEqual({ text: "Look at this image:" }) - - const imageBlock = result[0].content[1] as ContentBlock - if ("image" in imageBlock && imageBlock.image && imageBlock.image.source) { - expect(imageBlock.image.format).toBe("jpeg") - expect(imageBlock.image.source).toBeDefined() - expect(imageBlock.image.source.bytes).toBeDefined() - } else { - fail("Expected image block not found") - } - }) - - test("converts tool use messages correctly", () => { - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "assistant", - content: [ - { - type: "tool_use", - id: "test-id", - name: "read_file", - input: { - path: "test.txt", - }, - }, - ], - }, - ] - - const result = convertToBedrockConverseMessages(messages) - - if (!result[0] || !result[0].content) { - fail("Expected result to have content") - return - } - - expect(result[0].role).toBe("assistant") - const toolBlock = result[0].content[0] as ContentBlock - if ("toolUse" in toolBlock && toolBlock.toolUse) { - expect(toolBlock.toolUse).toEqual({ - toolUseId: "test-id", - name: "read_file", - input: "\n\ntest.txt\n\n", - }) - } else { - fail("Expected tool use block not found") - } - }) - - test("converts tool result messages correctly", () => { - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "assistant", - content: [ - { - type: "tool_result", - tool_use_id: "test-id", - content: [{ type: "text", text: "File contents here" }], - }, - ], - }, - ] - - const result = convertToBedrockConverseMessages(messages) - - if (!result[0] || !result[0].content) { - fail("Expected result to have content") - return - } - - expect(result[0].role).toBe("assistant") - const resultBlock = result[0].content[0] as ContentBlock - if ("toolResult" in resultBlock && resultBlock.toolResult) { - const expectedContent: ToolResultContentBlock[] = [{ text: "File contents here" }] - expect(resultBlock.toolResult).toEqual({ - toolUseId: "test-id", - content: expectedContent, - status: "success", - }) - } else { - fail("Expected tool result block not found") - } - }) - - test("handles text content correctly", () => { - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { - type: "text", - text: "Hello world", - }, - ], - }, - ] - - const result = convertToBedrockConverseMessages(messages) - - if (!result[0] || !result[0].content) { - fail("Expected result to have content") - return - } - - expect(result[0].role).toBe("user") - expect(result[0].content).toHaveLength(1) - const textBlock = result[0].content[0] as ContentBlock - expect(textBlock).toEqual({ text: "Hello world" }) - }) + +describe("convertToBedrockConverseMessages", () => { + test("converts simple text messages correctly", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { role: "user", content: "Hello" }, + { role: "assistant", content: "Hi there" }, + ] + + const result = convertToBedrockConverseMessages(messages) + + expect(result).toEqual([ + { + role: "user", + content: [{ text: "Hello" }], + }, + { + role: "assistant", + content: [{ text: "Hi there" }], + }, + ]) }) - describe("convertToAnthropicMessage", () => { - test("converts metadata events correctly", () => { - const event: StreamEvent = { - metadata: { - usage: { - inputTokens: 10, - outputTokens: 20, + test("converts 
messages with images correctly", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: [ + { + type: "text", + text: "Look at this image:", }, - }, - } - - const result = convertToAnthropicMessage(event, "test-model") + { + type: "image", + source: { + type: "base64", + data: "SGVsbG8=", // "Hello" in base64 + media_type: "image/jpeg" as const, + }, + }, + ], + }, + ] + + const result = convertToBedrockConverseMessages(messages) + + if (!result[0] || !result[0].content) { + fail("Expected result to have content") + return + } + + expect(result[0].role).toBe("user") + expect(result[0].content).toHaveLength(2) + expect(result[0].content[0]).toEqual({ text: "Look at this image:" }) + + const imageBlock = result[0].content[1] as ContentBlock + if ("image" in imageBlock && imageBlock.image && imageBlock.image.source) { + expect(imageBlock.image.format).toBe("jpeg") + expect(imageBlock.image.source).toBeDefined() + expect(imageBlock.image.source.bytes).toBeDefined() + } else { + fail("Expected image block not found") + } + }) - expect(result).toEqual({ - id: "", - type: "message", + test("converts tool use messages correctly", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { role: "assistant", - model: "test-model", - usage: { - input_tokens: 10, - output_tokens: 20, - cache_creation_input_tokens: null, - cache_read_input_tokens: null, - }, - }) - }) - - test("converts content block start events correctly", () => { - const event: StreamEvent = { - contentBlockStart: { - start: { - text: "Hello", + content: [ + { + type: "tool_use", + id: "test-id", + name: "read_file", + input: { + path: "test.txt", + }, }, - }, - } - - const result = convertToAnthropicMessage(event, "test-model") + ], + }, + ] + + const result = convertToBedrockConverseMessages(messages) + + if (!result[0] || !result[0].content) { + fail("Expected result to have content") + return + } + + expect(result[0].role).toBe("assistant") + const toolBlock = result[0].content[0] as ContentBlock + if ("toolUse" in toolBlock && toolBlock.toolUse) { + expect(toolBlock.toolUse).toEqual({ + toolUseId: "test-id", + name: "read_file", + input: "\n\ntest.txt\n\n", + }) + } else { + fail("Expected tool use block not found") + } + }) - expect(result).toEqual({ - type: "message", + test("converts tool result messages correctly", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { role: "assistant", - content: [{ type: "text", text: "Hello", citations: null }], - model: "test-model", + content: [ + { + type: "tool_result", + tool_use_id: "test-id", + content: [{ type: "text", text: "File contents here" }], + }, + ], + }, + ] + + const result = convertToBedrockConverseMessages(messages) + + if (!result[0] || !result[0].content) { + fail("Expected result to have content") + return + } + + expect(result[0].role).toBe("assistant") + const resultBlock = result[0].content[0] as ContentBlock + if ("toolResult" in resultBlock && resultBlock.toolResult) { + const expectedContent: ToolResultContentBlock[] = [{ text: "File contents here" }] + expect(resultBlock.toolResult).toEqual({ + toolUseId: "test-id", + content: expectedContent, + status: "success", }) - }) + } else { + fail("Expected tool result block not found") + } + }) - test("converts content block delta events correctly", () => { - const event: StreamEvent = { - contentBlockDelta: { - delta: { - text: " world", + test("handles text content correctly", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { + 
role: "user", + content: [ + { + type: "text", + text: "Hello world", }, - }, - } + ], + }, + ] - const result = convertToAnthropicMessage(event, "test-model") + const result = convertToBedrockConverseMessages(messages) - expect(result).toEqual({ - type: "message", - role: "assistant", - content: [{ type: "text", text: " world", citations: null }], - model: "test-model", - }) - }) - - test("converts message stop events correctly", () => { - const event: StreamEvent = { - messageStop: { - stopReason: "end_turn" as const, - }, - } + if (!result[0] || !result[0].content) { + fail("Expected result to have content") + return + } - const result = convertToAnthropicMessage(event, "test-model") - - expect(result).toEqual({ - type: "message", - role: "assistant", - stop_reason: "end_turn", - stop_sequence: null, - model: "test-model", - }) - }) + expect(result[0].role).toBe("user") + expect(result[0].content).toHaveLength(1) + const textBlock = result[0].content[0] as ContentBlock + expect(textBlock).toEqual({ text: "Hello world" }) }) }) diff --git a/src/api/transform/__tests__/openai-format.test.ts b/src/api/transform/__tests__/openai-format.test.ts index 812208acd1e..f0aa5e1a563 100644 --- a/src/api/transform/__tests__/openai-format.test.ts +++ b/src/api/transform/__tests__/openai-format.test.ts @@ -1,281 +1,131 @@ // npx jest src/api/transform/__tests__/openai-format.test.ts -import { convertToOpenAiMessages, convertToAnthropicMessage } from "../openai-format" import { Anthropic } from "@anthropic-ai/sdk" import OpenAI from "openai" -type PartialChatCompletion = Omit & { - choices: Array< - Partial & { - message: OpenAI.Chat.Completions.ChatCompletion.Choice["message"] - finish_reason: string - index: number - } - > -} +import { convertToOpenAiMessages } from "../openai-format" -describe("OpenAI Format Transformations", () => { - describe("convertToOpenAiMessages", () => { - it("should convert simple text messages", () => { - const anthropicMessages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: "Hello", - }, - { - role: "assistant", - content: "Hi there!", - }, - ] - - const openAiMessages = convertToOpenAiMessages(anthropicMessages) - expect(openAiMessages).toHaveLength(2) - expect(openAiMessages[0]).toEqual({ +describe("convertToOpenAiMessages", () => { + it("should convert simple text messages", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { role: "user", content: "Hello", - }) - expect(openAiMessages[1]).toEqual({ + }, + { role: "assistant", content: "Hi there!", - }) + }, + ] + + const openAiMessages = convertToOpenAiMessages(anthropicMessages) + expect(openAiMessages).toHaveLength(2) + expect(openAiMessages[0]).toEqual({ + role: "user", + content: "Hello", }) - - it("should handle messages with image content", () => { - const anthropicMessages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { - type: "text", - text: "What is in this image?", - }, - { - type: "image", - source: { - type: "base64", - media_type: "image/jpeg", - data: "base64data", - }, - }, - ], - }, - ] - - const openAiMessages = convertToOpenAiMessages(anthropicMessages) - expect(openAiMessages).toHaveLength(1) - expect(openAiMessages[0].role).toBe("user") - - const content = openAiMessages[0].content as Array<{ - type: string - text?: string - image_url?: { url: string } - }> - - expect(Array.isArray(content)).toBe(true) - expect(content).toHaveLength(2) - expect(content[0]).toEqual({ type: "text", text: "What is in this image?" 
}) - expect(content[1]).toEqual({ - type: "image_url", - image_url: { url: "data:image/jpeg;base64,base64data" }, - }) - }) - - it("should handle assistant messages with tool use", () => { - const anthropicMessages: Anthropic.Messages.MessageParam[] = [ - { - role: "assistant", - content: [ - { - type: "text", - text: "Let me check the weather.", - }, - { - type: "tool_use", - id: "weather-123", - name: "get_weather", - input: { city: "London" }, - }, - ], - }, - ] - - const openAiMessages = convertToOpenAiMessages(anthropicMessages) - expect(openAiMessages).toHaveLength(1) - - const assistantMessage = openAiMessages[0] as OpenAI.Chat.ChatCompletionAssistantMessageParam - expect(assistantMessage.role).toBe("assistant") - expect(assistantMessage.content).toBe("Let me check the weather.") - expect(assistantMessage.tool_calls).toHaveLength(1) - expect(assistantMessage.tool_calls![0]).toEqual({ - id: "weather-123", - type: "function", - function: { - name: "get_weather", - arguments: JSON.stringify({ city: "London" }), - }, - }) - }) - - it("should handle user messages with tool results", () => { - const anthropicMessages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { - type: "tool_result", - tool_use_id: "weather-123", - content: "Current temperature in London: 20°C", - }, - ], - }, - ] - - const openAiMessages = convertToOpenAiMessages(anthropicMessages) - expect(openAiMessages).toHaveLength(1) - - const toolMessage = openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam - expect(toolMessage.role).toBe("tool") - expect(toolMessage.tool_call_id).toBe("weather-123") - expect(toolMessage.content).toBe("Current temperature in London: 20°C") + expect(openAiMessages[1]).toEqual({ + role: "assistant", + content: "Hi there!", }) }) - describe("convertToAnthropicMessage", () => { - it("should convert simple completion", () => { - const openAiCompletion: PartialChatCompletion = { - id: "completion-123", - model: "gpt-4", - choices: [ + it("should handle messages with image content", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: [ + { + type: "text", + text: "What is in this image?", + }, { - message: { - role: "assistant", - content: "Hello there!", - refusal: null, + type: "image", + source: { + type: "base64", + media_type: "image/jpeg", + data: "base64data", }, - finish_reason: "stop", - index: 0, }, ], - usage: { - prompt_tokens: 10, - completion_tokens: 5, - total_tokens: 15, - }, - created: 123456789, - object: "chat.completion", - } - - const anthropicMessage = convertToAnthropicMessage( - openAiCompletion as OpenAI.Chat.Completions.ChatCompletion, - ) - expect(anthropicMessage.id).toBe("completion-123") - expect(anthropicMessage.role).toBe("assistant") - expect(anthropicMessage.content).toHaveLength(1) - expect(anthropicMessage.content[0]).toEqual({ - type: "text", - text: "Hello there!", - citations: null, - }) - expect(anthropicMessage.stop_reason).toBe("end_turn") - expect(anthropicMessage.usage).toEqual({ - input_tokens: 10, - output_tokens: 5, - cache_creation_input_tokens: null, - cache_read_input_tokens: null, - }) + }, + ] + + const openAiMessages = convertToOpenAiMessages(anthropicMessages) + expect(openAiMessages).toHaveLength(1) + expect(openAiMessages[0].role).toBe("user") + + const content = openAiMessages[0].content as Array<{ + type: string + text?: string + image_url?: { url: string } + }> + + expect(Array.isArray(content)).toBe(true) + expect(content).toHaveLength(2) + 
expect(content[0]).toEqual({ type: "text", text: "What is in this image?" }) + expect(content[1]).toEqual({ + type: "image_url", + image_url: { url: "data:image/jpeg;base64,base64data" }, }) + }) - it("should handle tool calls in completion", () => { - const openAiCompletion: PartialChatCompletion = { - id: "completion-123", - model: "gpt-4", - choices: [ + it("should handle assistant messages with tool use", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "assistant", + content: [ { - message: { - role: "assistant", - content: "Let me check the weather.", - tool_calls: [ - { - id: "weather-123", - type: "function", - function: { - name: "get_weather", - arguments: '{"city":"London"}', - }, - }, - ], - refusal: null, - }, - finish_reason: "tool_calls", - index: 0, + type: "text", + text: "Let me check the weather.", + }, + { + type: "tool_use", + id: "weather-123", + name: "get_weather", + input: { city: "London" }, }, ], - usage: { - prompt_tokens: 15, - completion_tokens: 8, - total_tokens: 23, - }, - created: 123456789, - object: "chat.completion", - } - - const anthropicMessage = convertToAnthropicMessage( - openAiCompletion as OpenAI.Chat.Completions.ChatCompletion, - ) - expect(anthropicMessage.content).toHaveLength(2) - expect(anthropicMessage.content[0]).toEqual({ - type: "text", - text: "Let me check the weather.", - citations: null, - }) - expect(anthropicMessage.content[1]).toEqual({ - type: "tool_use", - id: "weather-123", + }, + ] + + const openAiMessages = convertToOpenAiMessages(anthropicMessages) + expect(openAiMessages).toHaveLength(1) + + const assistantMessage = openAiMessages[0] as OpenAI.Chat.ChatCompletionAssistantMessageParam + expect(assistantMessage.role).toBe("assistant") + expect(assistantMessage.content).toBe("Let me check the weather.") + expect(assistantMessage.tool_calls).toHaveLength(1) + expect(assistantMessage.tool_calls![0]).toEqual({ + id: "weather-123", + type: "function", + function: { name: "get_weather", - input: { city: "London" }, - }) - expect(anthropicMessage.stop_reason).toBe("tool_use") + arguments: JSON.stringify({ city: "London" }), + }, }) + }) - it("should handle invalid tool call arguments", () => { - const openAiCompletion: PartialChatCompletion = { - id: "completion-123", - model: "gpt-4", - choices: [ + it("should handle user messages with tool results", () => { + const anthropicMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: [ { - message: { - role: "assistant", - content: "Testing invalid arguments", - tool_calls: [ - { - id: "test-123", - type: "function", - function: { - name: "test_function", - arguments: "invalid json", - }, - }, - ], - refusal: null, - }, - finish_reason: "tool_calls", - index: 0, + type: "tool_result", + tool_use_id: "weather-123", + content: "Current temperature in London: 20°C", }, ], - created: 123456789, - object: "chat.completion", - } + }, + ] - const anthropicMessage = convertToAnthropicMessage( - openAiCompletion as OpenAI.Chat.Completions.ChatCompletion, - ) - expect(anthropicMessage.content).toHaveLength(2) - expect(anthropicMessage.content[1]).toEqual({ - type: "tool_use", - id: "test-123", - name: "test_function", - input: {}, // Should default to empty object for invalid JSON - }) - }) + const openAiMessages = convertToOpenAiMessages(anthropicMessages) + expect(openAiMessages).toHaveLength(1) + + const toolMessage = openAiMessages[0] as OpenAI.Chat.ChatCompletionToolMessageParam + expect(toolMessage.role).toBe("tool") + 
expect(toolMessage.tool_call_id).toBe("weather-123") + expect(toolMessage.content).toBe("Current temperature in London: 20°C") }) }) diff --git a/src/api/transform/__tests__/vscode-lm-format.test.ts b/src/api/transform/__tests__/vscode-lm-format.test.ts index eb800e2b7a8..eea8de7c9a5 100644 --- a/src/api/transform/__tests__/vscode-lm-format.test.ts +++ b/src/api/transform/__tests__/vscode-lm-format.test.ts @@ -1,8 +1,8 @@ // npx jest src/api/transform/__tests__/vscode-lm-format.test.ts import { Anthropic } from "@anthropic-ai/sdk" -import * as vscode from "vscode" -import { convertToVsCodeLmMessages, convertToAnthropicRole, convertToAnthropicMessage } from "../vscode-lm-format" + +import { convertToVsCodeLmMessages, convertToAnthropicRole } from "../vscode-lm-format" // Mock crypto const mockCrypto = { @@ -29,14 +29,6 @@ interface MockLanguageModelToolResultPart { parts: MockLanguageModelTextPart[] } -type MockMessageContent = MockLanguageModelTextPart | MockLanguageModelToolCallPart | MockLanguageModelToolResultPart - -interface MockLanguageModelChatMessage { - role: string - name?: string - content: MockMessageContent[] -} - // Mock vscode namespace jest.mock("vscode", () => { const LanguageModelChatMessageRole = { @@ -86,174 +78,115 @@ jest.mock("vscode", () => { } }) -describe("vscode-lm-format", () => { - describe("convertToVsCodeLmMessages", () => { - it("should convert simple string messages", () => { - const messages: Anthropic.Messages.MessageParam[] = [ - { role: "user", content: "Hello" }, - { role: "assistant", content: "Hi there" }, - ] - - const result = convertToVsCodeLmMessages(messages) - - expect(result).toHaveLength(2) - expect(result[0].role).toBe("user") - expect((result[0].content[0] as MockLanguageModelTextPart).value).toBe("Hello") - expect(result[1].role).toBe("assistant") - expect((result[1].content[0] as MockLanguageModelTextPart).value).toBe("Hi there") - }) - - it("should handle complex user messages with tool results", () => { - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { type: "text", text: "Here is the result:" }, - { - type: "tool_result", - tool_use_id: "tool-1", - content: "Tool output", - }, - ], - }, - ] - - const result = convertToVsCodeLmMessages(messages) +describe("convertToVsCodeLmMessages", () => { + it("should convert simple string messages", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { role: "user", content: "Hello" }, + { role: "assistant", content: "Hi there" }, + ] - expect(result).toHaveLength(1) - expect(result[0].role).toBe("user") - expect(result[0].content).toHaveLength(2) - const [toolResult, textContent] = result[0].content as [ - MockLanguageModelToolResultPart, - MockLanguageModelTextPart, - ] - expect(toolResult.type).toBe("tool_result") - expect(textContent.type).toBe("text") - }) + const result = convertToVsCodeLmMessages(messages) - it("should handle complex assistant messages with tool calls", () => { - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "assistant", - content: [ - { type: "text", text: "Let me help you with that." 
}, - { - type: "tool_use", - id: "tool-1", - name: "calculator", - input: { operation: "add", numbers: [2, 2] }, - }, - ], - }, - ] - - const result = convertToVsCodeLmMessages(messages) - - expect(result).toHaveLength(1) - expect(result[0].role).toBe("assistant") - expect(result[0].content).toHaveLength(2) - const [toolCall, textContent] = result[0].content as [ - MockLanguageModelToolCallPart, - MockLanguageModelTextPart, - ] - expect(toolCall.type).toBe("tool_call") - expect(textContent.type).toBe("text") - }) - - it("should handle image blocks with appropriate placeholders", () => { - const messages: Anthropic.Messages.MessageParam[] = [ - { - role: "user", - content: [ - { type: "text", text: "Look at this:" }, - { - type: "image", - source: { - type: "base64", - media_type: "image/png", - data: "base64data", - }, - }, - ], - }, - ] - - const result = convertToVsCodeLmMessages(messages) - - expect(result).toHaveLength(1) - const imagePlaceholder = result[0].content[1] as MockLanguageModelTextPart - expect(imagePlaceholder.value).toContain("[Image (base64): image/png not supported by VSCode LM API]") - }) + expect(result).toHaveLength(2) + expect(result[0].role).toBe("user") + expect((result[0].content[0] as MockLanguageModelTextPart).value).toBe("Hello") + expect(result[1].role).toBe("assistant") + expect((result[1].content[0] as MockLanguageModelTextPart).value).toBe("Hi there") }) - describe("convertToAnthropicRole", () => { - it("should convert assistant role correctly", () => { - const result = convertToAnthropicRole("assistant" as any) - expect(result).toBe("assistant") - }) - - it("should convert user role correctly", () => { - const result = convertToAnthropicRole("user" as any) - expect(result).toBe("user") - }) - - it("should return null for unknown roles", () => { - const result = convertToAnthropicRole("unknown" as any) - expect(result).toBeNull() - }) + it("should handle complex user messages with tool results", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: [ + { type: "text", text: "Here is the result:" }, + { + type: "tool_result", + tool_use_id: "tool-1", + content: "Tool output", + }, + ], + }, + ] + + const result = convertToVsCodeLmMessages(messages) + + expect(result).toHaveLength(1) + expect(result[0].role).toBe("user") + expect(result[0].content).toHaveLength(2) + const [toolResult, textContent] = result[0].content as [ + MockLanguageModelToolResultPart, + MockLanguageModelTextPart, + ] + expect(toolResult.type).toBe("tool_result") + expect(textContent.type).toBe("text") }) - describe("convertToAnthropicMessage", () => { - it("should convert assistant message with text content", async () => { - const vsCodeMessage = { + it("should handle complex assistant messages with tool calls", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { role: "assistant", - name: "assistant", - content: [new vscode.LanguageModelTextPart("Hello")], - } + content: [ + { type: "text", text: "Let me help you with that." 
}, + { + type: "tool_use", + id: "tool-1", + name: "calculator", + input: { operation: "add", numbers: [2, 2] }, + }, + ], + }, + ] - const result = await convertToAnthropicMessage(vsCodeMessage as any) + const result = convertToVsCodeLmMessages(messages) - expect(result.role).toBe("assistant") - expect(result.content).toHaveLength(1) - expect(result.content[0]).toEqual({ - type: "text", - text: "Hello", - citations: null, - }) - expect(result.id).toBe("test-uuid") - }) + expect(result).toHaveLength(1) + expect(result[0].role).toBe("assistant") + expect(result[0].content).toHaveLength(2) + const [toolCall, textContent] = result[0].content as [MockLanguageModelToolCallPart, MockLanguageModelTextPart] + expect(toolCall.type).toBe("tool_call") + expect(textContent.type).toBe("text") + }) - it("should convert assistant message with tool calls", async () => { - const vsCodeMessage = { - role: "assistant", - name: "assistant", + it("should handle image blocks with appropriate placeholders", () => { + const messages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", content: [ - new vscode.LanguageModelToolCallPart("call-1", "calculator", { operation: "add", numbers: [2, 2] }), + { type: "text", text: "Look at this:" }, + { + type: "image", + source: { + type: "base64", + media_type: "image/png", + data: "base64data", + }, + }, ], - } + }, + ] - const result = await convertToAnthropicMessage(vsCodeMessage as any) + const result = convertToVsCodeLmMessages(messages) - expect(result.content).toHaveLength(1) - expect(result.content[0]).toEqual({ - type: "tool_use", - id: "call-1", - name: "calculator", - input: { operation: "add", numbers: [2, 2] }, - }) - expect(result.id).toBe("test-uuid") - }) + expect(result).toHaveLength(1) + const imagePlaceholder = result[0].content[1] as MockLanguageModelTextPart + expect(imagePlaceholder.value).toContain("[Image (base64): image/png not supported by VSCode LM API]") + }) +}) - it("should throw error for non-assistant messages", async () => { - const vsCodeMessage = { - role: "user", - name: "user", - content: [new vscode.LanguageModelTextPart("Hello")], - } +describe("convertToAnthropicRole", () => { + it("should convert assistant role correctly", () => { + const result = convertToAnthropicRole("assistant" as any) + expect(result).toBe("assistant") + }) + + it("should convert user role correctly", () => { + const result = convertToAnthropicRole("user" as any) + expect(result).toBe("user") + }) - await expect(convertToAnthropicMessage(vsCodeMessage as any)).rejects.toThrow( - "Roo Code : Only assistant messages are supported.", - ) - }) + it("should return null for unknown roles", () => { + const result = convertToAnthropicRole("unknown" as any) + expect(result).toBeNull() }) }) diff --git a/src/api/transform/bedrock-converse-format.ts b/src/api/transform/bedrock-converse-format.ts index e4dc9eecc85..68d21e4d5bc 100644 --- a/src/api/transform/bedrock-converse-format.ts +++ b/src/api/transform/bedrock-converse-format.ts @@ -1,9 +1,7 @@ import { Anthropic } from "@anthropic-ai/sdk" -import { MessageContent } from "../../shared/api" import { ConversationRole, Message, ContentBlock } from "@aws-sdk/client-bedrock-runtime" -// Import StreamEvent type from bedrock.ts -import { StreamEvent } from "../providers/bedrock" +import { MessageContent } from "../../shared/api" /** * Convert Anthropic messages to Bedrock Converse format @@ -175,51 +173,3 @@ export function convertToBedrockConverseMessages(anthropicMessages: Anthropic.Me } }) } - -/** - * Convert 
Bedrock Converse stream events to Anthropic message format - */ -export function convertToAnthropicMessage( - streamEvent: StreamEvent, - modelId: string, -): Partial { - // Handle metadata events - if (streamEvent.metadata?.usage) { - return { - id: "", // Bedrock doesn't provide message IDs - type: "message", - role: "assistant", - model: modelId, - usage: { - input_tokens: streamEvent.metadata.usage.inputTokens || 0, - output_tokens: streamEvent.metadata.usage.outputTokens || 0, - cache_creation_input_tokens: null, - cache_read_input_tokens: null, - }, - } - } - - // Handle content blocks - const text = streamEvent.contentBlockStart?.start?.text || streamEvent.contentBlockDelta?.delta?.text - if (text !== undefined) { - return { - type: "message", - role: "assistant", - content: [{ type: "text", text: text, citations: null }], - model: modelId, - } - } - - // Handle message stop - if (streamEvent.messageStop) { - return { - type: "message", - role: "assistant", - stop_reason: streamEvent.messageStop.stopReason || null, - stop_sequence: null, - model: modelId, - } - } - - return {} -} diff --git a/src/api/transform/openai-format.ts b/src/api/transform/openai-format.ts index f421769054f..134f9f2ed6e 100644 --- a/src/api/transform/openai-format.ts +++ b/src/api/transform/openai-format.ts @@ -144,63 +144,3 @@ export function convertToOpenAiMessages( return openAiMessages } - -// Convert OpenAI response to Anthropic format -export function convertToAnthropicMessage( - completion: OpenAI.Chat.Completions.ChatCompletion, -): Anthropic.Messages.Message { - const openAiMessage = completion.choices[0].message - const anthropicMessage: Anthropic.Messages.Message = { - id: completion.id, - type: "message", - role: openAiMessage.role, // always "assistant" - content: [ - { - type: "text", - text: openAiMessage.content || "", - citations: null, - }, - ], - model: completion.model, - stop_reason: (() => { - switch (completion.choices[0].finish_reason) { - case "stop": - return "end_turn" - case "length": - return "max_tokens" - case "tool_calls": - return "tool_use" - case "content_filter": // Anthropic doesn't have an exact equivalent - default: - return null - } - })(), - stop_sequence: null, // which custom stop_sequence was generated, if any (not applicable if you don't use stop_sequence) - usage: { - input_tokens: completion.usage?.prompt_tokens || 0, - output_tokens: completion.usage?.completion_tokens || 0, - cache_creation_input_tokens: null, - cache_read_input_tokens: null, - }, - } - - if (openAiMessage.tool_calls && openAiMessage.tool_calls.length > 0) { - anthropicMessage.content.push( - ...openAiMessage.tool_calls.map((toolCall): Anthropic.ToolUseBlock => { - let parsedInput = {} - try { - parsedInput = JSON.parse(toolCall.function.arguments || "{}") - } catch (error) { - console.error("Failed to parse tool arguments:", error) - } - return { - type: "tool_use", - id: toolCall.id, - name: toolCall.function.name, - input: parsedInput, - } - }), - ) - } - return anthropicMessage -} diff --git a/src/api/transform/vscode-lm-format.ts b/src/api/transform/vscode-lm-format.ts index 85c2fc7ba5b..73716cf912d 100644 --- a/src/api/transform/vscode-lm-format.ts +++ b/src/api/transform/vscode-lm-format.ts @@ -155,50 +155,3 @@ export function convertToAnthropicRole(vsCodeLmMessageRole: vscode.LanguageModel return null } } - -export async function convertToAnthropicMessage( - vsCodeLmMessage: vscode.LanguageModelChatMessage, -): Promise { - const anthropicRole: string | null = 
convertToAnthropicRole(vsCodeLmMessage.role) - - if (anthropicRole !== "assistant") { - throw new Error("Roo Code : Only assistant messages are supported.") - } - - return { - id: crypto.randomUUID(), - type: "message", - model: "vscode-lm", - role: anthropicRole, - content: vsCodeLmMessage.content - .map((part): Anthropic.ContentBlock | null => { - if (part instanceof vscode.LanguageModelTextPart) { - return { - type: "text", - text: part.value, - citations: null, - } - } - - if (part instanceof vscode.LanguageModelToolCallPart) { - return { - type: "tool_use", - id: part.callId || crypto.randomUUID(), - name: part.name, - input: asObjectSafe(part.input), - } - } - - return null - }) - .filter((part): part is Anthropic.ContentBlock => part !== null), - stop_reason: null, - stop_sequence: null, - usage: { - input_tokens: 0, - output_tokens: 0, - cache_creation_input_tokens: null, - cache_read_input_tokens: null, - }, - } -} From fc4d50100e9ae36a6de657701cd4d4525526c9e2 Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 22:19:05 -0800 Subject: [PATCH 039/145] Remove console.log --- src/api/providers/anthropic.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/api/providers/anthropic.ts b/src/api/providers/anthropic.ts index 0d4b67b0678..2d1f07f833f 100644 --- a/src/api/providers/anthropic.ts +++ b/src/api/providers/anthropic.ts @@ -131,7 +131,6 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { case "message_start": // Tells us cache reads/writes/input/output. const usage = chunk.message.usage - console.log("usage", usage) yield { type: "usage", From 4dd9972c4130d611432284e249ad82089833acd2 Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Mon, 24 Feb 2025 22:21:44 -0800 Subject: [PATCH 040/145] Update webview-ui/src/components/settings/ApiOptions.tsx Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> --- webview-ui/src/components/settings/ApiOptions.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 3761ddce500..0b6a1186562 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -1278,7 +1278,7 @@ const ApiOptions = ({ {anthropicThinkingBudget && ( <>
- Number of tokens Claude is allowed use for its internal reasoning process. + Number of tokens Claude is allowed to use for its internal reasoning process.
Date: Mon, 24 Feb 2025 22:22:07 -0800 Subject: [PATCH 041/145] Update webview-ui/src/components/settings/SettingsView.tsx Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> --- webview-ui/src/components/settings/SettingsView.tsx | 1 - 1 file changed, 1 deletion(-) diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index 29384826acf..761e8565214 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -135,7 +135,6 @@ const SettingsView = forwardRef(({ onDone }, }, []) const handleSubmit = () => { - console.log("handleSubmit", apiConfiguration) const apiValidationResult = validateApiConfiguration(apiConfiguration) const modelIdValidationResult = validateModelId( From 6c6087e9a2d7c200cdbd19599b328b88ba9af45e Mon Sep 17 00:00:00 2001 From: cte Date: Mon, 24 Feb 2025 23:53:01 -0800 Subject: [PATCH 042/145] v3.7.3 --- .changeset/real-zebras-remain.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/real-zebras-remain.md diff --git a/.changeset/real-zebras-remain.md b/.changeset/real-zebras-remain.md new file mode 100644 index 00000000000..94a1dc5d364 --- /dev/null +++ b/.changeset/real-zebras-remain.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +v3.7.3 From 2655bebc8fb9abc1ebd923249286bf92b40e72ad Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 25 Feb 2025 07:57:37 +0000 Subject: [PATCH 043/145] changeset version bump --- .changeset/real-zebras-remain.md | 5 ----- CHANGELOG.md | 6 ++++++ package-lock.json | 4 ++-- package.json | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) delete mode 100644 .changeset/real-zebras-remain.md diff --git a/.changeset/real-zebras-remain.md b/.changeset/real-zebras-remain.md deleted file mode 100644 index 94a1dc5d364..00000000000 --- a/.changeset/real-zebras-remain.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -v3.7.3 diff --git a/CHANGELOG.md b/CHANGELOG.md index 381e0907eb1..df704302268 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Roo Code Changelog +## 3.7.3 + +### Patch Changes + +- v3.7.3 + ## [3.7.2] - Fix computer use and prompt caching for OpenRouter's `anthropic/claude-3.7-sonnet:beta` (thanks @cte!) diff --git a/package-lock.json b/package-lock.json index ba152e81f35..71e3aa8648b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "roo-cline", - "version": "3.7.2", + "version": "3.7.3", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "3.7.2", + "version": "3.7.3", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.37.0", diff --git a/package.json b/package.json index 651de2b764f..5166affcaaa 100644 --- a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": "Roo Code (prev. 
Roo Cline)", "description": "An AI-powered autonomous coding agent that lives in your editor.", "publisher": "RooVeterinaryInc", - "version": "3.7.2", + "version": "3.7.3", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From 8ba179d3b00fbdb528738b996fad6e88eab9fd43 Mon Sep 17 00:00:00 2001 From: R00-B0T Date: Tue, 25 Feb 2025 07:58:01 +0000 Subject: [PATCH 044/145] Updating CHANGELOG.md format --- CHANGELOG.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index df704302268..f3a0db08a2a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,6 @@ # Roo Code Changelog -## 3.7.3 - -### Patch Changes +## [3.7.3] - v3.7.3 From be7f71c49890d7176c19e7b14da16493557437e5 Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Mon, 24 Feb 2025 23:58:45 -0800 Subject: [PATCH 045/145] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f3a0db08a2a..fc9b387f7f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ## [3.7.3] -- v3.7.3 +- Support for Sonnet 3.7 "Thinking". ## [3.7.2] From 613f74b717247ff4112452ff9152207418c4e71a Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Tue, 25 Feb 2025 00:05:04 -0800 Subject: [PATCH 046/145] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fc9b387f7f2..f72dd8a0ca8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ## [3.7.3] -- Support for Sonnet 3.7 "Thinking". +- Support for ["Thinking"](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking) Sonnet 3.7 when using the Anthropic provider. ## [3.7.2] From c6092ce170492ae99bbb3fffe39706212474f17f Mon Sep 17 00:00:00 2001 From: cte Date: Tue, 25 Feb 2025 01:03:12 -0800 Subject: [PATCH 047/145] Properly reset thinking setting when changing profiles --- .../src/components/settings/ApiOptions.tsx | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 0b6a1186562..8c2f382db6f 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -68,15 +68,17 @@ const ApiOptions = ({ const [lmStudioModels, setLmStudioModels] = useState([]) const [vsCodeLmModels, setVsCodeLmModels] = useState([]) const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl) - const [anthropicThinkingBudget, setAnthropicThinkingBudget] = useState(apiConfiguration?.anthropicThinking) const [azureApiVersionSelected, setAzureApiVersionSelected] = useState(!!apiConfiguration?.azureApiVersion) const [openRouterBaseUrlSelected, setOpenRouterBaseUrlSelected] = useState(!!apiConfiguration?.openRouterBaseUrl) const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false) - const inputEventTransform = (event: E) => (event as { target: HTMLInputElement })?.target?.value as any + const anthropicThinkingBudget = apiConfiguration?.anthropicThinking + const noTransform = (value: T) => value + const inputEventTransform = (event: E) => (event as { target: HTMLInputElement })?.target?.value as any const dropdownEventTransform = (event: DropdownOption | string | undefined) => (typeof event == "string" ? 
event : event?.value) as T + const handleInputChange = useCallback( ( field: K, @@ -107,8 +109,10 @@ const ApiOptions = ({ 250, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl], ) + const handleMessage = useCallback((event: MessageEvent) => { const message: ExtensionMessage = event.data + if (message.type === "ollamaModels" && Array.isArray(message.ollamaModels)) { const newModels = message.ollamaModels setOllamaModels(newModels) @@ -120,6 +124,7 @@ const ApiOptions = ({ setVsCodeLmModels(newModels) } }, []) + useEvent("message", handleMessage) const createDropdown = (models: Record) => { @@ -130,6 +135,7 @@ const ApiOptions = ({ label: modelId, })), ] + return ( { - const budget = checked ? 16_384 : undefined - setAnthropicThinkingBudget(budget) - setApiConfigurationField("anthropicThinking", budget) - }}> + onChange={(checked) => + setApiConfigurationField("anthropicThinking", checked ? 16_384 : undefined) + }> Thinking? {anthropicThinkingBudget && ( @@ -1286,11 +1290,7 @@ const ApiOptions = ({ max={anthropicModels["claude-3-7-sonnet-20250219"].maxTokens - 1} step={1024} value={[anthropicThinkingBudget]} - onValueChange={(value) => { - const budget = value[0] - setAnthropicThinkingBudget(budget) - setApiConfigurationField("anthropicThinking", budget) - }} + onValueChange={(value) => setApiConfigurationField("anthropicThinking", value[0])} />
{anthropicThinkingBudget}
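The change above fixes the stale "Thinking" budget by deriving it from the active `apiConfiguration` instead of mirroring it in a local `useState`, so switching profiles immediately updates the control. A minimal sketch of that derive-don't-mirror pattern follows; the component name, the local `ApiConfiguration` type, and the plain HTML markup are invented for illustration, while the `anthropicThinking` field, `setApiConfigurationField`, and the 16_384 default follow the diff.

import * as React from "react"

type ApiConfiguration = { anthropicThinking?: number }

interface Props {
	apiConfiguration?: ApiConfiguration
	setApiConfigurationField: (field: "anthropicThinking", value: number | undefined) => void
}

// Derived, not mirrored: the budget always reflects the currently loaded profile.
const ThinkingBudget = ({ apiConfiguration, setApiConfigurationField }: Props) => {
	const budget = apiConfiguration?.anthropicThinking

	return (
		<label>
			<input
				type="checkbox"
				checked={budget !== undefined}
				onChange={(e) => setApiConfigurationField("anthropicThinking", e.target.checked ? 16_384 : undefined)}
			/>
			Thinking? {budget !== undefined && <span>{budget} tokens</span>}
		</label>
	)
}

Because nothing is copied into component state, there is no stale value left to reset when a different profile loads.
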
From 3621deb725937a5b97fcc62a49c7e464b67501f1 Mon Sep 17 00:00:00 2001 From: cte Date: Tue, 25 Feb 2025 01:04:31 -0800 Subject: [PATCH 048/145] Add changeset --- .changeset/little-deers-occur.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/little-deers-occur.md diff --git a/.changeset/little-deers-occur.md b/.changeset/little-deers-occur.md new file mode 100644 index 00000000000..e699d277c1d --- /dev/null +++ b/.changeset/little-deers-occur.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Fix a bug that prevented the "Thinking" setting from properly updating when switching profiles. From 53ead4c886b236db2d744e1c5148112a246aff18 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 25 Feb 2025 14:19:17 +0000 Subject: [PATCH 049/145] changeset version bump --- .changeset/little-deers-occur.md | 5 ----- CHANGELOG.md | 6 ++++++ package-lock.json | 4 ++-- package.json | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) delete mode 100644 .changeset/little-deers-occur.md diff --git a/.changeset/little-deers-occur.md b/.changeset/little-deers-occur.md deleted file mode 100644 index e699d277c1d..00000000000 --- a/.changeset/little-deers-occur.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Fix a bug that prevented the "Thinking" setting from properly updating when switching profiles. diff --git a/CHANGELOG.md b/CHANGELOG.md index f72dd8a0ca8..2586e8540fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Roo Code Changelog +## 3.7.4 + +### Patch Changes + +- Fix a bug that prevented the "Thinking" setting from properly updating when switching profiles. + ## [3.7.3] - Support for ["Thinking"](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking) Sonnet 3.7 when using the Anthropic provider. diff --git a/package-lock.json b/package-lock.json index 71e3aa8648b..4bcdf8136df 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "roo-cline", - "version": "3.7.3", + "version": "3.7.4", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "3.7.3", + "version": "3.7.4", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.37.0", diff --git a/package.json b/package.json index 5166affcaaa..84bec2645a5 100644 --- a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": "Roo Code (prev. Roo Cline)", "description": "An AI-powered autonomous coding agent that lives in your editor.", "publisher": "RooVeterinaryInc", - "version": "3.7.3", + "version": "3.7.4", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From df4d2a549c6ac1464de40cfc8bd8340da5ead9d1 Mon Sep 17 00:00:00 2001 From: R00-B0T Date: Tue, 25 Feb 2025 14:19:45 +0000 Subject: [PATCH 050/145] Updating CHANGELOG.md format --- CHANGELOG.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2586e8540fe..52fb7540976 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,6 @@ # Roo Code Changelog -## 3.7.4 - -### Patch Changes +## [3.7.4] - Fix a bug that prevented the "Thinking" setting from properly updating when switching profiles. 
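The next patch (051/145) adds prompt caching for Claude on Vertex AI. Per its notes, Vertex accepts at most four cache_control blocks and only text blocks in user messages can carry them, so the patch caches the system prompt plus the last text block of the two most recent user messages. A condensed, illustrative sketch of that selection logic is below; the helper names `withCacheControl` and `buildCachedParams` are invented here, the block shapes follow the diff, and the shipped handler additionally threads cache read/write token counts through the usage stream.

import { Anthropic } from "@anthropic-ai/sdk"

// Mark the last text block of a user message as cacheable; assistant messages pass through untouched.
function withCacheControl(message: Anthropic.Messages.MessageParam): Anthropic.Messages.MessageParam {
	if (message.role !== "user") {
		return message
	}

	const blocks =
		typeof message.content === "string" ? [{ type: "text" as const, text: message.content }] : message.content

	const lastText = [...blocks].reverse().find((block) => block.type === "text")

	return {
		...message,
		content: blocks.map((block) =>
			block === lastText ? { ...block, cache_control: { type: "ephemeral" as const } } : block,
		),
	}
}

// Spend three of the four allowed blocks: the system prompt plus the two most recent user messages.
function buildCachedParams(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]) {
	const userIndices = messages.flatMap((message, index) => (message.role === "user" ? [index] : []))
	const cachedIndices = new Set(userIndices.slice(-2))

	return {
		system: [{ type: "text" as const, text: systemPrompt, cache_control: { type: "ephemeral" as const } }],
		messages: messages.map((message, index) => (cachedIndices.has(index) ? withCacheControl(message) : message)),
	}
}

Keeping the markers on the newest user turns lets the long, stable prefix hit the cache on every request while staying under the four-block limit.
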
From 9b267e9afc8cc4308e5decfa0a9c312caaafc4bf Mon Sep 17 00:00:00 2001 From: Aitor Oses Date: Tue, 25 Feb 2025 15:30:10 +0100 Subject: [PATCH 051/145] Add Vertex AI prompt caching support and enhance streaming handling - Implemented comprehensive prompt caching strategy for Vertex AI models - Added support for caching system prompts and user message text blocks - Enhanced stream processing to handle cache-related usage metrics - Updated model configurations to enable prompt caching - Improved type definitions for Vertex AI message handling --- src/api/providers/__tests__/vertex.test.ts | 215 ++++++++++++++++++- src/api/providers/vertex.ts | 235 ++++++++++++++++++--- src/shared/api.ts | 20 +- 3 files changed, 435 insertions(+), 35 deletions(-) diff --git a/src/api/providers/__tests__/vertex.test.ts b/src/api/providers/__tests__/vertex.test.ts index ebe60ba0c68..6e81fd771b7 100644 --- a/src/api/providers/__tests__/vertex.test.ts +++ b/src/api/providers/__tests__/vertex.test.ts @@ -4,6 +4,7 @@ import { Anthropic } from "@anthropic-ai/sdk" import { AnthropicVertex } from "@anthropic-ai/vertex-sdk" import { VertexHandler } from "../vertex" +import { ApiStreamChunk } from "../../transform/stream" // Mock Vertex SDK jest.mock("@anthropic-ai/vertex-sdk", () => ({ @@ -128,7 +129,7 @@ describe("VertexHandler", () => { ;(handler["client"].messages as any).create = mockCreate const stream = handler.createMessage(systemPrompt, mockMessages) - const chunks = [] + const chunks: ApiStreamChunk[] = [] for await (const chunk of stream) { chunks.push(chunk) @@ -158,8 +159,29 @@ describe("VertexHandler", () => { model: "claude-3-5-sonnet-v2@20241022", max_tokens: 8192, temperature: 0, - system: systemPrompt, - messages: mockMessages, + system: [ + { + type: "text", + text: "You are a helpful assistant", + cache_control: { type: "ephemeral" }, + }, + ], + messages: [ + { + role: "user", + content: [ + { + type: "text", + text: "Hello", + cache_control: { type: "ephemeral" }, + }, + ], + }, + { + role: "assistant", + content: "Hi there!", + }, + ], stream: true, }) }) @@ -196,7 +218,7 @@ describe("VertexHandler", () => { ;(handler["client"].messages as any).create = mockCreate const stream = handler.createMessage(systemPrompt, mockMessages) - const chunks = [] + const chunks: ApiStreamChunk[] = [] for await (const chunk of stream) { chunks.push(chunk) @@ -230,6 +252,183 @@ describe("VertexHandler", () => { } }).rejects.toThrow("Vertex API error") }) + + it("should handle prompt caching for supported models", async () => { + const mockStream = [ + { + type: "message_start", + message: { + usage: { + input_tokens: 10, + output_tokens: 0, + cache_creation_input_tokens: 3, + cache_read_input_tokens: 2, + }, + }, + }, + { + type: "content_block_start", + index: 0, + content_block: { + type: "text", + text: "Hello", + }, + }, + { + type: "content_block_delta", + delta: { + type: "text_delta", + text: " world!", + }, + }, + { + type: "message_delta", + usage: { + output_tokens: 5, + }, + }, + ] + + const asyncIterator = { + async *[Symbol.asyncIterator]() { + for (const chunk of mockStream) { + yield chunk + } + }, + } + + const mockCreate = jest.fn().mockResolvedValue(asyncIterator) + ;(handler["client"].messages as any).create = mockCreate + + const stream = handler.createMessage(systemPrompt, [ + { + role: "user", + content: "First message", + }, + { + role: "assistant", + content: "Response", + }, + { + role: "user", + content: "Second message", + }, + ]) + + const chunks: ApiStreamChunk[] = [] + for await (const 
chunk of stream) { + chunks.push(chunk) + } + + // Verify usage information + const usageChunks = chunks.filter((chunk) => chunk.type === "usage") + expect(usageChunks).toHaveLength(2) + expect(usageChunks[0]).toEqual({ + type: "usage", + inputTokens: 10, + outputTokens: 0, + cacheWriteTokens: 3, + cacheReadTokens: 2, + }) + expect(usageChunks[1]).toEqual({ + type: "usage", + inputTokens: 0, + outputTokens: 5, + }) + + // Verify text content + const textChunks = chunks.filter((chunk) => chunk.type === "text") + expect(textChunks).toHaveLength(2) + expect(textChunks[0].text).toBe("Hello") + expect(textChunks[1].text).toBe(" world!") + + // Verify cache control was added correctly + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + system: [ + { + type: "text", + text: "You are a helpful assistant", + cache_control: { type: "ephemeral" }, + }, + ], + messages: [ + expect.objectContaining({ + role: "user", + content: [ + { + type: "text", + text: "First message", + cache_control: { type: "ephemeral" }, + }, + ], + }), + expect.objectContaining({ + role: "assistant", + content: "Response", + }), + expect.objectContaining({ + role: "user", + content: [ + { + type: "text", + text: "Second message", + cache_control: { type: "ephemeral" }, + }, + ], + }), + ], + }), + ) + }) + + it("should handle cache-related usage metrics", async () => { + const mockStream = [ + { + type: "message_start", + message: { + usage: { + input_tokens: 10, + output_tokens: 0, + cache_creation_input_tokens: 5, + cache_read_input_tokens: 3, + }, + }, + }, + { + type: "content_block_start", + index: 0, + content_block: { + type: "text", + text: "Hello", + }, + }, + ] + + const asyncIterator = { + async *[Symbol.asyncIterator]() { + for (const chunk of mockStream) { + yield chunk + } + }, + } + + const mockCreate = jest.fn().mockResolvedValue(asyncIterator) + ;(handler["client"].messages as any).create = mockCreate + + const stream = handler.createMessage(systemPrompt, mockMessages) + const chunks: ApiStreamChunk[] = [] + + for await (const chunk of stream) { + chunks.push(chunk) + } + + // Check for cache-related metrics in usage chunk + const usageChunks = chunks.filter((chunk) => chunk.type === "usage") + expect(usageChunks.length).toBeGreaterThan(0) + expect(usageChunks[0]).toHaveProperty("cacheWriteTokens", 5) + expect(usageChunks[0]).toHaveProperty("cacheReadTokens", 3) + }) }) describe("completePrompt", () => { @@ -240,7 +439,13 @@ describe("VertexHandler", () => { model: "claude-3-5-sonnet-v2@20241022", max_tokens: 8192, temperature: 0, - messages: [{ role: "user", content: "Test prompt" }], + system: "", + messages: [ + { + role: "user", + content: [{ type: "text", text: "Test prompt", cache_control: { type: "ephemeral" } }], + }, + ], stream: false, }) }) diff --git a/src/api/providers/vertex.ts b/src/api/providers/vertex.ts index 0ee22e5893d..70562766c3b 100644 --- a/src/api/providers/vertex.ts +++ b/src/api/providers/vertex.ts @@ -1,9 +1,86 @@ import { Anthropic } from "@anthropic-ai/sdk" import { AnthropicVertex } from "@anthropic-ai/vertex-sdk" +import { Stream as AnthropicStream } from "@anthropic-ai/sdk/streaming" import { ApiHandler, SingleCompletionHandler } from "../" import { ApiHandlerOptions, ModelInfo, vertexDefaultModelId, VertexModelId, vertexModels } from "../../shared/api" import { ApiStream } from "../transform/stream" +// Types for Vertex SDK + +/** + * Vertex API has specific limitations for prompt caching: + * 1. Maximum of 4 blocks can have cache_control + * 2. 
Only text blocks can be cached (images and other content types cannot) + * 3. Cache control can only be applied to user messages, not assistant messages + * + * Our caching strategy: + * - Cache the system prompt (1 block) + * - Cache the last text block of the second-to-last user message (1 block) + * - Cache the last text block of the last user message (1 block) + * This ensures we stay under the 4-block limit while maintaining effective caching + * for the most relevant context. + */ + +interface VertexTextBlock { + type: "text" + text: string + cache_control?: { type: "ephemeral" } +} + +interface VertexImageBlock { + type: "image" + source: { + type: "base64" + media_type: "image/jpeg" | "image/png" | "image/gif" | "image/webp" + data: string + } +} + +type VertexContentBlock = VertexTextBlock | VertexImageBlock + +interface VertexUsage { + input_tokens?: number + output_tokens?: number + cache_creation_input_tokens?: number + cache_read_input_tokens?: number +} + +interface VertexMessage extends Omit { + content: string | VertexContentBlock[] +} + +interface VertexMessageCreateParams { + model: string + max_tokens: number + temperature: number + system: string | VertexTextBlock[] + messages: VertexMessage[] + stream: boolean +} + +interface VertexMessageResponse { + content: Array<{ type: "text"; text: string }> +} + +interface VertexMessageStreamEvent { + type: "message_start" | "message_delta" | "content_block_start" | "content_block_delta" + message?: { + usage: VertexUsage + } + usage?: { + output_tokens: number + } + content_block?: { + type: "text" + text: string + } + index?: number + delta?: { + type: "text_delta" + text: string + } +} + // https://docs.anthropic.com/en/api/claude-on-vertex-ai export class VertexHandler implements ApiHandler, SingleCompletionHandler { private options: ApiHandlerOptions @@ -18,37 +95,120 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { }) } + private formatMessageForCache(message: Anthropic.Messages.MessageParam, shouldCache: boolean): VertexMessage { + // Assistant messages are kept as-is since they can't be cached + if (message.role === "assistant") { + return message as VertexMessage + } + + // For string content, we convert to array format with optional cache control + if (typeof message.content === "string") { + return { + ...message, + content: [ + { + type: "text" as const, + text: message.content, + // For string content, we only have one block so it's always the last + ...(shouldCache && { cache_control: { type: "ephemeral" } }), + }, + ], + } + } + + // For array content, find the last text block index once before mapping + const lastTextBlockIndex = message.content.reduce( + (lastIndex, content, index) => (content.type === "text" ? 
index : lastIndex), + -1, + ) + + // Then use this pre-calculated index in the map function + return { + ...message, + content: message.content.map((content, contentIndex) => { + // Images and other non-text content are passed through unchanged + if (content.type === "image") { + return content as VertexImageBlock + } + + // Check if this is the last text block using our pre-calculated index + const isLastTextBlock = contentIndex === lastTextBlockIndex + + return { + type: "text" as const, + text: (content as { text: string }).text, + ...(shouldCache && isLastTextBlock && { cache_control: { type: "ephemeral" } }), + } + }), + } + } + async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { - const stream = await this.client.messages.create({ - model: this.getModel().id, - max_tokens: this.getModel().info.maxTokens || 8192, + const model = this.getModel() + const useCache = model.info.supportsPromptCache + + // Find indices of user messages that we want to cache + // We only cache the last two user messages to stay within the 4-block limit + // (1 block for system + 1 block each for last two user messages = 3 total) + const userMsgIndices = useCache + ? messages.reduce((acc, msg, i) => (msg.role === "user" ? [...acc, i] : acc), [] as number[]) + : [] + const lastUserMsgIndex = userMsgIndices[userMsgIndices.length - 1] ?? -1 + const secondLastMsgUserIndex = userMsgIndices[userMsgIndices.length - 2] ?? -1 + + // Create the stream with appropriate caching configuration + const params = { + model: model.id, + max_tokens: model.info.maxTokens || 8192, temperature: this.options.modelTemperature ?? 0, - system: systemPrompt, - messages, + // Cache the system prompt if caching is enabled + system: useCache + ? [ + { + text: systemPrompt, + type: "text" as const, + cache_control: { type: "ephemeral" }, + }, + ] + : systemPrompt, + messages: messages.map((message, index) => { + // Only cache the last two user messages + const shouldCache = useCache && (index === lastUserMsgIndex || index === secondLastMsgUserIndex) + return this.formatMessageForCache(message, shouldCache) + }), stream: true, - }) + } + + const stream = (await this.client.messages.create( + params as Anthropic.Messages.MessageCreateParamsStreaming, + )) as unknown as AnthropicStream + + // Process the stream chunks for await (const chunk of stream) { switch (chunk.type) { - case "message_start": - const usage = chunk.message.usage + case "message_start": { + const usage = chunk.message!.usage yield { type: "usage", inputTokens: usage.input_tokens || 0, outputTokens: usage.output_tokens || 0, + cacheWriteTokens: usage.cache_creation_input_tokens, + cacheReadTokens: usage.cache_read_input_tokens, } break - case "message_delta": + } + case "message_delta": { yield { type: "usage", inputTokens: 0, - outputTokens: chunk.usage.output_tokens || 0, + outputTokens: chunk.usage!.output_tokens || 0, } break - - case "content_block_start": - switch (chunk.content_block.type) { - case "text": - if (chunk.index > 0) { + } + case "content_block_start": { + switch (chunk.content_block!.type) { + case "text": { + if (chunk.index! 
> 0) { yield { type: "text", text: "\n", @@ -56,21 +216,25 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { } yield { type: "text", - text: chunk.content_block.text, + text: chunk.content_block!.text, } break + } } break - case "content_block_delta": - switch (chunk.delta.type) { - case "text_delta": + } + case "content_block_delta": { + switch (chunk.delta!.type) { + case "text_delta": { yield { type: "text", - text: chunk.delta.text, + text: chunk.delta!.text, } break + } } break + } } } } @@ -86,13 +250,34 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { async completePrompt(prompt: string): Promise { try { - const response = await this.client.messages.create({ - model: this.getModel().id, - max_tokens: this.getModel().info.maxTokens || 8192, + const model = this.getModel() + const useCache = model.info.supportsPromptCache + + const params = { + model: model.id, + max_tokens: model.info.maxTokens || 8192, temperature: this.options.modelTemperature ?? 0, - messages: [{ role: "user", content: prompt }], + system: "", // No system prompt needed for single completions + messages: [ + { + role: "user", + content: useCache + ? [ + { + type: "text" as const, + text: prompt, + cache_control: { type: "ephemeral" }, + }, + ] + : prompt, + }, + ], stream: false, - }) + } + + const response = (await this.client.messages.create( + params as Anthropic.Messages.MessageCreateParamsNonStreaming, + )) as unknown as VertexMessageResponse const content = response.content[0] if (content.type === "text") { diff --git a/src/shared/api.ts b/src/shared/api.ts index cea760c7760..95399cca4aa 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -435,41 +435,51 @@ export const vertexModels = { contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, - supportsPromptCache: false, + supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, + cacheWritesPrice: 3.75, + cacheReadsPrice: 0.3, }, "claude-3-5-sonnet@20240620": { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsPromptCache: false, + supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, + cacheWritesPrice: 3.75, + cacheReadsPrice: 0.3, }, "claude-3-5-haiku@20241022": { maxTokens: 8192, contextWindow: 200_000, supportsImages: false, - supportsPromptCache: false, + supportsPromptCache: true, inputPrice: 1.0, outputPrice: 5.0, + cacheWritesPrice: 1.25, + cacheReadsPrice: 0.1, }, "claude-3-opus@20240229": { maxTokens: 4096, contextWindow: 200_000, supportsImages: true, - supportsPromptCache: false, + supportsPromptCache: true, inputPrice: 15.0, outputPrice: 75.0, + cacheWritesPrice: 18.75, + cacheReadsPrice: 1.5, }, "claude-3-haiku@20240307": { maxTokens: 4096, contextWindow: 200_000, supportsImages: true, - supportsPromptCache: false, + supportsPromptCache: true, inputPrice: 0.25, outputPrice: 1.25, + cacheWritesPrice: 0.3, + cacheReadsPrice: 0.03, }, } as const satisfies Record From d5b796263dd160d4f6fe0ccc0ad8c464c97ec01f Mon Sep 17 00:00:00 2001 From: System233 Date: Wed, 26 Feb 2025 02:44:57 +0800 Subject: [PATCH 052/145] Add a combobox component with auto-complete functionality --- webview-ui/src/__mocks__/lucide-react.ts | 6 + .../src/components/ui/combobox-primitive.tsx | 522 ++++++++++++++++++ webview-ui/src/components/ui/combobox.tsx | 177 ++++++ webview-ui/src/components/ui/input-base.tsx | 157 ++++++ 4 files changed, 862 insertions(+) create mode 100644 webview-ui/src/__mocks__/lucide-react.ts create mode 100644 
webview-ui/src/components/ui/combobox-primitive.tsx create mode 100644 webview-ui/src/components/ui/combobox.tsx create mode 100644 webview-ui/src/components/ui/input-base.tsx diff --git a/webview-ui/src/__mocks__/lucide-react.ts b/webview-ui/src/__mocks__/lucide-react.ts new file mode 100644 index 00000000000..d85cd25d6a7 --- /dev/null +++ b/webview-ui/src/__mocks__/lucide-react.ts @@ -0,0 +1,6 @@ +import React from "react" + +export const Check = () => React.createElement("div") +export const ChevronsUpDown = () => React.createElement("div") +export const Loader = () => React.createElement("div") +export const X = () => React.createElement("div") diff --git a/webview-ui/src/components/ui/combobox-primitive.tsx b/webview-ui/src/components/ui/combobox-primitive.tsx new file mode 100644 index 00000000000..13bad87abac --- /dev/null +++ b/webview-ui/src/components/ui/combobox-primitive.tsx @@ -0,0 +1,522 @@ +/* eslint-disable react/jsx-pascal-case */ +"use client" + +import * as React from "react" +import { composeEventHandlers } from "@radix-ui/primitive" +import { useComposedRefs } from "@radix-ui/react-compose-refs" +import * as PopoverPrimitive from "@radix-ui/react-popover" +import { Primitive } from "@radix-ui/react-primitive" +import * as RovingFocusGroupPrimitive from "@radix-ui/react-roving-focus" +import { useControllableState } from "@radix-ui/react-use-controllable-state" +import { Command as CommandPrimitive } from "cmdk" + +export type ComboboxContextProps = { + inputValue: string + onInputValueChange: (inputValue: string, reason: "inputChange" | "itemSelect" | "clearClick") => void + onInputBlur?: (e: React.FocusEvent) => void + open: boolean + onOpenChange: (open: boolean) => void + currentTabStopId: string | null + onCurrentTabStopIdChange: (currentTabStopId: string | null) => void + inputRef: React.RefObject + tagGroupRef: React.RefObject> + disabled?: boolean + required?: boolean +} & ( + | Required> + | Required> +) + +const ComboboxContext = React.createContext({ + type: "single", + value: "", + onValueChange: () => {}, + inputValue: "", + onInputValueChange: () => {}, + onInputBlur: () => {}, + open: false, + onOpenChange: () => {}, + currentTabStopId: null, + onCurrentTabStopIdChange: () => {}, + inputRef: { current: null }, + tagGroupRef: { current: null }, + disabled: false, + required: false, +}) + +export const useComboboxContext = () => React.useContext(ComboboxContext) + +export type ComboboxType = "single" | "multiple" + +export interface ComboboxBaseProps + extends React.ComponentProps, + Omit, "value" | "defaultValue" | "onValueChange"> { + type?: ComboboxType | undefined + inputValue?: string + defaultInputValue?: string + onInputValueChange?: (inputValue: string, reason: "inputChange" | "itemSelect" | "clearClick") => void + onInputBlur?: (e: React.FocusEvent) => void + disabled?: boolean + required?: boolean +} + +export type ComboboxValue = T extends "single" + ? string + : T extends "multiple" + ? 
string[] + : never + +export interface ComboboxSingleProps { + type: "single" + value?: string + defaultValue?: string + onValueChange?: (value: string) => void +} + +export interface ComboboxMultipleProps { + type: "multiple" + value?: string[] + defaultValue?: string[] + onValueChange?: (value: string[]) => void +} + +export type ComboboxProps = ComboboxBaseProps & (ComboboxSingleProps | ComboboxMultipleProps) + +export const Combobox = React.forwardRef( + ( + { + type = "single" as T, + open: openProp, + onOpenChange, + defaultOpen, + modal, + children, + value: valueProp, + defaultValue, + onValueChange, + inputValue: inputValueProp, + defaultInputValue, + onInputValueChange, + onInputBlur, + disabled, + required, + ...props + }: ComboboxProps, + ref: React.ForwardedRef>, + ) => { + const [value = type === "multiple" ? [] : "", setValue] = useControllableState>({ + prop: valueProp as ComboboxValue, + defaultProp: defaultValue as ComboboxValue, + onChange: onValueChange as (value: ComboboxValue) => void, + }) + const [inputValue = "", setInputValue] = useControllableState({ + prop: inputValueProp, + defaultProp: defaultInputValue, + }) + const [open = false, setOpen] = useControllableState({ + prop: openProp, + defaultProp: defaultOpen, + onChange: onOpenChange, + }) + const [currentTabStopId, setCurrentTabStopId] = React.useState(null) + const inputRef = React.useRef(null) + const tagGroupRef = React.useRef>(null) + + const handleInputValueChange: ComboboxContextProps["onInputValueChange"] = React.useCallback( + (inputValue, reason) => { + setInputValue(inputValue) + onInputValueChange?.(inputValue, reason) + }, + [setInputValue, onInputValueChange], + ) + + return ( + + + + {children} + {!open && + + + ) + }, +) +Combobox.displayName = "Combobox" + +export const ComboboxTagGroup = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>((props, ref) => { + const { currentTabStopId, onCurrentTabStopIdChange, tagGroupRef, type } = useComboboxContext() + + if (type !== "multiple") { + throw new Error(' should only be used when type is "multiple"') + } + + const composedRefs = useComposedRefs(ref, tagGroupRef) + + return ( + onCurrentTabStopIdChange(null)} + {...props} + /> + ) +}) +ComboboxTagGroup.displayName = "ComboboxTagGroup" + +export interface ComboboxTagGroupItemProps + extends React.ComponentPropsWithoutRef { + value: string + disabled?: boolean +} + +const ComboboxTagGroupItemContext = React.createContext>({ + value: "", + disabled: false, +}) + +const useComboboxTagGroupItemContext = () => React.useContext(ComboboxTagGroupItemContext) + +export const ComboboxTagGroupItem = React.forwardRef< + React.ElementRef, + ComboboxTagGroupItemProps +>(({ onClick, onKeyDown, value: valueProp, disabled, ...props }, ref) => { + const { value, onValueChange, inputRef, currentTabStopId, type } = useComboboxContext() + + if (type !== "multiple") { + throw new Error(' should only be used when type is "multiple"') + } + + const lastItemValue = value.at(-1) + + return ( + + { + if (event.key === "Escape") { + inputRef.current?.focus() + } + if (event.key === "ArrowUp" || event.key === "ArrowDown") { + event.preventDefault() + inputRef.current?.focus() + } + if (event.key === "ArrowRight" && currentTabStopId === lastItemValue) { + inputRef.current?.focus() + } + if (event.key === "Backspace" || event.key === "Delete") { + onValueChange(value.filter((v) => v !== currentTabStopId)) + inputRef.current?.focus() + } + })} + onClick={composeEventHandlers(onClick, () => disabled && 
inputRef.current?.focus())} + tabStopId={valueProp} + focusable={!disabled} + data-disabled={disabled} + active={valueProp === lastItemValue} + {...props} + /> + + ) +}) +ComboboxTagGroupItem.displayName = "ComboboxTagGroupItem" + +export const ComboboxTagGroupItemRemove = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ onClick, ...props }, ref) => { + const { value, onValueChange, type } = useComboboxContext() + + if (type !== "multiple") { + throw new Error(' should only be used when type is "multiple"') + } + + const { value: valueProp, disabled } = useComboboxTagGroupItemContext() + + return ( + onValueChange(value.filter((v) => v !== valueProp)))} + {...props} + /> + ) +}) +ComboboxTagGroupItemRemove.displayName = "ComboboxTagGroupItemRemove" + +export const ComboboxInput = React.forwardRef< + React.ElementRef, + Omit, "value" | "onValueChange"> +>(({ onKeyDown, onMouseDown, onFocus, onBlur, ...props }, ref) => { + const { + type, + inputValue, + onInputValueChange, + onInputBlur, + open, + onOpenChange, + value, + onValueChange, + inputRef, + disabled, + required, + tagGroupRef, + } = useComboboxContext() + + const composedRefs = useComposedRefs(ref, inputRef) + + return ( + { + if (!open) { + onOpenChange(true) + } + // Schedule input value change to the next tick. + setTimeout(() => onInputValueChange(search, "inputChange")) + if (!search && type === "single") { + onValueChange("") + } + }} + onKeyDown={composeEventHandlers(onKeyDown, (event) => { + if (event.key === "ArrowUp" || event.key === "ArrowDown") { + if (!open) { + event.preventDefault() + onOpenChange(true) + } + } + if (type !== "multiple") { + return + } + if (event.key === "ArrowLeft" && !inputValue && value.length) { + tagGroupRef.current?.focus() + } + if (event.key === "Backspace" && !inputValue) { + onValueChange(value.slice(0, -1)) + } + })} + onMouseDown={composeEventHandlers(onMouseDown, () => onOpenChange(!!inputValue || !open))} + onFocus={composeEventHandlers(onFocus, () => onOpenChange(true))} + onBlur={composeEventHandlers(onBlur, (event) => { + if (!event.relatedTarget?.hasAttribute("cmdk-list")) { + onInputBlur?.(event) + } + })} + {...props} + /> + ) +}) +ComboboxInput.displayName = "ComboboxInput" + +export const ComboboxClear = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ onClick, ...props }, ref) => { + const { value, onValueChange, inputValue, onInputValueChange, type } = useComboboxContext() + + const isValueEmpty = type === "single" ? 
!value : !value.length + + return ( + { + if (type === "single") { + onValueChange("") + } else { + onValueChange([]) + } + onInputValueChange("", "clearClick") + })} + {...props} + /> + ) +}) +ComboboxClear.displayName = "ComboboxClear" + +export const ComboboxTrigger = PopoverPrimitive.Trigger + +export const ComboboxAnchor = PopoverPrimitive.Anchor + +export const ComboboxPortal = PopoverPrimitive.Portal + +export const ComboboxContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ children, onOpenAutoFocus, onInteractOutside, ...props }, ref) => ( + event.preventDefault())} + onCloseAutoFocus={composeEventHandlers(onOpenAutoFocus, (event) => event.preventDefault())} + onInteractOutside={composeEventHandlers(onInteractOutside, (event) => { + if (event.target instanceof Element && event.target.hasAttribute("cmdk-input")) { + event.preventDefault() + } + })} + {...props}> + {children} + +)) +ComboboxContent.displayName = "ComboboxContent" + +export const ComboboxEmpty = CommandPrimitive.Empty + +export const ComboboxLoading = CommandPrimitive.Loading + +export interface ComboboxItemProps extends Omit, "value"> { + value: string +} + +const ComboboxItemContext = React.createContext({ isSelected: false }) + +const useComboboxItemContext = () => React.useContext(ComboboxItemContext) + +const findComboboxItemText = (children: React.ReactNode) => { + let text = "" + + React.Children.forEach(children, (child) => { + if (text) { + return + } + + if (React.isValidElement<{ children: React.ReactNode }>(child)) { + if (child.type === ComboboxItemText) { + text = child.props.children as string + } else { + text = findComboboxItemText(child.props.children) + } + } + }) + + return text +} + +export const ComboboxItem = React.forwardRef, ComboboxItemProps>( + ({ value: valueProp, children, onMouseDown, ...props }, ref) => { + const { type, value, onValueChange, onInputValueChange, onOpenChange } = useComboboxContext() + + const inputValue = React.useMemo(() => findComboboxItemText(children), [children]) + + const isSelected = type === "single" ? value === valueProp : value.includes(valueProp) + + return ( + + event.preventDefault())} + onSelect={() => { + if (type === "multiple") { + onValueChange( + value.includes(valueProp) + ? value.filter((v) => v !== valueProp) + : [...value, valueProp], + ) + onInputValueChange("", "itemSelect") + } else { + onValueChange(valueProp) + onInputValueChange(inputValue, "itemSelect") + // Schedule open change to the next tick. 
+ setTimeout(() => onOpenChange(false)) + } + }} + value={inputValue} + {...props}> + {children} + + + ) + }, +) +ComboboxItem.displayName = "ComboboxItem" + +export const ComboboxItemIndicator = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>((props, ref) => { + const { isSelected } = useComboboxItemContext() + + if (!isSelected) { + return null + } + + return +}) +ComboboxItemIndicator.displayName = "ComboboxItemIndicator" + +export interface ComboboxItemTextProps extends React.ComponentPropsWithoutRef { + children: string +} + +export const ComboboxItemText = (props: ComboboxItemTextProps) => +ComboboxItemText.displayName = "ComboboxItemText" + +export const ComboboxGroup = CommandPrimitive.Group + +export const ComboboxSeparator = CommandPrimitive.Separator + +const Root = Combobox +const TagGroup = ComboboxTagGroup +const TagGroupItem = ComboboxTagGroupItem +const TagGroupItemRemove = ComboboxTagGroupItemRemove +const Input = ComboboxInput +const Clear = ComboboxClear +const Trigger = ComboboxTrigger +const Anchor = ComboboxAnchor +const Portal = ComboboxPortal +const Content = ComboboxContent +const Empty = ComboboxEmpty +const Loading = ComboboxLoading +const Item = ComboboxItem +const ItemIndicator = ComboboxItemIndicator +const ItemText = ComboboxItemText +const Group = ComboboxGroup +const Separator = ComboboxSeparator + +export { + Root, + TagGroup, + TagGroupItem, + TagGroupItemRemove, + Input, + Clear, + Trigger, + Anchor, + Portal, + Content, + Empty, + Loading, + Item, + ItemIndicator, + ItemText, + Group, + Separator, +} diff --git a/webview-ui/src/components/ui/combobox.tsx b/webview-ui/src/components/ui/combobox.tsx new file mode 100644 index 00000000000..24b2f7be1f3 --- /dev/null +++ b/webview-ui/src/components/ui/combobox.tsx @@ -0,0 +1,177 @@ +"use client" + +import * as React from "react" +import { Slottable } from "@radix-ui/react-slot" +import { cva } from "class-variance-authority" +import { Check, ChevronsUpDown, Loader, X } from "lucide-react" + +import { cn } from "@/lib/utils" +import * as ComboboxPrimitive from "@/components/ui/combobox-primitive" +import { badgeVariants } from "@/components/ui/badge" +// import * as ComboboxPrimitive from "@/registry/default/ui/combobox-primitive" +import { + InputBase, + InputBaseAdornmentButton, + InputBaseControl, + InputBaseFlexWrapper, + InputBaseInput, +} from "@/components/ui/input-base" + +export const Combobox = ComboboxPrimitive.Root + +const ComboboxInputBase = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ children, ...props }, ref) => ( + + + {children} + + + + + + + + + + + + +)) +ComboboxInputBase.displayName = "ComboboxInputBase" + +export const ComboboxInput = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>((props, ref) => ( + + + + + + + +)) +ComboboxInput.displayName = "ComboboxInput" + +export const ComboboxTagsInput = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ children, ...props }, ref) => ( + + + + {children} + + + + + + + + +)) +ComboboxTagsInput.displayName = "ComboboxTagsInput" + +export const ComboboxTag = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ children, className, ...props }, ref) => ( + + {children} + + + Remove + + +)) +ComboboxTag.displayName = "ComboboxTag" + +export const ComboboxContent = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, align = "start", alignOffset = 0, ...props }, ref) => ( + + + +)) 
+ComboboxContent.displayName = "ComboboxContent" + +export const ComboboxEmpty = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +ComboboxEmpty.displayName = "ComboboxEmpty" + +export const ComboboxLoading = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + + + +)) +ComboboxLoading.displayName = "ComboboxLoading" + +export const ComboboxGroup = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +ComboboxGroup.displayName = "ComboboxGroup" + +const ComboboxSeparator = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +ComboboxSeparator.displayName = "ComboboxSeparator" + +export const comboboxItemStyle = cva( + "relative flex w-full cursor-pointer select-none items-center rounded-sm px-2 py-1.5 text-sm outline-none data-[disabled=true]:pointer-events-none data-[selected=true]:bg-accent data-[selected=true]:text-vscode-dropdown-foreground data-[disabled=true]:opacity-50", +) + +export const ComboboxItem = React.forwardRef< + React.ElementRef, + Omit, "children"> & + Pick, "children"> +>(({ className, children, ...props }, ref) => ( + + {children} + + + + +)) +ComboboxItem.displayName = "ComboboxItem" diff --git a/webview-ui/src/components/ui/input-base.tsx b/webview-ui/src/components/ui/input-base.tsx new file mode 100644 index 00000000000..9dbda6eb138 --- /dev/null +++ b/webview-ui/src/components/ui/input-base.tsx @@ -0,0 +1,157 @@ +/* eslint-disable react/jsx-no-comment-textnodes */ +/* eslint-disable react/jsx-pascal-case */ +"use client" + +import * as React from "react" +import { composeEventHandlers } from "@radix-ui/primitive" +import { composeRefs } from "@radix-ui/react-compose-refs" +import { Primitive } from "@radix-ui/react-primitive" +import { Slot } from "@radix-ui/react-slot" + +import { cn } from "@/lib/utils" +import { Button } from "./button" + +export type InputBaseContextProps = Pick & { + controlRef: React.RefObject + onFocusedChange: (focused: boolean) => void +} + +const InputBaseContext = React.createContext({ + autoFocus: false, + controlRef: { current: null }, + disabled: false, + onFocusedChange: () => {}, +}) + +const useInputBaseContext = () => React.useContext(InputBaseContext) + +export interface InputBaseProps extends React.ComponentPropsWithoutRef { + autoFocus?: boolean + disabled?: boolean +} + +export const InputBase = React.forwardRef, InputBaseProps>( + ({ autoFocus, disabled, className, onClick, ...props }, ref) => { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const [focused, setFocused] = React.useState(false) + + const controlRef = React.useRef(null) + + return ( + + { + // Based on MUI's implementation. 
+ // https://github.com/mui/material-ui/blob/master/packages/mui-material/src/InputBase/InputBase.js#L458~L460 + if (controlRef.current && event.currentTarget === event.target) { + controlRef.current.focus() + } + })} + className={cn( + "flex w-full text-vscode-input-foreground border border-vscode-dropdown-border bg-vscode-input-background rounded-xs px-3 py-0.5 text-base transition-colors file:border-0 file:bg-transparent file:text-sm file:font-medium file:text-foreground placeholder:text-muted-foreground focus:outline-0 focus-visible:outline-none focus-visible:border-vscode-focusBorder disabled:cursor-not-allowed disabled:opacity-50", + disabled && "cursor-not-allowed opacity-50", + className, + )} + {...props} + /> + + ) + }, +) +InputBase.displayName = "InputBase" + +export const InputBaseFlexWrapper = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ className, ...props }, ref) => ( + +)) +InputBaseFlexWrapper.displayName = "InputBaseFlexWrapper" + +export const InputBaseControl = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ onFocus, onBlur, ...props }, ref) => { + const { controlRef, autoFocus, disabled, onFocusedChange } = useInputBaseContext() + + return ( + onFocusedChange(true))} + onBlur={composeEventHandlers(onBlur, () => onFocusedChange(false))} + {...{ disabled }} + {...props} + /> + ) +}) +InputBaseControl.displayName = "InputBaseControl" + +export interface InputBaseAdornmentProps extends React.ComponentPropsWithoutRef<"div"> { + asChild?: boolean + disablePointerEvents?: boolean +} + +export const InputBaseAdornment = React.forwardRef, InputBaseAdornmentProps>( + ({ className, disablePointerEvents, asChild, children, ...props }, ref) => { + const Comp = asChild ? Slot : typeof children === "string" ? "p" : "div" + + const isAction = React.isValidElement(children) && children.type === InputBaseAdornmentButton + + return ( + + {children} + + ) + }, +) +InputBaseAdornment.displayName = "InputBaseAdornment" + +export const InputBaseAdornmentButton = React.forwardRef< + React.ElementRef, + React.ComponentPropsWithoutRef +>(({ type = "button", variant = "ghost", size = "icon", disabled: disabledProp, className, ...props }, ref) => { + const { disabled } = useInputBaseContext() + + return ( + - {item?.size && ( + {!!item?.size && item.size > 0 && (
- ( - -) diff --git a/webview-ui/src/components/settings/ModelPicker.tsx b/webview-ui/src/components/settings/ModelPicker.tsx index b21b37ef0f4..8fd6d82daa7 100644 --- a/webview-ui/src/components/settings/ModelPicker.tsx +++ b/webview-ui/src/components/settings/ModelPicker.tsx @@ -1,185 +1,90 @@ import { VSCodeLink } from "@vscode/webview-ui-toolkit/react" -import debounce from "debounce" -import { useMemo, useState, useCallback, useEffect, useRef } from "react" -import { useMount } from "react-use" -import { CaretSortIcon, CheckIcon } from "@radix-ui/react-icons" +import { useMemo, useState, useCallback, useEffect } from "react" -import { cn } from "@/lib/utils" -import { - Button, - Command, - CommandEmpty, - CommandGroup, - CommandInput, - CommandItem, - CommandList, - Popover, - PopoverContent, - PopoverTrigger, -} from "@/components/ui" - -import { useExtensionState } from "../../context/ExtensionStateContext" -import { vscode } from "../../utils/vscode" import { normalizeApiConfiguration } from "./ApiOptions" import { ModelInfoView } from "./ModelInfoView" - -type ModelProvider = "glama" | "openRouter" | "unbound" | "requesty" | "openAi" - -type ModelKeys = `${T}Models` -type ConfigKeys = `${T}ModelId` -type InfoKeys = `${T}ModelInfo` -type RefreshMessageType = `refresh${Capitalize}Models` - -interface ModelPickerProps { - defaultModelId: string - modelsKey: ModelKeys - configKey: ConfigKeys - infoKey: InfoKeys - refreshMessageType: RefreshMessageType - refreshValues?: Record +import { ApiConfiguration, ModelInfo } from "../../../../src/shared/api" +import { Combobox, ComboboxContent, ComboboxEmpty, ComboboxInput, ComboboxItem } from "../ui/combobox" + +type ExtractType = NonNullable< + { [K in keyof ApiConfiguration]: Required[K] extends T ? K : never }[keyof ApiConfiguration] +> + +type ModelIdKeys = NonNullable< + { [K in keyof ApiConfiguration]: K extends `${string}ModelId` ? K : never }[keyof ApiConfiguration] +> +declare module "react" { + interface CSSProperties { + // Allow CSS variables + [key: `--${string}`]: string | number + } +} +interface ModelPickerProps { + defaultModelId?: string + models: Record | null + modelIdKey: ModelIdKeys + modelInfoKey: ExtractType serviceName: string serviceUrl: string recommendedModel: string - allowCustomModel?: boolean + apiConfiguration: ApiConfiguration + setApiConfigurationField: (field: K, value: ApiConfiguration[K]) => void + defaultModelInfo?: ModelInfo } export const ModelPicker = ({ defaultModelId, - modelsKey, - configKey, - infoKey, - refreshMessageType, - refreshValues, + models, + modelIdKey, + modelInfoKey, serviceName, serviceUrl, recommendedModel, - allowCustomModel = false, + apiConfiguration, + setApiConfigurationField, + defaultModelInfo, }: ModelPickerProps) => { - const [customModelId, setCustomModelId] = useState("") - const [isCustomModel, setIsCustomModel] = useState(false) - const [open, setOpen] = useState(false) - const [value, setValue] = useState(defaultModelId) const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false) - const prevRefreshValuesRef = useRef | undefined>() - - const { apiConfiguration, [modelsKey]: models, onUpdateApiConfig, setApiConfiguration } = useExtensionState() - const modelIds = useMemo( - () => (Array.isArray(models) ? models : Object.keys(models)).sort((a, b) => a.localeCompare(b)), - [models], - ) + const modelIds = useMemo(() => Object.keys(models ?? 
{}).sort((a, b) => a.localeCompare(b)), [models]) const { selectedModelId, selectedModelInfo } = useMemo( () => normalizeApiConfiguration(apiConfiguration), [apiConfiguration], ) - - const onSelectCustomModel = useCallback( - (modelId: string) => { - setCustomModelId(modelId) - const modelInfo = { id: modelId } - const apiConfig = { ...apiConfiguration, [configKey]: modelId, [infoKey]: modelInfo } - setApiConfiguration(apiConfig) - onUpdateApiConfig(apiConfig) - setValue(modelId) - setOpen(false) - setIsCustomModel(false) - }, - [apiConfiguration, configKey, infoKey, onUpdateApiConfig, setApiConfiguration], - ) - const onSelect = useCallback( (modelId: string) => { - const modelInfo = Array.isArray(models) - ? { id: modelId } // For OpenAI models which are just strings - : models[modelId] // For other models that have full info objects - const apiConfig = { ...apiConfiguration, [configKey]: modelId, [infoKey]: modelInfo } - setApiConfiguration(apiConfig) - onUpdateApiConfig(apiConfig) - setValue(modelId) - setOpen(false) + const modelInfo = models?.[modelId] + setApiConfigurationField(modelIdKey, modelId) + setApiConfigurationField(modelInfoKey, modelInfo ?? defaultModelInfo) }, - [apiConfiguration, configKey, infoKey, models, onUpdateApiConfig, setApiConfiguration], + [modelIdKey, modelInfoKey, models, setApiConfigurationField, defaultModelInfo], ) - - const debouncedRefreshModels = useMemo(() => { - return debounce(() => { - const message = refreshValues - ? { type: refreshMessageType, values: refreshValues } - : { type: refreshMessageType } - vscode.postMessage(message) - }, 100) - }, [refreshMessageType, refreshValues]) - - useMount(() => { - debouncedRefreshModels() - return () => debouncedRefreshModels.clear() - }) - useEffect(() => { - if (!refreshValues) { - prevRefreshValuesRef.current = undefined - return - } - - // Check if all values in refreshValues are truthy - if (Object.values(refreshValues).some((value) => !value)) { - prevRefreshValuesRef.current = undefined - return - } - - // Compare with previous values - const prevValues = prevRefreshValuesRef.current - if (prevValues && JSON.stringify(prevValues) === JSON.stringify(refreshValues)) { - return + if (apiConfiguration[modelIdKey] == null && defaultModelId) { + onSelect(defaultModelId) } - - prevRefreshValuesRef.current = refreshValues - debouncedRefreshModels() - }, [debouncedRefreshModels, refreshValues]) - - useEffect(() => setValue(selectedModelId), [selectedModelId]) + }, [apiConfiguration, defaultModelId, modelIdKey, onSelect]) return ( <>
Model
- - - - - - - - - No model found. - - {modelIds.map((model) => ( - - {model} - - - ))} - - {allowCustomModel && ( - - { - setIsCustomModel(true) - setOpen(false) - }}> - + Add custom model - - - )} - - - - + + + + No model found. + {modelIds.map((model) => ( + + {model} + + ))} + + + {selectedModelId && selectedModelInfo && ( onSelect(recommendedModel)}>{recommendedModel}. You can also try searching "free" for no-cost options currently available.

- {allowCustomModel && isCustomModel && ( -
-
-

Add Custom Model

- setCustomModelId(e.target.value)} - /> -
- - -
-
-
- )} ) } diff --git a/webview-ui/src/components/settings/OpenAiModelPicker.tsx b/webview-ui/src/components/settings/OpenAiModelPicker.tsx deleted file mode 100644 index 040da1d4210..00000000000 --- a/webview-ui/src/components/settings/OpenAiModelPicker.tsx +++ /dev/null @@ -1,27 +0,0 @@ -import React from "react" -import { useExtensionState } from "../../context/ExtensionStateContext" -import { ModelPicker } from "./ModelPicker" - -const OpenAiModelPicker: React.FC = () => { - const { apiConfiguration } = useExtensionState() - - return ( - - ) -} - -export default OpenAiModelPicker diff --git a/webview-ui/src/components/settings/OpenRouterModelPicker.tsx b/webview-ui/src/components/settings/OpenRouterModelPicker.tsx deleted file mode 100644 index c773478e542..00000000000 --- a/webview-ui/src/components/settings/OpenRouterModelPicker.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import { ModelPicker } from "./ModelPicker" -import { openRouterDefaultModelId } from "../../../../src/shared/api" - -export const OpenRouterModelPicker = () => ( - -) diff --git a/webview-ui/src/components/settings/RequestyModelPicker.tsx b/webview-ui/src/components/settings/RequestyModelPicker.tsx deleted file mode 100644 index c65067068aa..00000000000 --- a/webview-ui/src/components/settings/RequestyModelPicker.tsx +++ /dev/null @@ -1,22 +0,0 @@ -import { ModelPicker } from "./ModelPicker" -import { requestyDefaultModelId } from "../../../../src/shared/api" -import { useExtensionState } from "@/context/ExtensionStateContext" - -export const RequestyModelPicker = () => { - const { apiConfiguration } = useExtensionState() - return ( - - ) -} diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index 761e8565214..75ba11107c4 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -1,4 +1,4 @@ -import { forwardRef, memo, useCallback, useEffect, useImperativeHandle, useRef, useState } from "react" +import { forwardRef, memo, useCallback, useEffect, useImperativeHandle, useMemo, useRef, useState } from "react" import { VSCodeButton, VSCodeCheckbox, VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react" import { Dropdown, type DropdownOption } from "vscrui" @@ -45,7 +45,6 @@ const SettingsView = forwardRef(({ onDone }, // TODO: Reduce WebviewMessage/ExtensionState complexity const { currentApiConfigName } = extensionState const { - apiConfiguration, alwaysAllowReadOnly, allowedCommands, alwaysAllowBrowser, @@ -69,6 +68,9 @@ const SettingsView = forwardRef(({ onDone }, terminalOutputLineLimit, writeDelayMs, } = cachedState + + //Make sure apiConfiguration is initialized and managed by SettingsView + const apiConfiguration = useMemo(() => cachedState.apiConfiguration ?? 
{}, [cachedState.apiConfiguration]) useEffect(() => { // Update only when currentApiConfigName is changed diff --git a/webview-ui/src/components/settings/UnboundModelPicker.tsx b/webview-ui/src/components/settings/UnboundModelPicker.tsx deleted file mode 100644 index 4901884f1e6..00000000000 --- a/webview-ui/src/components/settings/UnboundModelPicker.tsx +++ /dev/null @@ -1,15 +0,0 @@ -import { ModelPicker } from "./ModelPicker" -import { unboundDefaultModelId } from "../../../../src/shared/api" - -export const UnboundModelPicker = () => ( - -) diff --git a/webview-ui/src/components/settings/__tests__/ModelPicker.test.tsx b/webview-ui/src/components/settings/__tests__/ModelPicker.test.tsx index 4e7c67c1872..49d60c55c48 100644 --- a/webview-ui/src/components/settings/__tests__/ModelPicker.test.tsx +++ b/webview-ui/src/components/settings/__tests__/ModelPicker.test.tsx @@ -3,7 +3,6 @@ import { screen, fireEvent, render } from "@testing-library/react" import { act } from "react" import { ModelPicker } from "../ModelPicker" -import { useExtensionState } from "../../../context/ExtensionStateContext" jest.mock("../../../context/ExtensionStateContext", () => ({ useExtensionState: jest.fn(), @@ -20,36 +19,40 @@ global.ResizeObserver = MockResizeObserver Element.prototype.scrollIntoView = jest.fn() describe("ModelPicker", () => { - const mockOnUpdateApiConfig = jest.fn() - const mockSetApiConfiguration = jest.fn() - + const mockSetApiConfigurationField = jest.fn() + const modelInfo = { + maxTokens: 8192, + contextWindow: 200_000, + supportsImages: true, + supportsComputerUse: true, + supportsPromptCache: true, + inputPrice: 3.0, + outputPrice: 15.0, + cacheWritesPrice: 3.75, + cacheReadsPrice: 0.3, + } + const mockModels = { + model1: { name: "Model 1", description: "Test model 1", ...modelInfo }, + model2: { name: "Model 2", description: "Test model 2", ...modelInfo }, + } const defaultProps = { + apiConfiguration: {}, defaultModelId: "model1", - modelsKey: "glamaModels" as const, - configKey: "glamaModelId" as const, - infoKey: "glamaModelInfo" as const, - refreshMessageType: "refreshGlamaModels" as const, + defaultModelInfo: modelInfo, + modelIdKey: "glamaModelId" as const, + modelInfoKey: "glamaModelInfo" as const, serviceName: "Test Service", serviceUrl: "https://test.service", recommendedModel: "recommended-model", - } - - const mockModels = { - model1: { name: "Model 1", description: "Test model 1" }, - model2: { name: "Model 2", description: "Test model 2" }, + models: mockModels, + setApiConfigurationField: mockSetApiConfigurationField, } beforeEach(() => { jest.clearAllMocks() - ;(useExtensionState as jest.Mock).mockReturnValue({ - apiConfiguration: {}, - setApiConfiguration: mockSetApiConfiguration, - glamaModels: mockModels, - onUpdateApiConfig: mockOnUpdateApiConfig, - }) }) - it("calls onUpdateApiConfig when a model is selected", async () => { + it("calls setApiConfigurationField when a model is selected", async () => { await act(async () => { render() }) @@ -67,20 +70,12 @@ describe("ModelPicker", () => { await act(async () => { // Find and click the model item by its value. - const modelItem = screen.getByRole("option", { name: "model2" }) - fireEvent.click(modelItem) + const modelItem = screen.getByTestId("model-input") + fireEvent.input(modelItem, { target: { value: "model2" } }) }) // Verify the API config was updated. 
- expect(mockSetApiConfiguration).toHaveBeenCalledWith({ - glamaModelId: "model2", - glamaModelInfo: mockModels["model2"], - }) - - // Verify onUpdateApiConfig was called with the new config. - expect(mockOnUpdateApiConfig).toHaveBeenCalledWith({ - glamaModelId: "model2", - glamaModelInfo: mockModels["model2"], - }) + expect(mockSetApiConfigurationField).toHaveBeenCalledWith(defaultProps.modelIdKey, "model2") + expect(mockSetApiConfigurationField).toHaveBeenCalledWith(defaultProps.modelInfoKey, mockModels.model2) }) }) diff --git a/webview-ui/src/utils/validate.ts b/webview-ui/src/utils/validate.ts index 19b13e2c6c2..97c702637c4 100644 --- a/webview-ui/src/utils/validate.ts +++ b/webview-ui/src/utils/validate.ts @@ -1,9 +1,4 @@ -import { - ApiConfiguration, - glamaDefaultModelId, - openRouterDefaultModelId, - unboundDefaultModelId, -} from "../../../src/shared/api" +import { ApiConfiguration } from "../../../src/shared/api" import { ModelInfo } from "../../../src/shared/api" export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): string | undefined { if (apiConfiguration) { @@ -86,7 +81,7 @@ export function validateModelId( if (apiConfiguration) { switch (apiConfiguration.apiProvider) { case "glama": - const glamaModelId = apiConfiguration.glamaModelId || glamaDefaultModelId // in case the user hasn't changed the model id, it will be undefined by default + const glamaModelId = apiConfiguration.glamaModelId if (!glamaModelId) { return "You must provide a model ID." } @@ -96,7 +91,7 @@ export function validateModelId( } break case "openrouter": - const modelId = apiConfiguration.openRouterModelId || openRouterDefaultModelId // in case the user hasn't changed the model id, it will be undefined by default + const modelId = apiConfiguration.openRouterModelId if (!modelId) { return "You must provide a model ID." } @@ -106,7 +101,7 @@ export function validateModelId( } break case "unbound": - const unboundModelId = apiConfiguration.unboundModelId || unboundDefaultModelId + const unboundModelId = apiConfiguration.unboundModelId if (!unboundModelId) { return "You must provide a model ID." 
} From 1a3b8700ba2c8f93f31cdfed7aa0fadb38d584d0 Mon Sep 17 00:00:00 2001 From: System233 Date: Wed, 26 Feb 2025 06:42:29 +0800 Subject: [PATCH 064/145] Improved error message feedback in settings panel --- webview-ui/src/__mocks__/vscrui.ts | 3 + .../components/settings/ApiErrorMessage.tsx | 16 +++++ .../src/components/settings/ApiOptions.tsx | 69 ++++++++----------- .../src/components/settings/ModelPicker.tsx | 41 ++++++++--- .../src/components/settings/SettingsView.tsx | 53 ++++---------- .../settings/__tests__/ApiOptions.test.tsx | 4 ++ .../src/components/welcome/WelcomeView.tsx | 2 + 7 files changed, 98 insertions(+), 90 deletions(-) create mode 100644 webview-ui/src/components/settings/ApiErrorMessage.tsx diff --git a/webview-ui/src/__mocks__/vscrui.ts b/webview-ui/src/__mocks__/vscrui.ts index 76760ba5cce..9b4a20f4d6b 100644 --- a/webview-ui/src/__mocks__/vscrui.ts +++ b/webview-ui/src/__mocks__/vscrui.ts @@ -8,6 +8,9 @@ export const Dropdown = ({ children, value, onChange }: any) => export const Pane = ({ children }: any) => React.createElement("div", { "data-testid": "mock-pane" }, children) +export const Button = ({ children, ...props }: any) => + React.createElement("div", { "data-testid": "mock-button", ...props }, children) + export type DropdownOption = { label: string value: string diff --git a/webview-ui/src/components/settings/ApiErrorMessage.tsx b/webview-ui/src/components/settings/ApiErrorMessage.tsx new file mode 100644 index 00000000000..4b419957b6c --- /dev/null +++ b/webview-ui/src/components/settings/ApiErrorMessage.tsx @@ -0,0 +1,16 @@ +import React from "react" + +interface ApiErrorMessageProps { + errorMessage: string | undefined + children?: React.ReactNode +} +const ApiErrorMessage = ({ errorMessage, children }: ApiErrorMessageProps) => { + return ( +
+ + {errorMessage} + {children} +
+ ) +} +export default ApiErrorMessage diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 594cd2fd5fb..f0c2b0e45fa 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -1,4 +1,4 @@ -import { memo, useCallback, useMemo, useState } from "react" +import React, { memo, useCallback, useEffect, useMemo, useState } from "react" import { useDebounce, useEvent } from "react-use" import { Checkbox, Dropdown, Pane, type DropdownOption } from "vscrui" import { VSCodeLink, VSCodeRadio, VSCodeRadioGroup, VSCodeTextField } from "@vscode/webview-ui-toolkit/react" @@ -42,23 +42,25 @@ import { ModelInfoView } from "./ModelInfoView" import { DROPDOWN_Z_INDEX } from "./styles" import { ModelPicker } from "./ModelPicker" import { TemperatureControl } from "./TemperatureControl" +import { validateApiConfiguration, validateModelId } from "@/utils/validate" +import ApiErrorMessage from "./ApiErrorMessage" interface ApiOptionsProps { uriScheme: string | undefined apiConfiguration: ApiConfiguration setApiConfigurationField: (field: K, value: ApiConfiguration[K]) => void - apiErrorMessage?: string - modelIdErrorMessage?: string fromWelcomeView?: boolean + errorMessage: string | undefined + setErrorMessage: React.Dispatch> } const ApiOptions = ({ uriScheme, apiConfiguration, setApiConfigurationField, - apiErrorMessage, - modelIdErrorMessage, fromWelcomeView, + errorMessage, + setErrorMessage, }: ApiOptionsProps) => { const [ollamaModels, setOllamaModels] = useState([]) const [lmStudioModels, setLmStudioModels] = useState([]) @@ -146,6 +148,13 @@ const ApiOptions = ({ ], ) + useEffect(() => { + const apiValidationResult = + validateApiConfiguration(apiConfiguration) || + validateModelId(apiConfiguration, glamaModels, openRouterModels, unboundModels) + setErrorMessage(apiValidationResult) + }, [apiConfiguration, glamaModels, openRouterModels, setErrorMessage, unboundModels]) + const handleMessage = useCallback((event: MessageEvent) => { const message: ExtensionMessage = event.data switch (message.type) { @@ -626,6 +635,7 @@ const ApiOptions = ({ ]} />
+ {errorMessage && }

{/* end Model Info Configuration */} - -

- - (Note: Roo Code uses complex prompts and works best - with Claude models. Less capable models may not work as expected.) - -

)} @@ -1100,6 +1099,7 @@ const ApiOptions = ({ placeholder={"e.g. meta-llama-3.1-8b-instruct"}> Model ID + {errorMessage && } {lmStudioModels.length > 0 && ( Model ID + {errorMessage && ( +
+ + {errorMessage} +
+ )} {ollamaModels.length > 0 && (
)} - {apiErrorMessage && ( -

- - {apiErrorMessage} -

- )} - {selectedProvider === "glama" && ( )} @@ -1364,6 +1360,7 @@ const ApiOptions = ({ serviceName="OpenRouter" serviceUrl="https://openrouter.ai/models" recommendedModel="anthropic/claude-3.7-sonnet" + errorMessage={errorMessage} /> )} {selectedProvider === "requesty" && ( @@ -1378,6 +1375,7 @@ const ApiOptions = ({ serviceName="Requesty" serviceUrl="https://requesty.ai" recommendedModel="anthropic/claude-3-7-sonnet-latest" + errorMessage={errorMessage} /> )} @@ -1401,6 +1399,7 @@ const ApiOptions = ({ {selectedProvider === "deepseek" && createDropdown(deepSeekModels)} {selectedProvider === "mistral" && createDropdown(mistralModels)} + {errorMessage && } )} - - {modelIdErrorMessage && ( -

- - {modelIdErrorMessage} -

- )} ) } diff --git a/webview-ui/src/components/settings/ModelPicker.tsx b/webview-ui/src/components/settings/ModelPicker.tsx index 8fd6d82daa7..fd62bfb97b6 100644 --- a/webview-ui/src/components/settings/ModelPicker.tsx +++ b/webview-ui/src/components/settings/ModelPicker.tsx @@ -5,6 +5,7 @@ import { normalizeApiConfiguration } from "./ApiOptions" import { ModelInfoView } from "./ModelInfoView" import { ApiConfiguration, ModelInfo } from "../../../../src/shared/api" import { Combobox, ComboboxContent, ComboboxEmpty, ComboboxInput, ComboboxItem } from "../ui/combobox" +import ApiErrorMessage from "./ApiErrorMessage" type ExtractType = NonNullable< { [K in keyof ApiConfiguration]: Required[K] extends T ? K : never }[keyof ApiConfiguration] @@ -30,6 +31,7 @@ interface ModelPickerProps { apiConfiguration: ApiConfiguration setApiConfigurationField: (field: K, value: ApiConfiguration[K]) => void defaultModelInfo?: ModelInfo + errorMessage?: string } export const ModelPicker = ({ @@ -43,6 +45,7 @@ export const ModelPicker = ({ apiConfiguration, setApiConfigurationField, defaultModelInfo, + errorMessage, }: ModelPickerProps) => { const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false) @@ -69,11 +72,16 @@ export const ModelPicker = ({ return ( <>
Model
- + No model found. @@ -85,13 +93,30 @@ export const ModelPicker = ({ - {selectedModelId && selectedModelInfo && ( - + {errorMessage ? ( + +

+ + Note: Roo Code uses complex prompts and works best + with Claude models. Less capable models may not work as expected. + +

+
+ ) : ( + selectedModelId && + selectedModelInfo && ( + + ) )}

The extension automatically fetches the latest list of models available on{" "} diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index 75ba11107c4..ee032c3ee06 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -1,6 +1,6 @@ import { forwardRef, memo, useCallback, useEffect, useImperativeHandle, useMemo, useRef, useState } from "react" import { VSCodeButton, VSCodeCheckbox, VSCodeLink, VSCodeTextField } from "@vscode/webview-ui-toolkit/react" -import { Dropdown, type DropdownOption } from "vscrui" +import { Button, Dropdown, type DropdownOption } from "vscrui" import { AlertDialog, @@ -14,7 +14,6 @@ import { } from "@/components/ui" import { vscode } from "../../utils/vscode" -import { validateApiConfiguration, validateModelId } from "../../utils/validate" import { ExtensionStateContextType, useExtensionState } from "../../context/ExtensionStateContext" import { EXPERIMENT_IDS, experimentConfigsMap, ExperimentId } from "../../../../src/shared/experiments" import { ApiConfiguration } from "../../../../src/shared/api" @@ -33,14 +32,13 @@ export interface SettingsViewRef { const SettingsView = forwardRef(({ onDone }, ref) => { const extensionState = useExtensionState() - const [apiErrorMessage, setApiErrorMessage] = useState(undefined) - const [modelIdErrorMessage, setModelIdErrorMessage] = useState(undefined) const [commandInput, setCommandInput] = useState("") const [isDiscardDialogShow, setDiscardDialogShow] = useState(false) const [cachedState, setCachedState] = useState(extensionState) const [isChangeDetected, setChangeDetected] = useState(false) const prevApiConfigName = useRef(extensionState.currentApiConfigName) const confirmDialogHandler = useRef<() => void>() + const [errorMessage, setErrorMessage] = useState(undefined) // TODO: Reduce WebviewMessage/ExtensionState complexity const { currentApiConfigName } = extensionState @@ -135,20 +133,9 @@ const SettingsView = forwardRef(({ onDone }, } }) }, []) - + const isSettingValid = !errorMessage const handleSubmit = () => { - const apiValidationResult = validateApiConfiguration(apiConfiguration) - - const modelIdValidationResult = validateModelId( - apiConfiguration, - extensionState.glamaModels, - extensionState.openRouterModels, - ) - - setApiErrorMessage(apiValidationResult) - setModelIdErrorMessage(modelIdValidationResult) - - if (!apiValidationResult && !modelIdValidationResult) { + if (isSettingValid) { vscode.postMessage({ type: "alwaysAllowReadOnly", bool: alwaysAllowReadOnly }) vscode.postMessage({ type: "alwaysAllowWrite", bool: alwaysAllowWrite }) vscode.postMessage({ type: "alwaysAllowExecute", bool: alwaysAllowExecute }) @@ -177,23 +164,6 @@ const SettingsView = forwardRef(({ onDone }, } } - useEffect(() => { - setApiErrorMessage(undefined) - setModelIdErrorMessage(undefined) - }, [apiConfiguration]) - - // Initial validation on mount - useEffect(() => { - const apiValidationResult = validateApiConfiguration(apiConfiguration) - const modelIdValidationResult = validateModelId( - apiConfiguration, - extensionState.glamaModels, - extensionState.openRouterModels, - ) - setApiErrorMessage(apiValidationResult) - setModelIdErrorMessage(modelIdValidationResult) - }, [apiConfiguration, extensionState.glamaModels, extensionState.openRouterModels]) - const checkUnsaveChanges = useCallback( (then: () => void) => { if (isChangeDetected) { @@ -287,13 +257,14 @@ const SettingsView = forwardRef(({ 
onDone }, justifyContent: "space-between", gap: "6px", }}> - + disabled={!isChangeDetected || !isSettingValid}> Save - + (({ onDone }, uriScheme={extensionState.uriScheme} apiConfiguration={apiConfiguration} setApiConfigurationField={setApiConfigurationField} - apiErrorMessage={apiErrorMessage} - modelIdErrorMessage={modelIdErrorMessage} + errorMessage={errorMessage} + setErrorMessage={setErrorMessage} /> diff --git a/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx b/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx index 8f2d0dff893..73394bae104 100644 --- a/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx +++ b/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx @@ -51,6 +51,8 @@ describe("ApiOptions", () => { render( {}} uriScheme={undefined} apiConfiguration={{}} setApiConfigurationField={() => {}} @@ -69,4 +71,6 @@ describe("ApiOptions", () => { renderApiOptions({ fromWelcomeView: true }) expect(screen.queryByTestId("temperature-control")).not.toBeInTheDocument() }) + + //TODO: More test cases needed }) diff --git a/webview-ui/src/components/welcome/WelcomeView.tsx b/webview-ui/src/components/welcome/WelcomeView.tsx index 858d2622f39..5d880efc0b9 100644 --- a/webview-ui/src/components/welcome/WelcomeView.tsx +++ b/webview-ui/src/components/welcome/WelcomeView.tsx @@ -42,6 +42,8 @@ const WelcomeView = () => { apiConfiguration={apiConfiguration || {}} uriScheme={uriScheme} setApiConfigurationField={(field, value) => setApiConfiguration({ [field]: value })} + errorMessage={errorMessage} + setErrorMessage={setErrorMessage} /> From 48975003afe593c975476acfd348ceb6110d7ced Mon Sep 17 00:00:00 2001 From: System233 Date: Wed, 26 Feb 2025 06:50:11 +0800 Subject: [PATCH 065/145] Remove ModelInfo related exports from ExtensionStateContext --- .../src/context/ExtensionStateContext.tsx | 84 +------------------ 1 file changed, 1 insertion(+), 83 deletions(-) diff --git a/webview-ui/src/context/ExtensionStateContext.tsx b/webview-ui/src/context/ExtensionStateContext.tsx index 3dca8d5f51c..c2c4d181e4a 100644 --- a/webview-ui/src/context/ExtensionStateContext.tsx +++ b/webview-ui/src/context/ExtensionStateContext.tsx @@ -1,18 +1,7 @@ import React, { createContext, useCallback, useContext, useEffect, useState } from "react" import { useEvent } from "react-use" import { ApiConfigMeta, ExtensionMessage, ExtensionState } from "../../../src/shared/ExtensionMessage" -import { - ApiConfiguration, - ModelInfo, - glamaDefaultModelId, - glamaDefaultModelInfo, - openRouterDefaultModelId, - openRouterDefaultModelInfo, - unboundDefaultModelId, - unboundDefaultModelInfo, - requestyDefaultModelId, - requestyDefaultModelInfo, -} from "../../../src/shared/api" +import { ApiConfiguration } from "../../../src/shared/api" import { vscode } from "../utils/vscode" import { convertTextMateToHljs } from "../utils/textMateToHljs" import { findLastIndex } from "../../../src/shared/array" @@ -26,11 +15,6 @@ export interface ExtensionStateContextType extends ExtensionState { didHydrateState: boolean showWelcome: boolean theme: any - glamaModels: Record - requestyModels: Record - openRouterModels: Record - unboundModels: Record - openAiModels: string[] mcpServers: McpServer[] currentCheckpoint?: string filePaths: string[] @@ -70,7 +54,6 @@ export interface ExtensionStateContextType extends ExtensionState { setRateLimitSeconds: (value: number) => void setCurrentApiConfigName: (value: string) => void setListApiConfigMeta: (value: ApiConfigMeta[]) => void - 
onUpdateApiConfig: (apiConfig: ApiConfiguration) => void mode: Mode setMode: (value: Mode) => void setCustomModePrompts: (value: CustomModePrompts) => void @@ -124,21 +107,8 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode const [showWelcome, setShowWelcome] = useState(false) const [theme, setTheme] = useState(undefined) const [filePaths, setFilePaths] = useState([]) - const [glamaModels, setGlamaModels] = useState>({ - [glamaDefaultModelId]: glamaDefaultModelInfo, - }) const [openedTabs, setOpenedTabs] = useState>([]) - const [openRouterModels, setOpenRouterModels] = useState>({ - [openRouterDefaultModelId]: openRouterDefaultModelInfo, - }) - const [unboundModels, setUnboundModels] = useState>({ - [unboundDefaultModelId]: unboundDefaultModelInfo, - }) - const [requestyModels, setRequestyModels] = useState>({ - [requestyDefaultModelId]: requestyDefaultModelInfo, - }) - const [openAiModels, setOpenAiModels] = useState([]) const [mcpServers, setMcpServers] = useState([]) const [currentCheckpoint, setCurrentCheckpoint] = useState() @@ -146,18 +116,6 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode (value: ApiConfigMeta[]) => setState((prevState) => ({ ...prevState, listApiConfigMeta: value })), [], ) - - const onUpdateApiConfig = useCallback((apiConfig: ApiConfiguration) => { - setState((currentState) => { - vscode.postMessage({ - type: "upsertApiConfiguration", - text: currentState.currentApiConfigName, - apiConfiguration: { ...currentState.apiConfiguration, ...apiConfig }, - }) - return currentState // No state update needed - }) - }, []) - const handleMessage = useCallback( (event: MessageEvent) => { const message: ExtensionMessage = event.data @@ -202,40 +160,6 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode }) break } - case "glamaModels": { - const updatedModels = message.glamaModels ?? {} - setGlamaModels({ - [glamaDefaultModelId]: glamaDefaultModelInfo, // in case the extension sent a model list without the default model - ...updatedModels, - }) - break - } - case "openRouterModels": { - const updatedModels = message.openRouterModels ?? {} - setOpenRouterModels({ - [openRouterDefaultModelId]: openRouterDefaultModelInfo, // in case the extension sent a model list without the default model - ...updatedModels, - }) - break - } - case "openAiModels": { - const updatedModels = message.openAiModels ?? [] - setOpenAiModels(updatedModels) - break - } - case "unboundModels": { - const updatedModels = message.unboundModels ?? {} - setUnboundModels(updatedModels) - break - } - case "requestyModels": { - const updatedModels = message.requestyModels ?? {} - setRequestyModels({ - [requestyDefaultModelId]: requestyDefaultModelInfo, // in case the extension sent a model list without the default model - ...updatedModels, - }) - break - } case "mcpServers": { setMcpServers(message.mcpServers ?? 
[]) break @@ -264,11 +188,6 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode didHydrateState, showWelcome, theme, - glamaModels, - requestyModels, - openRouterModels, - openAiModels, - unboundModels, mcpServers, currentCheckpoint, filePaths, @@ -316,7 +235,6 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode setRateLimitSeconds: (value) => setState((prevState) => ({ ...prevState, rateLimitSeconds: value })), setCurrentApiConfigName: (value) => setState((prevState) => ({ ...prevState, currentApiConfigName: value })), setListApiConfigMeta, - onUpdateApiConfig, setMode: (value: Mode) => setState((prevState) => ({ ...prevState, mode: value })), setCustomModePrompts: (value) => setState((prevState) => ({ ...prevState, customModePrompts: value })), setCustomSupportPrompts: (value) => setState((prevState) => ({ ...prevState, customSupportPrompts: value })), From 05151ed2254e8698d4c380ceb8daa36e019f82aa Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Tue, 25 Feb 2025 18:30:37 -0500 Subject: [PATCH 066/145] Update package.json --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 84bec2645a5..28045436e6b 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "roo-cline", "displayName": "Roo Code (prev. Roo Cline)", - "description": "An AI-powered autonomous coding agent that lives in your editor.", + "description": "A whole dev team of AI agents in your editor.", "publisher": "RooVeterinaryInc", "version": "3.7.4", "icon": "assets/icons/rocket.png", From e56908f6c1b800bd2a2b4edd85f725c1a0055920 Mon Sep 17 00:00:00 2001 From: cte Date: Tue, 25 Feb 2025 15:48:20 -0800 Subject: [PATCH 067/145] Thinking settings tweaks --- src/api/providers/anthropic.ts | 23 +++++---- src/api/providers/openrouter.ts | 12 ++++- src/shared/api.ts | 14 +++++- .../src/components/settings/ApiOptions.tsx | 50 ++++++------------- 4 files changed, 51 insertions(+), 48 deletions(-) diff --git a/src/api/providers/anthropic.ts b/src/api/providers/anthropic.ts index 2d1f07f833f..c9073506072 100644 --- a/src/api/providers/anthropic.ts +++ b/src/api/providers/anthropic.ts @@ -14,8 +14,6 @@ import { ApiStream } from "../transform/stream" const ANTHROPIC_DEFAULT_TEMPERATURE = 0 -const THINKING_MODELS = ["claude-3-7-sonnet-20250219"] - export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { private options: ApiHandlerOptions private client: Anthropic @@ -32,16 +30,19 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { let stream: AnthropicStream const cacheControl: CacheControlEphemeral = { type: "ephemeral" } - const modelId = this.getModel().id - const maxTokens = this.getModel().info.maxTokens || 8192 + let { id: modelId, info: modelInfo } = this.getModel() + const maxTokens = modelInfo.maxTokens || 8192 + const budgetTokens = this.options.anthropicThinking ?? Math.min(maxTokens - 1, 8192) let temperature = this.options.modelTemperature ?? ANTHROPIC_DEFAULT_TEMPERATURE let thinking: BetaThinkingConfigParam | undefined = undefined - if (THINKING_MODELS.includes(modelId)) { - thinking = this.options.anthropicThinking - ? { type: "enabled", budget_tokens: this.options.anthropicThinking } - : { type: "disabled" } - + // Anthropic "Thinking" models require a temperature of 1.0. 
+ if (modelId === "claude-3-7-sonnet-20250219:thinking") { + // The `:thinking` variant is a virtual identifier for the + // `claude-3-7-sonnet-20250219` model with a thinking budget. + // We can handle this more elegantly in the future. + modelId = "claude-3-7-sonnet-20250219" + thinking = { type: "enabled", budget_tokens: budgetTokens } temperature = 1.0 } @@ -114,8 +115,8 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { default: { stream = (await this.client.messages.create({ model: modelId, - max_tokens: this.getModel().info.maxTokens || 8192, - temperature: this.options.modelTemperature ?? ANTHROPIC_DEFAULT_TEMPERATURE, + max_tokens: maxTokens, + temperature, system: [{ text: systemPrompt, type: "text" }], messages, // tools, diff --git a/src/api/providers/openrouter.ts b/src/api/providers/openrouter.ts index 69c55b8e712..6bf4fa4a8cb 100644 --- a/src/api/providers/openrouter.ts +++ b/src/api/providers/openrouter.ts @@ -1,4 +1,5 @@ import { Anthropic } from "@anthropic-ai/sdk" +import { BetaThinkingConfigParam } from "@anthropic-ai/sdk/resources/beta" import axios from "axios" import OpenAI from "openai" import delay from "delay" @@ -17,6 +18,7 @@ const OPENROUTER_DEFAULT_TEMPERATURE = 0 type OpenRouterChatCompletionParams = OpenAI.Chat.ChatCompletionCreateParams & { transforms?: string[] include_reasoning?: boolean + thinking?: BetaThinkingConfigParam } // Add custom interface for OpenRouter usage chunk. @@ -57,7 +59,7 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler { // prompt caching: https://openrouter.ai/docs/prompt-caching // this is specifically for claude models (some models may 'support prompt caching' automatically without this) switch (true) { - case this.getModel().id.startsWith("anthropic/"): + case modelId.startsWith("anthropic/"): openAiMessages[0] = { role: "system", content: [ @@ -108,8 +110,13 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler { let temperature = this.options.modelTemperature ?? defaultTemperature + const maxTokens = modelInfo.maxTokens + const budgetTokens = this.options.anthropicThinking ?? Math.min((maxTokens ?? 8192) - 1, 8192) + let thinking: BetaThinkingConfigParam | undefined = undefined + // Anthropic "Thinking" models require a temperature of 1.0. if (modelInfo.thinking) { + thinking = { type: "enabled", budget_tokens: budgetTokens } temperature = 1.0 } @@ -118,8 +125,9 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler { const completionParams: OpenRouterChatCompletionParams = { model: modelId, - max_tokens: modelInfo.maxTokens, + max_tokens: maxTokens, temperature, + thinking, // OpenRouter is temporarily supporting this. 
top_p: topP, messages: openAiMessages, stream: true, diff --git a/src/shared/api.ts b/src/shared/api.ts index 63707e52b4b..5d4b8b120d7 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -103,7 +103,7 @@ export const THINKING_BUDGET = { export type AnthropicModelId = keyof typeof anthropicModels export const anthropicDefaultModelId: AnthropicModelId = "claude-3-7-sonnet-20250219" export const anthropicModels = { - "claude-3-7-sonnet-20250219": { + "claude-3-7-sonnet-20250219:thinking": { maxTokens: 16384, contextWindow: 200_000, supportsImages: true, @@ -115,6 +115,18 @@ export const anthropicModels = { cacheReadsPrice: 0.3, // $0.30 per million tokens thinking: true, }, + "claude-3-7-sonnet-20250219": { + maxTokens: 16384, + contextWindow: 200_000, + supportsImages: true, + supportsComputerUse: true, + supportsPromptCache: true, + inputPrice: 3.0, // $3 per million input tokens + outputPrice: 15.0, // $15 per million output tokens + cacheWritesPrice: 3.75, // $3.75 per million tokens + cacheReadsPrice: 0.3, // $0.30 per million tokens + thinking: false, + }, "claude-3-5-sonnet-20241022": { maxTokens: 8192, contextWindow: 200_000, diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 9d17cae4fa3..73dc4fd41f8 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -73,7 +73,7 @@ const ApiOptions = ({ const [openRouterBaseUrlSelected, setOpenRouterBaseUrlSelected] = useState(!!apiConfiguration?.openRouterBaseUrl) const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false) - const anthropicThinkingBudget = apiConfiguration?.anthropicThinking + const anthropicThinkingBudget = apiConfiguration?.anthropicThinking ?? THINKING_BUDGET.default const noTransform = (value: T) => value const inputEventTransform = (event: E) => (event as { target: HTMLInputElement })?.target?.value as any @@ -1272,39 +1272,21 @@ const ApiOptions = ({ )} {selectedModelInfo && selectedModelInfo.thinking && ( -

- - setApiConfigurationField( - "anthropicThinking", - checked - ? Math.min( - THINKING_BUDGET.default, - selectedModelInfo.maxTokens ?? THINKING_BUDGET.default, - ) - : undefined, - ) - }> - Thinking? - - {anthropicThinkingBudget && ( - <> -
- Number of tokens Claude is allowed to use for its internal reasoning process. -
-
- setApiConfigurationField("anthropicThinking", value[0])} - /> -
{anthropicThinkingBudget}
-
- - )} +
+
Thinking Budget
+
+ setApiConfigurationField("anthropicThinking", value[0])} + /> +
{anthropicThinkingBudget}
+
+
+ Number of tokens Claude is allowed to use for its internal reasoning process. +
)} From 8971e47b96ec7ae3a6de4ec5a95a2acab4cba1b7 Mon Sep 17 00:00:00 2001 From: cte Date: Tue, 25 Feb 2025 15:53:32 -0800 Subject: [PATCH 068/145] Add changeset --- .changeset/swift-kings-attack.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/swift-kings-attack.md diff --git a/.changeset/swift-kings-attack.md b/.changeset/swift-kings-attack.md new file mode 100644 index 00000000000..8a8a425611d --- /dev/null +++ b/.changeset/swift-kings-attack.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Pass "thinking" params to OpenRouter From 33fd3bd6b35caafce66fcd53b9070f60279fcc0d Mon Sep 17 00:00:00 2001 From: cte Date: Tue, 25 Feb 2025 16:26:40 -0800 Subject: [PATCH 069/145] Fix budgetTokens --- src/api/providers/anthropic.ts | 2 +- src/api/providers/openrouter.ts | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/api/providers/anthropic.ts b/src/api/providers/anthropic.ts index c9073506072..ad58a1cf6b2 100644 --- a/src/api/providers/anthropic.ts +++ b/src/api/providers/anthropic.ts @@ -32,7 +32,6 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { const cacheControl: CacheControlEphemeral = { type: "ephemeral" } let { id: modelId, info: modelInfo } = this.getModel() const maxTokens = modelInfo.maxTokens || 8192 - const budgetTokens = this.options.anthropicThinking ?? Math.min(maxTokens - 1, 8192) let temperature = this.options.modelTemperature ?? ANTHROPIC_DEFAULT_TEMPERATURE let thinking: BetaThinkingConfigParam | undefined = undefined @@ -42,6 +41,7 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { // `claude-3-7-sonnet-20250219` model with a thinking budget. // We can handle this more elegantly in the future. modelId = "claude-3-7-sonnet-20250219" + const budgetTokens = this.options.anthropicThinking ?? Math.max(maxTokens * 0.8, 1024) thinking = { type: "enabled", budget_tokens: budgetTokens } temperature = 1.0 } diff --git a/src/api/providers/openrouter.ts b/src/api/providers/openrouter.ts index 6bf4fa4a8cb..0a9488e816f 100644 --- a/src/api/providers/openrouter.ts +++ b/src/api/providers/openrouter.ts @@ -109,13 +109,11 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler { } let temperature = this.options.modelTemperature ?? defaultTemperature - - const maxTokens = modelInfo.maxTokens - const budgetTokens = this.options.anthropicThinking ?? Math.min((maxTokens ?? 8192) - 1, 8192) let thinking: BetaThinkingConfigParam | undefined = undefined - // Anthropic "Thinking" models require a temperature of 1.0. if (modelInfo.thinking) { + const maxTokens = modelInfo.maxTokens || 8192 + const budgetTokens = this.options.anthropicThinking ?? Math.max(maxTokens * 0.8, 1024) thinking = { type: "enabled", budget_tokens: budgetTokens } temperature = 1.0 } @@ -125,7 +123,7 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler { const completionParams: OpenRouterChatCompletionParams = { model: modelId, - max_tokens: maxTokens, + max_tokens: modelInfo.maxTokens, temperature, thinking, // OpenRouter is temporarily supporting this. 
top_p: topP, From f3d02030ac47420d8a9735c0d02f70e561475dae Mon Sep 17 00:00:00 2001 From: System233 Date: Wed, 26 Feb 2025 08:37:28 +0800 Subject: [PATCH 070/145] Fix: Input/output prices should be parsed using parseFloat --- webview-ui/src/components/settings/ApiOptions.tsx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 73dc4fd41f8..bfcf93256e8 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -900,7 +900,7 @@ const ApiOptions = ({ }} onChange={handleInputChange("openAiCustomModelInfo", (e) => { const value = (e.target as HTMLInputElement).value - const parsed = parseInt(value) + const parsed = parseFloat(value) return { ...(apiConfiguration?.openAiCustomModelInfo ?? openAiModelInfoSaneDefaults), @@ -945,7 +945,7 @@ const ApiOptions = ({ }} onChange={handleInputChange("openAiCustomModelInfo", (e) => { const value = (e.target as HTMLInputElement).value - const parsed = parseInt(value) + const parsed = parseFloat(value) return { ...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults), From 41e75bc9890036674cd31ce2d9da23fcd5127956 Mon Sep 17 00:00:00 2001 From: cte Date: Tue, 25 Feb 2025 23:02:43 -0800 Subject: [PATCH 071/145] Model picker fixes --- .changeset/real-ties-destroy.md | 5 + src/api/providers/requesty.ts | 36 +- src/core/webview/ClineProvider.ts | 22 +- src/shared/ExtensionMessage.ts | 11 +- src/shared/WebviewMessage.ts | 5 +- webview-ui/package-lock.json | 2 + .../components/settings/ApiErrorMessage.tsx | 18 +- .../src/components/settings/ApiOptions.tsx | 435 ++++++++---------- .../src/components/settings/ModelPicker.tsx | 91 ++-- .../src/components/settings/SettingsView.tsx | 48 +- .../components/settings/ThinkingBudget.tsx | 29 ++ webview-ui/src/components/ui/alert-dialog.tsx | 151 +++--- webview-ui/src/components/ui/dialog.tsx | 156 ++++--- .../src/components/welcome/WelcomeView.tsx | 8 +- webview-ui/src/utils/validate.ts | 254 ++++++---- 15 files changed, 657 insertions(+), 614 deletions(-) create mode 100644 .changeset/real-ties-destroy.md create mode 100644 webview-ui/src/components/settings/ThinkingBudget.tsx diff --git a/.changeset/real-ties-destroy.md b/.changeset/real-ties-destroy.md new file mode 100644 index 00000000000..a2e9ba8eb04 --- /dev/null +++ b/.changeset/real-ties-destroy.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Fix model picker diff --git a/src/api/providers/requesty.ts b/src/api/providers/requesty.ts index 2151a7172d1..5e570ca2a2b 100644 --- a/src/api/providers/requesty.ts +++ b/src/api/providers/requesty.ts @@ -42,26 +42,33 @@ export class RequestyHandler extends OpenAiHandler { } } -export async function getRequestyModels({ apiKey }: { apiKey?: string }) { +export async function getRequestyModels() { const models: Record = {} - if (!apiKey) { - return models - } - try { - const config: Record = {} - config["headers"] = { Authorization: `Bearer ${apiKey}` } - - const response = await axios.get("https://router.requesty.ai/v1/models", config) + const response = await axios.get("https://router.requesty.ai/v1/models") const rawModels = response.data.data for (const rawModel of rawModels) { + // { + // id: "anthropic/claude-3-5-sonnet-20240620", + // object: "model", + // created: 1740552655, + // owned_by: "system", + // input_price: 0.0000028, + // caching_price: 0.00000375, + // cached_price: 3e-7, + // output_price: 0.000015, 
+ // max_output_tokens: 8192, + // context_window: 200000, + // supports_caching: true, + // description: + // "Anthropic's previous most intelligent model. High level of intelligence and capability. Excells in coding.", + // } + const modelInfo: ModelInfo = { maxTokens: rawModel.max_output_tokens, contextWindow: rawModel.context_window, - supportsImages: rawModel.support_image, - supportsComputerUse: rawModel.support_computer_use, supportsPromptCache: rawModel.supports_caching, inputPrice: parseApiPrice(rawModel.input_price), outputPrice: parseApiPrice(rawModel.output_price), @@ -72,8 +79,15 @@ export async function getRequestyModels({ apiKey }: { apiKey?: string }) { switch (rawModel.id) { case rawModel.id.startsWith("anthropic/claude-3-7-sonnet"): + modelInfo.supportsComputerUse = true + modelInfo.supportsImages = true modelInfo.maxTokens = 16384 break + case rawModel.id.startsWith("anthropic/claude-3-5-sonnet-20241022"): + modelInfo.supportsComputerUse = true + modelInfo.supportsImages = true + modelInfo.maxTokens = 8192 + break case rawModel.id.startsWith("anthropic/"): modelInfo.maxTokens = 8192 break diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index 118bbddcf55..bc6f4578683 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -644,9 +644,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { } }) - const requestyApiKey = await this.getSecret("requestyApiKey") - - getRequestyModels({ apiKey: requestyApiKey }).then(async (requestyModels) => { + getRequestyModels().then(async (requestyModels) => { if (Object.keys(requestyModels).length > 0) { await fs.writeFile( path.join(cacheDir, GlobalFileNames.requestyModels), @@ -838,17 +836,15 @@ export class ClineProvider implements vscode.WebviewViewProvider { break case "refreshRequestyModels": - if (message?.values?.apiKey) { - const requestyModels = await getRequestyModels({ apiKey: message.values.apiKey }) + const requestyModels = await getRequestyModels() - if (Object.keys(requestyModels).length > 0) { - const cacheDir = await this.ensureCacheDirectoryExists() - await fs.writeFile( - path.join(cacheDir, GlobalFileNames.requestyModels), - JSON.stringify(requestyModels), - ) - await this.postMessageToWebview({ type: "requestyModels", requestyModels }) - } + if (Object.keys(requestyModels).length > 0) { + const cacheDir = await this.ensureCacheDirectoryExists() + await fs.writeFile( + path.join(cacheDir, GlobalFileNames.requestyModels), + JSON.stringify(requestyModels), + ) + await this.postMessageToWebview({ type: "requestyModels", requestyModels }) } break diff --git a/src/shared/ExtensionMessage.ts b/src/shared/ExtensionMessage.ts index 8f64a9ba056..e87edffed16 100644 --- a/src/shared/ExtensionMessage.ts +++ b/src/shared/ExtensionMessage.ts @@ -27,10 +27,11 @@ export interface ExtensionMessage { | "workspaceUpdated" | "invoke" | "partialMessage" - | "glamaModels" | "openRouterModels" - | "openAiModels" + | "glamaModels" + | "unboundModels" | "requestyModels" + | "openAiModels" | "mcpServers" | "enhancedPrompt" | "commitSearchResults" @@ -43,8 +44,6 @@ export interface ExtensionMessage { | "autoApprovalEnabled" | "updateCustomMode" | "deleteCustomMode" - | "unboundModels" - | "refreshUnboundModels" | "currentCheckpointUpdated" text?: string action?: @@ -67,11 +66,11 @@ export interface ExtensionMessage { path?: string }> partialMessage?: ClineMessage + openRouterModels?: Record glamaModels?: Record + unboundModels?: Record requestyModels?: Record - 
openRouterModels?: Record openAiModels?: string[] - unboundModels?: Record mcpServers?: McpServer[] commits?: GitCommit[] listApiConfig?: ApiConfigMeta[] diff --git a/src/shared/WebviewMessage.ts b/src/shared/WebviewMessage.ts index 106e6d243b9..fde7442cc1d 100644 --- a/src/shared/WebviewMessage.ts +++ b/src/shared/WebviewMessage.ts @@ -40,11 +40,11 @@ export interface WebviewMessage { | "openFile" | "openMention" | "cancelTask" - | "refreshGlamaModels" | "refreshOpenRouterModels" - | "refreshOpenAiModels" + | "refreshGlamaModels" | "refreshUnboundModels" | "refreshRequestyModels" + | "refreshOpenAiModels" | "alwaysAllowBrowser" | "alwaysAllowMcp" | "alwaysAllowModeSwitch" @@ -71,7 +71,6 @@ export interface WebviewMessage { | "mcpEnabled" | "enableMcpServerCreation" | "searchCommits" - | "refreshGlamaModels" | "alwaysApproveResubmit" | "requestDelaySeconds" | "rateLimitSeconds" diff --git a/webview-ui/package-lock.json b/webview-ui/package-lock.json index 1d64f934dc2..22564d01a65 100644 --- a/webview-ui/package-lock.json +++ b/webview-ui/package-lock.json @@ -3674,6 +3674,7 @@ "version": "1.1.6", "resolved": "https://registry.npmjs.org/@radix-ui/react-alert-dialog/-/react-alert-dialog-1.1.6.tgz", "integrity": "sha512-p4XnPqgej8sZAAReCAKgz1REYZEBLR8hU9Pg27wFnCWIMc8g1ccCs0FjBcy05V15VTu8pAePw/VDYeOm/uZ6yQ==", + "license": "MIT", "dependencies": { "@radix-ui/primitive": "1.1.1", "@radix-ui/react-compose-refs": "1.1.1", @@ -4719,6 +4720,7 @@ "version": "1.1.2", "resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.1.2.tgz", "integrity": "sha512-YAKxaiGsSQJ38VzKH86/BPRC4rh+b1Jpa+JneA5LRE7skmLPNAyeG8kPJj/oo4STLvlrs8vkf/iYyc3A5stYCQ==", + "license": "MIT", "dependencies": { "@radix-ui/react-compose-refs": "1.1.1" }, diff --git a/webview-ui/src/components/settings/ApiErrorMessage.tsx b/webview-ui/src/components/settings/ApiErrorMessage.tsx index 4b419957b6c..06764a1bfa0 100644 --- a/webview-ui/src/components/settings/ApiErrorMessage.tsx +++ b/webview-ui/src/components/settings/ApiErrorMessage.tsx @@ -4,13 +4,13 @@ interface ApiErrorMessageProps { errorMessage: string | undefined children?: React.ReactNode } -const ApiErrorMessage = ({ errorMessage, children }: ApiErrorMessageProps) => { - return ( -
- - {errorMessage} - {children} + +export const ApiErrorMessage = ({ errorMessage, children }: ApiErrorMessageProps) => ( +
+
+
+
{errorMessage}
- ) -} -export default ApiErrorMessage + {children} +
+) diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 107f2a483ae..c30035cef01 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -4,8 +4,6 @@ import { Checkbox, Dropdown, Pane, type DropdownOption } from "vscrui" import { VSCodeLink, VSCodeRadio, VSCodeRadioGroup, VSCodeTextField } from "@vscode/webview-ui-toolkit/react" import * as vscodemodels from "vscode" -import { Slider } from "@/components/ui" - import { ApiConfiguration, ModelInfo, @@ -33,7 +31,6 @@ import { unboundDefaultModelInfo, requestyDefaultModelId, requestyDefaultModelInfo, - THINKING_BUDGET, } from "../../../../src/shared/api" import { ExtensionMessage } from "../../../../src/shared/ExtensionMessage" @@ -44,7 +41,18 @@ import { DROPDOWN_Z_INDEX } from "./styles" import { ModelPicker } from "./ModelPicker" import { TemperatureControl } from "./TemperatureControl" import { validateApiConfiguration, validateModelId } from "@/utils/validate" -import ApiErrorMessage from "./ApiErrorMessage" +import { ApiErrorMessage } from "./ApiErrorMessage" +import { ThinkingBudget } from "./ThinkingBudget" + +const modelsByProvider: Record> = { + anthropic: anthropicModels, + bedrock: bedrockModels, + vertex: vertexModels, + gemini: geminiModels, + "openai-native": openAiNativeModels, + deepseek: deepSeekModels, + mistral: mistralModels, +} interface ApiOptionsProps { uriScheme: string | undefined @@ -66,18 +74,23 @@ const ApiOptions = ({ const [ollamaModels, setOllamaModels] = useState([]) const [lmStudioModels, setLmStudioModels] = useState([]) const [vsCodeLmModels, setVsCodeLmModels] = useState([]) + const [openRouterModels, setOpenRouterModels] = useState>({ [openRouterDefaultModelId]: openRouterDefaultModelInfo, }) + const [glamaModels, setGlamaModels] = useState>({ [glamaDefaultModelId]: glamaDefaultModelInfo, }) + const [unboundModels, setUnboundModels] = useState>({ [unboundDefaultModelId]: unboundDefaultModelInfo, }) + const [requestyModels, setRequestyModels] = useState>({ [requestyDefaultModelId]: requestyDefaultModelInfo, }) + const [openAiModels, setOpenAiModels] = useState | null>(null) const [anthropicBaseUrlSelected, setAnthropicBaseUrlSelected] = useState(!!apiConfiguration?.anthropicBaseUrl) @@ -85,8 +98,6 @@ const ApiOptions = ({ const [openRouterBaseUrlSelected, setOpenRouterBaseUrlSelected] = useState(!!apiConfiguration?.openRouterBaseUrl) const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false) - const anthropicThinkingBudget = apiConfiguration?.anthropicThinking ?? THINKING_BUDGET.default - const noTransform = (value: T) => value const inputEventTransform = (event: E) => (event as { target: HTMLInputElement })?.target?.value as any const dropdownEventTransform = (event: DropdownOption | string | undefined) => @@ -103,62 +114,87 @@ const ApiOptions = ({ [setApiConfigurationField], ) - const { selectedProvider, selectedModelId, selectedModelInfo } = useMemo(() => { - return normalizeApiConfiguration(apiConfiguration) - }, [apiConfiguration]) + const { selectedProvider, selectedModelId, selectedModelInfo } = useMemo( + () => normalizeApiConfiguration(apiConfiguration), + [apiConfiguration], + ) - // Pull ollama/lmstudio models - // Debounced model updates, only executed 250ms after the user stops typing + // Debounced refresh model updates, only executed 250ms after the user + // stops typing. 
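// `useDebounce` is assumed here to follow the usual react-use shape,
//   useDebounce(callback, delayMs, deps)
// so the refresh callback below runs only once its dependencies have been stable for
// 250ms, rather than firing a request on every keystroke in a URL or API-key field.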
useDebounce( () => { - if (selectedProvider === "ollama") { - vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl }) - } else if (selectedProvider === "lmstudio") { - vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl }) - } else if (selectedProvider === "vscode-lm") { - vscode.postMessage({ type: "requestVsCodeLmModels" }) - } else if (selectedProvider === "openai") { - vscode.postMessage({ - type: "refreshOpenAiModels", - values: { - baseUrl: apiConfiguration?.openAiBaseUrl, - apiKey: apiConfiguration?.openAiApiKey, - }, - }) - } else if (selectedProvider === "openrouter") { - vscode.postMessage({ type: "refreshOpenRouterModels", values: {} }) + if (selectedProvider === "openrouter") { + vscode.postMessage({ type: "refreshOpenRouterModels" }) } else if (selectedProvider === "glama") { - vscode.postMessage({ type: "refreshGlamaModels", values: {} }) + vscode.postMessage({ type: "refreshGlamaModels" }) + } else if (selectedProvider === "unbound") { + vscode.postMessage({ type: "refreshUnboundModels" }) } else if (selectedProvider === "requesty") { vscode.postMessage({ type: "refreshRequestyModels", - values: { - apiKey: apiConfiguration?.requestyApiKey, - }, + values: { apiKey: apiConfiguration?.requestyApiKey }, + }) + } else if (selectedProvider === "openai") { + vscode.postMessage({ + type: "refreshOpenAiModels", + values: { baseUrl: apiConfiguration?.openAiBaseUrl, apiKey: apiConfiguration?.openAiApiKey }, }) + } else if (selectedProvider === "ollama") { + vscode.postMessage({ type: "requestOllamaModels", text: apiConfiguration?.ollamaBaseUrl }) + } else if (selectedProvider === "lmstudio") { + vscode.postMessage({ type: "requestLmStudioModels", text: apiConfiguration?.lmStudioBaseUrl }) + } else if (selectedProvider === "vscode-lm") { + vscode.postMessage({ type: "requestVsCodeLmModels" }) } }, 250, [ selectedProvider, - apiConfiguration?.ollamaBaseUrl, - apiConfiguration?.lmStudioBaseUrl, + apiConfiguration?.requestyApiKey, apiConfiguration?.openAiBaseUrl, apiConfiguration?.openAiApiKey, - apiConfiguration?.requestyApiKey, + apiConfiguration?.ollamaBaseUrl, + apiConfiguration?.lmStudioBaseUrl, ], ) useEffect(() => { const apiValidationResult = validateApiConfiguration(apiConfiguration) || - validateModelId(apiConfiguration, glamaModels, openRouterModels, unboundModels) + validateModelId(apiConfiguration, glamaModels, openRouterModels, unboundModels, requestyModels) + setErrorMessage(apiValidationResult) - }, [apiConfiguration, glamaModels, openRouterModels, setErrorMessage, unboundModels]) + }, [apiConfiguration, glamaModels, openRouterModels, setErrorMessage, unboundModels, requestyModels]) - const handleMessage = useCallback((event: MessageEvent) => { + const onMessage = useCallback((event: MessageEvent) => { const message: ExtensionMessage = event.data + switch (message.type) { + case "openRouterModels": { + const updatedModels = message.openRouterModels ?? {} + setOpenRouterModels({ [openRouterDefaultModelId]: openRouterDefaultModelInfo, ...updatedModels }) + break + } + case "glamaModels": { + const updatedModels = message.glamaModels ?? {} + setGlamaModels({ [glamaDefaultModelId]: glamaDefaultModelInfo, ...updatedModels }) + break + } + case "unboundModels": { + const updatedModels = message.unboundModels ?? {} + setUnboundModels({ [unboundDefaultModelId]: unboundDefaultModelInfo, ...updatedModels }) + break + } + case "requestyModels": { + const updatedModels = message.requestyModels ?? 
{} + setRequestyModels({ [requestyDefaultModelId]: requestyDefaultModelInfo, ...updatedModels }) + break + } + case "openAiModels": { + const updatedModels = message.openAiModels ?? [] + setOpenAiModels(Object.fromEntries(updatedModels.map((item) => [item, openAiModelInfoSaneDefaults]))) + break + } case "ollamaModels": { const newModels = message.ollamaModels ?? [] @@ -177,72 +213,30 @@ const ApiOptions = ({ setVsCodeLmModels(newModels) } break - case "glamaModels": { - const updatedModels = message.glamaModels ?? {} - setGlamaModels({ - [glamaDefaultModelId]: glamaDefaultModelInfo, // in case the extension sent a model list without the default model - ...updatedModels, - }) - break - } - case "openRouterModels": { - const updatedModels = message.openRouterModels ?? {} - setOpenRouterModels({ - [openRouterDefaultModelId]: openRouterDefaultModelInfo, // in case the extension sent a model list without the default model - ...updatedModels, - }) - break - } - case "openAiModels": { - const updatedModels = message.openAiModels ?? [] - setOpenAiModels(Object.fromEntries(updatedModels.map((item) => [item, openAiModelInfoSaneDefaults]))) - break - } - case "unboundModels": { - const updatedModels = message.unboundModels ?? {} - setUnboundModels(updatedModels) - break - } - case "requestyModels": { - const updatedModels = message.requestyModels ?? {} - setRequestyModels({ - [requestyDefaultModelId]: requestyDefaultModelInfo, // in case the extension sent a model list without the default model - ...updatedModels, - }) - break - } } }, []) - useEvent("message", handleMessage) - - const createDropdown = (models: Record) => { - const options: DropdownOption[] = [ - { value: "", label: "Select a model..." }, - ...Object.keys(models).map((modelId) => ({ - value: modelId, - label: modelId, - })), - ] - - return ( - { - setApiConfigurationField("apiModelId", typeof value == "string" ? value : value?.value) - }} - style={{ width: "100%" }} - options={options} - /> - ) - } + useEvent("message", onMessage) + + const selectedProviderModelOptions: DropdownOption[] = useMemo( + () => + modelsByProvider[selectedProvider] + ? [ + { value: "", label: "Select a model..." }, + ...Object.keys(modelsByProvider[selectedProvider]).map((modelId) => ({ + value: modelId, + label: modelId, + })), + ] + : [], + [selectedProvider], + ) return (
-
+ {errorMessage && } + {selectedProvider === "anthropic" && (
- Anthropic API Key + Anthropic API Key - Glama API Key + Glama API Key {!apiConfiguration?.glamaApiKey && ( - Requesty API Key + Requesty API Key

- OpenAI API Key + OpenAI API Key

- Mistral API Key + Mistral API Key

- Codestral Base URL (Optional) + Codestral Base URL (Optional)

- OpenRouter API Key + OpenRouter API Key {!apiConfiguration?.openRouterApiKey && (

@@ -530,7 +526,7 @@ const ApiOptions = ({ style={{ width: "100%" }} onInput={handleInputChange("awsProfile")} placeholder="Enter profile name"> - AWS Profile Name + AWS Profile Name ) : ( <> @@ -541,7 +537,7 @@ const ApiOptions = ({ type="password" onInput={handleInputChange("awsAccessKey")} placeholder="Enter Access Key..."> - AWS Access Key + AWS Access Key - AWS Secret Key + AWS Secret Key - AWS Session Token + AWS Session Token )}

- Google Cloud Project ID + Google Cloud Project ID
- {errorMessage && }

- Gemini API Key + Gemini API Key

- Base URL + Base URL - API Key + API Key

)} - -
+
- Max Output Tokens + Max Output Tokens
- Context Window Size + Context Window Size
- Image Support + Image Support - Computer Use + Computer Use
- Input Price + Input Price
- Output Price + Output Price - Base URL (optional) + Base URL (optional) - Model ID + Model ID - {errorMessage && } - {lmStudioModels.length > 0 && ( {" "} feature to use it with this extension.{" "} - (Note: Roo Code uses complex prompts and works best + (Note: Roo Code uses complex prompts and works best with Claude models. Less capable models may not work as expected.)

@@ -1154,7 +1141,7 @@ const ApiOptions = ({ type="password" onInput={handleInputChange("deepSeekApiKey")} placeholder="Enter API Key..."> - DeepSeek API Key + DeepSeek API Key

{vsCodeLmModels.length > 0 ? ( - Base URL (optional) + Base URL (optional) - Model ID + Model ID {errorMessage && (
@@ -1284,7 +1271,7 @@ const ApiOptions = ({ quickstart guide. - (Note: Roo Code uses complex prompts and works best + (Note: Roo Code uses complex prompts and works best with Claude models. Less capable models may not work as expected.)

@@ -1299,7 +1286,7 @@ const ApiOptions = ({ type="password" onChange={handleInputChange("unboundApiKey")} placeholder="Enter API Key..."> - Unbound API Key + Unbound API Key {!apiConfiguration?.unboundApiKey && ( This key is stored locally and only used to make API requests from this extension.

-
)} + {selectedProvider === "openrouter" && ( + + )} + {selectedProvider === "glama" && ( )} - {selectedProvider === "openrouter" && ( + {selectedProvider === "unbound" && ( )} + {selectedProvider === "requesty" && ( )} - {selectedProvider !== "glama" && - selectedProvider !== "openrouter" && - selectedProvider !== "requesty" && - selectedProvider !== "openai" && - selectedProvider !== "ollama" && - selectedProvider !== "lmstudio" && - selectedProvider !== "unbound" && ( - <> -
- - {selectedProvider === "anthropic" && createDropdown(anthropicModels)} - {selectedProvider === "bedrock" && createDropdown(bedrockModels)} - {selectedProvider === "vertex" && createDropdown(vertexModels)} - {selectedProvider === "gemini" && createDropdown(geminiModels)} - {selectedProvider === "openai-native" && createDropdown(openAiNativeModels)} - {selectedProvider === "deepseek" && createDropdown(deepSeekModels)} - {selectedProvider === "mistral" && createDropdown(mistralModels)} -
- {errorMessage && } - - - )} - - {selectedModelInfo && selectedModelInfo.thinking && ( -
-
Thinking Budget
-
- setApiConfigurationField("anthropicThinking", value[0])} + {selectedProviderModelOptions.length > 0 && ( + <> +
+ + { + setApiConfigurationField("apiModelId", typeof value == "string" ? value : value?.value) + }} + options={selectedProviderModelOptions} + className="w-full" /> -
{anthropicThinkingBudget}
-
-
- Number of tokens Claude is allowed to use for its internal reasoning process.
-
+ + + )} {!fromWelcomeView && ( @@ -1459,6 +1423,7 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) { const getProviderData = (models: Record, defaultId: string) => { let selectedModelId: string let selectedModelInfo: ModelInfo + if (modelId && modelId in models) { selectedModelId = modelId selectedModelInfo = models[modelId] @@ -1466,8 +1431,10 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) { selectedModelId = defaultId selectedModelInfo = models[defaultId] } + return { selectedProvider: provider, selectedModelId, selectedModelInfo } } + switch (provider) { case "anthropic": return getProviderData(anthropicModels, anthropicDefaultModelId) @@ -1481,19 +1448,31 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) { return getProviderData(deepSeekModels, deepSeekDefaultModelId) case "openai-native": return getProviderData(openAiNativeModels, openAiNativeDefaultModelId) + case "mistral": + return getProviderData(mistralModels, mistralDefaultModelId) + case "openrouter": + return { + selectedProvider: provider, + selectedModelId: apiConfiguration?.openRouterModelId || openRouterDefaultModelId, + selectedModelInfo: apiConfiguration?.openRouterModelInfo || openRouterDefaultModelInfo, + } case "glama": return { selectedProvider: provider, selectedModelId: apiConfiguration?.glamaModelId || glamaDefaultModelId, selectedModelInfo: apiConfiguration?.glamaModelInfo || glamaDefaultModelInfo, } - case "mistral": - return getProviderData(mistralModels, mistralDefaultModelId) - case "openrouter": + case "unbound": return { selectedProvider: provider, - selectedModelId: apiConfiguration?.openRouterModelId || openRouterDefaultModelId, - selectedModelInfo: apiConfiguration?.openRouterModelInfo || openRouterDefaultModelInfo, + selectedModelId: apiConfiguration?.unboundModelId || unboundDefaultModelId, + selectedModelInfo: apiConfiguration?.unboundModelInfo || unboundDefaultModelInfo, + } + case "requesty": + return { + selectedProvider: provider, + selectedModelId: apiConfiguration?.requestyModelId || requestyDefaultModelId, + selectedModelInfo: apiConfiguration?.requestyModelInfo || requestyDefaultModelInfo, } case "openai": return { @@ -1521,21 +1500,9 @@ export function normalizeApiConfiguration(apiConfiguration?: ApiConfiguration) { : "", selectedModelInfo: { ...openAiModelInfoSaneDefaults, - supportsImages: false, // VSCode LM API currently doesn't support images + supportsImages: false, // VSCode LM API currently doesn't support images. 
}, } - case "unbound": - return { - selectedProvider: provider, - selectedModelId: apiConfiguration?.unboundModelId || unboundDefaultModelId, - selectedModelInfo: apiConfiguration?.unboundModelInfo || unboundDefaultModelInfo, - } - case "requesty": - return { - selectedProvider: provider, - selectedModelId: apiConfiguration?.requestyModelId || requestyDefaultModelId, - selectedModelInfo: apiConfiguration?.requestyModelInfo || requestyDefaultModelInfo, - } default: return getProviderData(anthropicModels, anthropicDefaultModelId) } diff --git a/webview-ui/src/components/settings/ModelPicker.tsx b/webview-ui/src/components/settings/ModelPicker.tsx index fd62bfb97b6..5a7737edd56 100644 --- a/webview-ui/src/components/settings/ModelPicker.tsx +++ b/webview-ui/src/components/settings/ModelPicker.tsx @@ -1,11 +1,13 @@ +import { useMemo, useState, useCallback, useEffect, useRef } from "react" import { VSCodeLink } from "@vscode/webview-ui-toolkit/react" -import { useMemo, useState, useCallback, useEffect } from "react" + +import { Combobox, ComboboxContent, ComboboxEmpty, ComboboxInput, ComboboxItem } from "@/components/ui/combobox" + +import { ApiConfiguration, ModelInfo } from "../../../../src/shared/api" import { normalizeApiConfiguration } from "./ApiOptions" +import { ThinkingBudget } from "./ThinkingBudget" import { ModelInfoView } from "./ModelInfoView" -import { ApiConfiguration, ModelInfo } from "../../../../src/shared/api" -import { Combobox, ComboboxContent, ComboboxEmpty, ComboboxInput, ComboboxItem } from "../ui/combobox" -import ApiErrorMessage from "./ApiErrorMessage" type ExtractType = NonNullable< { [K in keyof ApiConfiguration]: Required[K] extends T ? K : never }[keyof ApiConfiguration] @@ -14,24 +16,17 @@ type ExtractType = NonNullable< type ModelIdKeys = NonNullable< { [K in keyof ApiConfiguration]: K extends `${string}ModelId` ? K : never }[keyof ApiConfiguration] > -declare module "react" { - interface CSSProperties { - // Allow CSS variables - [key: `--${string}`]: string | number - } -} + interface ModelPickerProps { - defaultModelId?: string + defaultModelId: string + defaultModelInfo?: ModelInfo models: Record | null modelIdKey: ModelIdKeys modelInfoKey: ExtractType serviceName: string serviceUrl: string - recommendedModel: string apiConfiguration: ApiConfiguration setApiConfigurationField: (field: K, value: ApiConfiguration[K]) => void - defaultModelInfo?: ModelInfo - errorMessage?: string } export const ModelPicker = ({ @@ -41,13 +36,12 @@ export const ModelPicker = ({ modelInfoKey, serviceName, serviceUrl, - recommendedModel, apiConfiguration, setApiConfigurationField, defaultModelInfo, - errorMessage, }: ModelPickerProps) => { const [isDescriptionExpanded, setIsDescriptionExpanded] = useState(false) + const isInitialized = useRef(false) const modelIds = useMemo(() => Object.keys(models ?? {}).sort((a, b) => a.localeCompare(b)), [models]) @@ -55,6 +49,7 @@ export const ModelPicker = ({ () => normalizeApiConfiguration(apiConfiguration), [apiConfiguration], ) + const onSelect = useCallback( (modelId: string) => { const modelInfo = models?.[modelId] @@ -63,26 +58,23 @@ export const ModelPicker = ({ }, [modelIdKey, modelInfoKey, models, setApiConfigurationField, defaultModelInfo], ) + + const inputValue = apiConfiguration[modelIdKey] + useEffect(() => { - if (apiConfiguration[modelIdKey] == null && defaultModelId) { - onSelect(defaultModelId) + if (!inputValue && !isInitialized.current) { + const initialValue = modelIds.includes(selectedModelId) ? 
selectedModelId : defaultModelId + setApiConfigurationField(modelIdKey, initialValue) } - }, [apiConfiguration, defaultModelId, modelIdKey, onSelect]) + + isInitialized.current = true + }, [inputValue, modelIds, setApiConfigurationField, modelIdKey, selectedModelId, defaultModelId]) return ( <>
Model
- - + + No model found. {modelIds.map((model) => ( @@ -92,31 +84,18 @@ export const ModelPicker = ({ ))} - - {errorMessage ? ( - -

- - Note: Roo Code uses complex prompts and works best - with Claude models. Less capable models may not work as expected. - -

-
- ) : ( - selectedModelId && - selectedModelInfo && ( - - ) + + {selectedModelId && selectedModelInfo && selectedModelId === inputValue && ( + )}

The extension automatically fetches the latest list of models available on{" "} @@ -124,7 +103,7 @@ export const ModelPicker = ({ {serviceName}. If you're unsure which model to choose, Roo Code works best with{" "} - onSelect(recommendedModel)}>{recommendedModel}. + onSelect(defaultModelId)}>{defaultModelId}. You can also try searching "free" for no-cost options currently available.

diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index ee032c3ee06..d3e65a99ea8 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -66,21 +66,20 @@ const SettingsView = forwardRef(({ onDone }, terminalOutputLineLimit, writeDelayMs, } = cachedState - + //Make sure apiConfiguration is initialized and managed by SettingsView const apiConfiguration = useMemo(() => cachedState.apiConfiguration ?? {}, [cachedState.apiConfiguration]) useEffect(() => { - // Update only when currentApiConfigName is changed - // Expected to be triggered by loadApiConfiguration/upsertApiConfiguration + // Update only when currentApiConfigName is changed. + // Expected to be triggered by loadApiConfiguration/upsertApiConfiguration. if (prevApiConfigName.current === currentApiConfigName) { return } - setCachedState((prevCachedState) => ({ - ...prevCachedState, - ...extensionState, - })) + + setCachedState((prevCachedState) => ({ ...prevCachedState, ...extensionState })) prevApiConfigName.current = currentApiConfigName + // console.log("useEffect: currentApiConfigName changed, setChangeDetected -> false") setChangeDetected(false) }, [currentApiConfigName, extensionState, isChangeDetected]) @@ -90,11 +89,10 @@ const SettingsView = forwardRef(({ onDone }, if (prevState[field] === value) { return prevState } + + // console.log(`setCachedStateField(${field} -> ${value}): setChangeDetected -> true`) setChangeDetected(true) - return { - ...prevState, - [field]: value, - } + return { ...prevState, [field]: value } }) }, [], @@ -107,15 +105,10 @@ const SettingsView = forwardRef(({ onDone }, return prevState } + // console.log(`setApiConfigurationField(${field} -> ${value}): setChangeDetected -> true`) setChangeDetected(true) - return { - ...prevState, - apiConfiguration: { - ...prevState.apiConfiguration, - [field]: value, - }, - } + return { ...prevState, apiConfiguration: { ...prevState.apiConfiguration, [field]: value } } }) }, [], @@ -126,14 +119,19 @@ const SettingsView = forwardRef(({ onDone }, if (prevState.experiments?.[id] === enabled) { return prevState } + + // console.log("setExperimentEnabled: setChangeDetected -> true") setChangeDetected(true) + return { ...prevState, experiments: { ...prevState.experiments, [id]: enabled }, } }) }, []) + const isSettingValid = !errorMessage + const handleSubmit = () => { if (isSettingValid) { vscode.postMessage({ type: "alwaysAllowReadOnly", bool: alwaysAllowReadOnly }) @@ -160,6 +158,7 @@ const SettingsView = forwardRef(({ onDone }, vscode.postMessage({ type: "updateExperimental", values: experiments }) vscode.postMessage({ type: "alwaysAllowModeSwitch", bool: alwaysAllowModeSwitch }) vscode.postMessage({ type: "upsertApiConfiguration", text: currentApiConfigName, apiConfiguration }) + // console.log("handleSubmit: setChangeDetected -> false") setChangeDetected(false) } } @@ -176,13 +175,7 @@ const SettingsView = forwardRef(({ onDone }, [isChangeDetected], ) - useImperativeHandle( - ref, - () => ({ - checkUnsaveChanges, - }), - [checkUnsaveChanges], - ) + useImperativeHandle(ref, () => ({ checkUnsaveChanges }), [checkUnsaveChanges]) const onConfirmDialogResult = useCallback((confirm: boolean) => { if (confirm) { @@ -200,10 +193,7 @@ const SettingsView = forwardRef(({ onDone }, const newCommands = [...currentCommands, commandInput] setCachedStateField("allowedCommands", newCommands) setCommandInput("") - vscode.postMessage({ - type: 
"allowedCommands", - commands: newCommands, - }) + vscode.postMessage({ type: "allowedCommands", commands: newCommands }) } } diff --git a/webview-ui/src/components/settings/ThinkingBudget.tsx b/webview-ui/src/components/settings/ThinkingBudget.tsx new file mode 100644 index 00000000000..efaa90dc39a --- /dev/null +++ b/webview-ui/src/components/settings/ThinkingBudget.tsx @@ -0,0 +1,29 @@ +import { Slider } from "@/components/ui" + +import { ApiConfiguration, ModelInfo, THINKING_BUDGET } from "../../../../src/shared/api" + +interface ThinkingBudgetProps { + apiConfiguration: ApiConfiguration + setApiConfigurationField: (field: K, value: ApiConfiguration[K]) => void + modelInfo?: ModelInfo +} + +export const ThinkingBudget = ({ apiConfiguration, setApiConfigurationField, modelInfo }: ThinkingBudgetProps) => { + const budget = apiConfiguration?.anthropicThinking ?? THINKING_BUDGET.default + + return modelInfo && modelInfo.thinking ? ( +
+
Thinking Budget
+
+ setApiConfigurationField("anthropicThinking", value[0])} + /> +
{budget}
+
+
+ ) : null +} diff --git a/webview-ui/src/components/ui/alert-dialog.tsx b/webview-ui/src/components/ui/alert-dialog.tsx index 7530cae54d6..82a25bf8f70 100644 --- a/webview-ui/src/components/ui/alert-dialog.tsx +++ b/webview-ui/src/components/ui/alert-dialog.tsx @@ -4,94 +4,97 @@ import * as AlertDialogPrimitive from "@radix-ui/react-alert-dialog" import { cn } from "@/lib/utils" import { buttonVariants } from "@/components/ui/button" -const AlertDialog = AlertDialogPrimitive.Root - -const AlertDialogTrigger = AlertDialogPrimitive.Trigger +function AlertDialog({ ...props }: React.ComponentProps) { + return +} -const AlertDialogPortal = AlertDialogPrimitive.Portal +function AlertDialogTrigger({ ...props }: React.ComponentProps) { + return +} -const AlertDialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName +function AlertDialogPortal({ ...props }: React.ComponentProps) { + return +} -const AlertDialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - - - ) { + return ( + - -)) -AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName + ) +} -const AlertDialogHeader = ({ className, ...props }: React.HTMLAttributes) => ( -
-) -AlertDialogHeader.displayName = "AlertDialogHeader" +function AlertDialogContent({ className, ...props }: React.ComponentProps) { + return ( + + + + + ) +} -const AlertDialogFooter = ({ className, ...props }: React.HTMLAttributes) => ( -
-) -AlertDialogFooter.displayName = "AlertDialogFooter" +function AlertDialogHeader({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} -const AlertDialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName +function AlertDialogFooter({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function AlertDialogTitle({ className, ...props }: React.ComponentProps) { + return ( + + ) +} -const AlertDialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogDescription.displayName = AlertDialogPrimitive.Description.displayName +function AlertDialogDescription({ + className, + ...props +}: React.ComponentProps) { + return ( + + ) +} -const AlertDialogAction = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName +function AlertDialogAction({ className, ...props }: React.ComponentProps) { + return +} -const AlertDialogCancel = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName +function AlertDialogCancel({ className, ...props }: React.ComponentProps) { + return +} export { AlertDialog, diff --git a/webview-ui/src/components/ui/dialog.tsx b/webview-ui/src/components/ui/dialog.tsx index 11d5e2d3b0c..ed3160f692a 100644 --- a/webview-ui/src/components/ui/dialog.tsx +++ b/webview-ui/src/components/ui/dialog.tsx @@ -1,96 +1,108 @@ -"use client" - import * as React from "react" import * as DialogPrimitive from "@radix-ui/react-dialog" -import { Cross2Icon } from "@radix-ui/react-icons" +import { XIcon } from "lucide-react" import { cn } from "@/lib/utils" -const Dialog = DialogPrimitive.Root - -const DialogTrigger = DialogPrimitive.Trigger +function Dialog({ ...props }: React.ComponentProps) { + return +} -const DialogPortal = DialogPrimitive.Portal +function DialogTrigger({ ...props }: React.ComponentProps) { + return +} -const DialogClose = DialogPrimitive.Close +function DialogPortal({ ...props }: React.ComponentProps) { + return +} -const DialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogOverlay.displayName = DialogPrimitive.Overlay.displayName +function DialogClose({ ...props }: React.ComponentProps) { + return +} -const DialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - ) { + return ( + - {children} - - - Close - - - -)) -DialogContent.displayName = DialogPrimitive.Content.displayName + {...props} + /> + ) +} -const DialogHeader = ({ className, ...props }: React.HTMLAttributes) => ( -
-) -DialogHeader.displayName = "DialogHeader" +function DialogContent({ className, children, ...props }: React.ComponentProps) { + return ( + + + + {children} + + + Close + + + + ) +} -const DialogFooter = ({ className, ...props }: React.HTMLAttributes) => ( -
-) -DialogFooter.displayName = "DialogFooter" +function DialogHeader({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} -const DialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogTitle.displayName = DialogPrimitive.Title.displayName +function DialogFooter({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} -const DialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogDescription.displayName = DialogPrimitive.Description.displayName +function DialogTitle({ className, ...props }: React.ComponentProps) { + return ( + + ) +} + +function DialogDescription({ className, ...props }: React.ComponentProps) { + return ( + + ) +} export { Dialog, - DialogPortal, - DialogOverlay, - DialogTrigger, DialogClose, DialogContent, - DialogHeader, + DialogDescription, DialogFooter, + DialogHeader, + DialogOverlay, + DialogPortal, DialogTitle, - DialogDescription, + DialogTrigger, } diff --git a/webview-ui/src/components/welcome/WelcomeView.tsx b/webview-ui/src/components/welcome/WelcomeView.tsx index 5d880efc0b9..ae674c895f4 100644 --- a/webview-ui/src/components/welcome/WelcomeView.tsx +++ b/webview-ui/src/components/welcome/WelcomeView.tsx @@ -12,16 +12,14 @@ const WelcomeView = () => { const handleSubmit = useCallback(() => { const error = validateApiConfiguration(apiConfiguration) + if (error) { setErrorMessage(error) return } + setErrorMessage(undefined) - vscode.postMessage({ - type: "upsertApiConfiguration", - text: currentApiConfigName, - apiConfiguration, - }) + vscode.postMessage({ type: "upsertApiConfiguration", text: currentApiConfigName, apiConfiguration }) }, [apiConfiguration, currentApiConfigName]) return ( diff --git a/webview-ui/src/utils/validate.ts b/webview-ui/src/utils/validate.ts index 97c702637c4..82af23ab497 100644 --- a/webview-ui/src/utils/validate.ts +++ b/webview-ui/src/utils/validate.ts @@ -1,74 +1,83 @@ -import { ApiConfiguration } from "../../../src/shared/api" -import { ModelInfo } from "../../../src/shared/api" +import { ApiConfiguration, ModelInfo } from "../../../src/shared/api" + export function validateApiConfiguration(apiConfiguration?: ApiConfiguration): string | undefined { - if (apiConfiguration) { - switch (apiConfiguration.apiProvider) { - case "anthropic": - if (!apiConfiguration.apiKey) { - return "You must provide a valid API key or choose a different provider." - } - break - case "glama": - if (!apiConfiguration.glamaApiKey) { - return "You must provide a valid API key or choose a different provider." - } - break - case "bedrock": - if (!apiConfiguration.awsRegion) { - return "You must choose a region to use with AWS Bedrock." - } - break - case "openrouter": - if (!apiConfiguration.openRouterApiKey) { - return "You must provide a valid API key or choose a different provider." - } - break - case "vertex": - if (!apiConfiguration.vertexProjectId || !apiConfiguration.vertexRegion) { - return "You must provide a valid Google Cloud Project ID and Region." - } - break - case "gemini": - if (!apiConfiguration.geminiApiKey) { - return "You must provide a valid API key or choose a different provider." - } - break - case "openai-native": - if (!apiConfiguration.openAiNativeApiKey) { - return "You must provide a valid API key or choose a different provider." - } - break - case "mistral": - if (!apiConfiguration.mistralApiKey) { - return "You must provide a valid API key or choose a different provider." - } - break - case "openai": - if ( - !apiConfiguration.openAiBaseUrl || - !apiConfiguration.openAiApiKey || - !apiConfiguration.openAiModelId - ) { - return "You must provide a valid base URL, API key, and model ID." - } - break - case "ollama": - if (!apiConfiguration.ollamaModelId) { - return "You must provide a valid model ID." 
- } - break - case "lmstudio": - if (!apiConfiguration.lmStudioModelId) { - return "You must provide a valid model ID." - } - break - case "vscode-lm": - if (!apiConfiguration.vsCodeLmModelSelector) { - return "You must provide a valid model selector." - } - break - } + if (!apiConfiguration) { + return undefined + } + + switch (apiConfiguration.apiProvider) { + case "openrouter": + if (!apiConfiguration.openRouterApiKey) { + return "You must provide a valid API key." + } + break + case "glama": + if (!apiConfiguration.glamaApiKey) { + return "You must provide a valid API key." + } + break + case "unbound": + if (!apiConfiguration.unboundApiKey) { + return "You must provide a valid API key." + } + break + case "requesty": + if (!apiConfiguration.requestyApiKey) { + return "You must provide a valid API key." + } + break + case "anthropic": + if (!apiConfiguration.apiKey) { + return "You must provide a valid API key." + } + break + case "bedrock": + if (!apiConfiguration.awsRegion) { + return "You must choose a region to use with AWS Bedrock." + } + break + case "vertex": + if (!apiConfiguration.vertexProjectId || !apiConfiguration.vertexRegion) { + return "You must provide a valid Google Cloud Project ID and Region." + } + break + case "gemini": + if (!apiConfiguration.geminiApiKey) { + return "You must provide a valid API key." + } + break + case "openai-native": + if (!apiConfiguration.openAiNativeApiKey) { + return "You must provide a valid API key." + } + break + case "mistral": + if (!apiConfiguration.mistralApiKey) { + return "You must provide a valid API key." + } + break + case "openai": + if (!apiConfiguration.openAiBaseUrl || !apiConfiguration.openAiApiKey || !apiConfiguration.openAiModelId) { + return "You must provide a valid base URL, API key, and model ID." + } + break + case "ollama": + if (!apiConfiguration.ollamaModelId) { + return "You must provide a valid model ID." + } + break + case "lmstudio": + if (!apiConfiguration.lmStudioModelId) { + return "You must provide a valid model ID." + } + break + case "vscode-lm": + if (!apiConfiguration.vsCodeLmModelSelector) { + return "You must provide a valid model selector." + } + break } + return undefined } @@ -77,40 +86,81 @@ export function validateModelId( glamaModels?: Record, openRouterModels?: Record, unboundModels?: Record, + requestyModels?: Record, ): string | undefined { - if (apiConfiguration) { - switch (apiConfiguration.apiProvider) { - case "glama": - const glamaModelId = apiConfiguration.glamaModelId - if (!glamaModelId) { - return "You must provide a model ID." - } - if (glamaModels && !Object.keys(glamaModels).includes(glamaModelId)) { - // even if the model list endpoint failed, extensionstatecontext will always have the default model info - return "The model ID you provided is not available. Please choose a different model." - } - break - case "openrouter": - const modelId = apiConfiguration.openRouterModelId - if (!modelId) { - return "You must provide a model ID." - } - if (openRouterModels && !Object.keys(openRouterModels).includes(modelId)) { - // even if the model list endpoint failed, extensionstatecontext will always have the default model info - return "The model ID you provided is not available. Please choose a different model." - } - break - case "unbound": - const unboundModelId = apiConfiguration.unboundModelId - if (!unboundModelId) { - return "You must provide a model ID." 
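// In the rewritten checks that follow, an unknown model ID is only reported once the
// provider's model list holds more than the single default entry
// (`Object.keys(models).length > 1`), presumably so that a still-loading or failed
// model-list fetch does not surface a spurious "not available" error for a valid ID.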
- } - if (unboundModels && !Object.keys(unboundModels).includes(unboundModelId)) { - // even if the model list endpoint failed, extensionstatecontext will always have the default model info - return "The model ID you provided is not available. Please choose a different model." - } - break - } + if (!apiConfiguration) { + return undefined + } + + switch (apiConfiguration.apiProvider) { + case "openrouter": + const modelId = apiConfiguration.openRouterModelId + + if (!modelId) { + return "You must provide a model ID." + } + + if ( + openRouterModels && + Object.keys(openRouterModels).length > 1 && + !Object.keys(openRouterModels).includes(modelId) + ) { + return `The model ID (${modelId}) you provided is not available. Please choose a different model.` + } + + break + + case "glama": + const glamaModelId = apiConfiguration.glamaModelId + + if (!glamaModelId) { + return "You must provide a model ID." + } + + if ( + glamaModels && + Object.keys(glamaModels).length > 1 && + !Object.keys(glamaModels).includes(glamaModelId) + ) { + return `The model ID (${glamaModelId}) you provided is not available. Please choose a different model.` + } + + break + + case "unbound": + const unboundModelId = apiConfiguration.unboundModelId + + if (!unboundModelId) { + return "You must provide a model ID." + } + + if ( + unboundModels && + Object.keys(unboundModels).length > 1 && + !Object.keys(unboundModels).includes(unboundModelId) + ) { + return `The model ID (${unboundModelId}) you provided is not available. Please choose a different model.` + } + + break + + case "requesty": + const requestyModelId = apiConfiguration.requestyModelId + + if (!requestyModelId) { + return "You must provide a model ID." + } + + if ( + requestyModels && + Object.keys(requestyModels).length > 1 && + !Object.keys(requestyModels).includes(requestyModelId) + ) { + return `The model ID (${requestyModelId}) you provided is not available. 
Please choose a different model.` + } + + break } + return undefined } From 44724e5881ab8daefa7b80bb6c52e5f1bb8a828e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 26 Feb 2025 07:06:25 +0000 Subject: [PATCH 072/145] changeset version bump --- .changeset/cold-poems-change.md | 5 ----- .changeset/real-ties-destroy.md | 5 ----- .changeset/shaggy-spies-kneel.md | 5 ----- .changeset/swift-kings-attack.md | 5 ----- CHANGELOG.md | 9 +++++++++ package-lock.json | 4 ++-- package.json | 2 +- 7 files changed, 12 insertions(+), 23 deletions(-) delete mode 100644 .changeset/cold-poems-change.md delete mode 100644 .changeset/real-ties-destroy.md delete mode 100644 .changeset/shaggy-spies-kneel.md delete mode 100644 .changeset/swift-kings-attack.md diff --git a/.changeset/cold-poems-change.md b/.changeset/cold-poems-change.md deleted file mode 100644 index 41693ccdfc7..00000000000 --- a/.changeset/cold-poems-change.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -v3.7.5 diff --git a/.changeset/real-ties-destroy.md b/.changeset/real-ties-destroy.md deleted file mode 100644 index a2e9ba8eb04..00000000000 --- a/.changeset/real-ties-destroy.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Fix model picker diff --git a/.changeset/shaggy-spies-kneel.md b/.changeset/shaggy-spies-kneel.md deleted file mode 100644 index d137cf85efc..00000000000 --- a/.changeset/shaggy-spies-kneel.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Add drag-and-drop for files diff --git a/.changeset/swift-kings-attack.md b/.changeset/swift-kings-attack.md deleted file mode 100644 index 8a8a425611d..00000000000 --- a/.changeset/swift-kings-attack.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Pass "thinking" params to OpenRouter diff --git a/CHANGELOG.md b/CHANGELOG.md index 52fb7540976..7a9b4b57bf0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Roo Code Changelog +## 3.7.5 + +### Patch Changes + +- v3.7.5 +- Fix model picker +- Add drag-and-drop for files +- Pass "thinking" params to OpenRouter + ## [3.7.4] - Fix a bug that prevented the "Thinking" setting from properly updating when switching profiles. diff --git a/package-lock.json b/package-lock.json index 4bcdf8136df..a6c75bd69b3 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "roo-cline", - "version": "3.7.4", + "version": "3.7.5", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "3.7.4", + "version": "3.7.5", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.37.0", diff --git a/package.json b/package.json index 28045436e6b..40bb6a545dd 100644 --- a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": "Roo Code (prev. 
Roo Cline)", "description": "A whole dev team of AI agents in your editor.", "publisher": "RooVeterinaryInc", - "version": "3.7.4", + "version": "3.7.5", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From 319f16eb0aabb8033ef94d006b983bdbb45a1eec Mon Sep 17 00:00:00 2001 From: cte Date: Tue, 25 Feb 2025 23:11:42 -0800 Subject: [PATCH 073/145] Update CHANGELOG --- CHANGELOG.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7a9b4b57bf0..ee107be67a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,13 +1,11 @@ # Roo Code Changelog -## 3.7.5 +## [3.7.5] -### Patch Changes - -- v3.7.5 -- Fix model picker +- Fix context window truncation math (see [#1173](https://github.com/RooVetGit/Roo-Code/issues/1173)) +- Fix various issues with the model picker - Add drag-and-drop for files -- Pass "thinking" params to OpenRouter +- Enable the "Thinking Budget" slider for Claude 3.7 Sonnet on OpenRouter ## [3.7.4] From da1b31765ed05cab6c1d0e25cbae6748aa7ce89a Mon Sep 17 00:00:00 2001 From: cte Date: Tue, 25 Feb 2025 23:13:52 -0800 Subject: [PATCH 074/145] Update CHANGELOG --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ee107be67a4..02a4a30cbd2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,8 @@ ## [3.7.5] - Fix context window truncation math (see [#1173](https://github.com/RooVetGit/Roo-Code/issues/1173)) -- Fix various issues with the model picker +- Fix various issues with the model picker (thanks @System233!) +- Fix model input / output cost parsing (thanks @System233!) - Add drag-and-drop for files - Enable the "Thinking Budget" slider for Claude 3.7 Sonnet on OpenRouter From 78d5af491aed08505afcfb80bd02a96cb0d287ae Mon Sep 17 00:00:00 2001 From: Joe Manley Date: Wed, 26 Feb 2025 09:32:13 -0800 Subject: [PATCH 075/145] Fix long strings correctly in ChatRow --- webview-ui/src/components/chat/ChatRow.tsx | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/webview-ui/src/components/chat/ChatRow.tsx b/webview-ui/src/components/chat/ChatRow.tsx index b139c68f963..4017ccf318e 100644 --- a/webview-ui/src/components/chat/ChatRow.tsx +++ b/webview-ui/src/components/chat/ChatRow.tsx @@ -617,8 +617,10 @@ export const ChatRowContent = ({ color: "var(--vscode-badge-foreground)", borderRadius: "3px", padding: "9px", - whiteSpace: "pre-line", - wordWrap: "break-word", + overflow: "hidden", + whiteSpace: "pre-wrap", + wordBreak: "break-word", + overflowWrap: "anywhere", }}>
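// The combination above of `overflow: "hidden"`, `whiteSpace: "pre-wrap"`,
// `wordBreak: "break-word"`, and `overflowWrap: "anywhere"` lets very long unbroken
// strings wrap inside the container instead of overflowing it, similar to how
// TaskHeader already handles long text (per the accompanying changeset).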
Date: Wed, 26 Feb 2025 09:41:57 -0800 Subject: [PATCH 076/145] Add changeset --- .changeset/fluffy-apples-attack.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/fluffy-apples-attack.md diff --git a/.changeset/fluffy-apples-attack.md b/.changeset/fluffy-apples-attack.md new file mode 100644 index 00000000000..924a1b25057 --- /dev/null +++ b/.changeset/fluffy-apples-attack.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Handle really long text in the ChatRow similar to TaskHeader From d7266be3feb018c5bf207135cb54f74c94d198e5 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Wed, 26 Feb 2025 13:52:38 -0500 Subject: [PATCH 077/145] Better OpenRouter error handling --- .changeset/tender-cycles-help.md | 5 +++++ src/core/Cline.ts | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 .changeset/tender-cycles-help.md diff --git a/.changeset/tender-cycles-help.md b/.changeset/tender-cycles-help.md new file mode 100644 index 00000000000..d43e423ee61 --- /dev/null +++ b/.changeset/tender-cycles-help.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Better OpenRouter error handling diff --git a/src/core/Cline.ts b/src/core/Cline.ts index 073bd109117..2e29ad453cc 100644 --- a/src/core/Cline.ts +++ b/src/core/Cline.ts @@ -1010,7 +1010,7 @@ export class Cline { } catch (error) { // note that this api_req_failed ask is unique in that we only present this option if the api hasn't streamed any content yet (ie it fails on the first chunk due), as it would allow them to hit a retry button. However if the api failed mid-stream, it could be in any arbitrary state where some tools may have executed, so that error is handled differently and requires cancelling the task entirely. if (alwaysApproveResubmit) { - const errorMsg = error.message ?? "Unknown error" + const errorMsg = error.error?.metadata?.raw ?? error.message ?? 
"Unknown error" const baseDelay = requestDelaySeconds || 5 const exponentialDelay = Math.ceil(baseDelay * Math.pow(2, retryAttempt)) // Wait for the greater of the exponential delay or the rate limit delay From a4e58700ce7527ed486a96df6cce9709fb145074 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Wed, 26 Feb 2025 16:04:32 -0500 Subject: [PATCH 078/145] Support multiple files in drag-and-drop --- .changeset/orange-zoos-train.md | 5 + .../src/components/chat/ChatTextArea.tsx | 37 ++- .../chat/__tests__/ChatTextArea.test.tsx | 238 ++++++++++++++++++ 3 files changed, 272 insertions(+), 8 deletions(-) create mode 100644 .changeset/orange-zoos-train.md diff --git a/.changeset/orange-zoos-train.md b/.changeset/orange-zoos-train.md new file mode 100644 index 00000000000..76c16f45671 --- /dev/null +++ b/.changeset/orange-zoos-train.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Support multiple files in drag-and-drop diff --git a/webview-ui/src/components/chat/ChatTextArea.tsx b/webview-ui/src/components/chat/ChatTextArea.tsx index dc78a3fdb3d..be2b2a97984 100644 --- a/webview-ui/src/components/chat/ChatTextArea.tsx +++ b/webview-ui/src/components/chat/ChatTextArea.tsx @@ -590,15 +590,36 @@ const ChatTextArea = forwardRef( const files = Array.from(e.dataTransfer.files) const text = e.dataTransfer.getData("text") if (text) { - // Convert the path to a mention-friendly format - const mentionText = convertToMentionPath(text, cwd) + // Split text on newlines to handle multiple files + const lines = text.split(/\r?\n/).filter((line) => line.trim() !== "") + + if (lines.length > 0) { + // Process each line as a separate file path + let newValue = inputValue.slice(0, cursorPosition) + let totalLength = 0 + + lines.forEach((line, index) => { + // Convert each path to a mention-friendly format + const mentionText = convertToMentionPath(line, cwd) + newValue += mentionText + totalLength += mentionText.length + + // Add space after each mention except the last one + if (index < lines.length - 1) { + newValue += " " + totalLength += 1 + } + }) - const newValue = - inputValue.slice(0, cursorPosition) + mentionText + " " + inputValue.slice(cursorPosition) - setInputValue(newValue) - const newCursorPosition = cursorPosition + mentionText.length + 1 - setCursorPosition(newCursorPosition) - setIntendedCursorPosition(newCursorPosition) + // Add space after the last mention and append the rest of the input + newValue += " " + inputValue.slice(cursorPosition) + totalLength += 1 + + setInputValue(newValue) + const newCursorPosition = cursorPosition + totalLength + setCursorPosition(newCursorPosition) + setIntendedCursorPosition(newCursorPosition) + } return } diff --git a/webview-ui/src/components/chat/__tests__/ChatTextArea.test.tsx b/webview-ui/src/components/chat/__tests__/ChatTextArea.test.tsx index 205912fc154..3241010e886 100644 --- a/webview-ui/src/components/chat/__tests__/ChatTextArea.test.tsx +++ b/webview-ui/src/components/chat/__tests__/ChatTextArea.test.tsx @@ -3,6 +3,7 @@ import ChatTextArea from "../ChatTextArea" import { useExtensionState } from "../../../context/ExtensionStateContext" import { vscode } from "../../../utils/vscode" import { defaultModeSlug } from "../../../../../src/shared/modes" +import * as pathMentions from "../../../utils/path-mentions" // Mock modules jest.mock("../../../utils/vscode", () => ({ @@ -12,9 +13,20 @@ jest.mock("../../../utils/vscode", () => ({ })) jest.mock("../../../components/common/CodeBlock") jest.mock("../../../components/common/MarkdownBlock") 
+jest.mock("../../../utils/path-mentions", () => ({ + convertToMentionPath: jest.fn((path, cwd) => { + // Simple mock implementation that mimics the real function's behavior + if (cwd && path.toLowerCase().startsWith(cwd.toLowerCase())) { + const relativePath = path.substring(cwd.length) + return "@" + (relativePath.startsWith("/") ? relativePath : "/" + relativePath) + } + return path + }), +})) // Get the mocked postMessage function const mockPostMessage = vscode.postMessage as jest.Mock +const mockConvertToMentionPath = pathMentions.convertToMentionPath as jest.Mock // Mock ExtensionStateContext jest.mock("../../../context/ExtensionStateContext") @@ -160,4 +172,230 @@ describe("ChatTextArea", () => { expect(setInputValue).toHaveBeenCalledWith("Enhanced test prompt") }) }) + + describe("multi-file drag and drop", () => { + const mockCwd = "/Users/test/project" + + beforeEach(() => { + jest.clearAllMocks() + ;(useExtensionState as jest.Mock).mockReturnValue({ + filePaths: [], + openedTabs: [], + cwd: mockCwd, + }) + mockConvertToMentionPath.mockClear() + }) + + it("should process multiple file paths separated by newlines", () => { + const setInputValue = jest.fn() + + const { container } = render( + , + ) + + // Create a mock dataTransfer object with text data containing multiple file paths + const dataTransfer = { + getData: jest.fn().mockReturnValue("/Users/test/project/file1.js\n/Users/test/project/file2.js"), + files: [], + } + + // Simulate drop event + fireEvent.drop(container.querySelector(".chat-text-area")!, { + dataTransfer, + preventDefault: jest.fn(), + }) + + // Verify convertToMentionPath was called for each file path + expect(mockConvertToMentionPath).toHaveBeenCalledTimes(2) + expect(mockConvertToMentionPath).toHaveBeenCalledWith("/Users/test/project/file1.js", mockCwd) + expect(mockConvertToMentionPath).toHaveBeenCalledWith("/Users/test/project/file2.js", mockCwd) + + // Verify setInputValue was called with the correct value + // The mock implementation of convertToMentionPath will convert the paths to @/file1.js and @/file2.js + expect(setInputValue).toHaveBeenCalledWith("@/file1.js @/file2.js Initial text") + }) + + it("should filter out empty lines in the dragged text", () => { + const setInputValue = jest.fn() + + const { container } = render( + , + ) + + // Create a mock dataTransfer object with text data containing empty lines + const dataTransfer = { + getData: jest.fn().mockReturnValue("/Users/test/project/file1.js\n\n/Users/test/project/file2.js\n\n"), + files: [], + } + + // Simulate drop event + fireEvent.drop(container.querySelector(".chat-text-area")!, { + dataTransfer, + preventDefault: jest.fn(), + }) + + // Verify convertToMentionPath was called only for non-empty lines + expect(mockConvertToMentionPath).toHaveBeenCalledTimes(2) + + // Verify setInputValue was called with the correct value + expect(setInputValue).toHaveBeenCalledWith("@/file1.js @/file2.js Initial text") + }) + + it("should correctly update cursor position after adding multiple mentions", () => { + const setInputValue = jest.fn() + const initialCursorPosition = 5 + + const { container } = render( + , + ) + + // Set the cursor position manually + const textArea = container.querySelector("textarea") + if (textArea) { + textArea.selectionStart = initialCursorPosition + textArea.selectionEnd = initialCursorPosition + } + + // Create a mock dataTransfer object with text data + const dataTransfer = { + getData: 
jest.fn().mockReturnValue("/Users/test/project/file1.js\n/Users/test/project/file2.js"), + files: [], + } + + // Simulate drop event + fireEvent.drop(container.querySelector(".chat-text-area")!, { + dataTransfer, + preventDefault: jest.fn(), + }) + + // The cursor position should be updated based on the implementation in the component + expect(setInputValue).toHaveBeenCalledWith("@/file1.js @/file2.js Hello world") + }) + + it("should handle very long file paths correctly", () => { + const setInputValue = jest.fn() + + const { container } = render() + + // Create a very long file path + const longPath = + "/Users/test/project/very/long/path/with/many/nested/directories/and/a/very/long/filename/with/extension.typescript" + + // Create a mock dataTransfer object with the long path + const dataTransfer = { + getData: jest.fn().mockReturnValue(longPath), + files: [], + } + + // Simulate drop event + fireEvent.drop(container.querySelector(".chat-text-area")!, { + dataTransfer, + preventDefault: jest.fn(), + }) + + // Verify convertToMentionPath was called with the long path + expect(mockConvertToMentionPath).toHaveBeenCalledWith(longPath, mockCwd) + + // The mock implementation will convert it to @/very/long/path/... + expect(setInputValue).toHaveBeenCalledWith( + "@/very/long/path/with/many/nested/directories/and/a/very/long/filename/with/extension.typescript ", + ) + }) + + it("should handle paths with special characters correctly", () => { + const setInputValue = jest.fn() + + const { container } = render() + + // Create paths with special characters + const specialPath1 = "/Users/test/project/file with spaces.js" + const specialPath2 = "/Users/test/project/file-with-dashes.js" + const specialPath3 = "/Users/test/project/file_with_underscores.js" + const specialPath4 = "/Users/test/project/file.with.dots.js" + + // Create a mock dataTransfer object with the special paths + const dataTransfer = { + getData: jest + .fn() + .mockReturnValue(`${specialPath1}\n${specialPath2}\n${specialPath3}\n${specialPath4}`), + files: [], + } + + // Simulate drop event + fireEvent.drop(container.querySelector(".chat-text-area")!, { + dataTransfer, + preventDefault: jest.fn(), + }) + + // Verify convertToMentionPath was called for each path + expect(mockConvertToMentionPath).toHaveBeenCalledTimes(4) + expect(mockConvertToMentionPath).toHaveBeenCalledWith(specialPath1, mockCwd) + expect(mockConvertToMentionPath).toHaveBeenCalledWith(specialPath2, mockCwd) + expect(mockConvertToMentionPath).toHaveBeenCalledWith(specialPath3, mockCwd) + expect(mockConvertToMentionPath).toHaveBeenCalledWith(specialPath4, mockCwd) + + // Verify setInputValue was called with the correct value + expect(setInputValue).toHaveBeenCalledWith( + "@/file with spaces.js @/file-with-dashes.js @/file_with_underscores.js @/file.with.dots.js ", + ) + }) + + it("should handle paths outside the current working directory", () => { + const setInputValue = jest.fn() + + const { container } = render() + + // Create paths outside the current working directory + const outsidePath = "/Users/other/project/file.js" + + // Mock the convertToMentionPath function to return the original path for paths outside cwd + mockConvertToMentionPath.mockImplementationOnce((path, cwd) => { + return path // Return original path for this test + }) + + // Create a mock dataTransfer object with the outside path + const dataTransfer = { + getData: jest.fn().mockReturnValue(outsidePath), + files: [], + } + + // Simulate drop event + 
fireEvent.drop(container.querySelector(".chat-text-area")!, { + dataTransfer, + preventDefault: jest.fn(), + }) + + // Verify convertToMentionPath was called with the outside path + expect(mockConvertToMentionPath).toHaveBeenCalledWith(outsidePath, mockCwd) + + // Verify setInputValue was called with the original path + expect(setInputValue).toHaveBeenCalledWith("/Users/other/project/file.js ") + }) + + it("should do nothing when dropped text is empty", () => { + const setInputValue = jest.fn() + + const { container } = render( + , + ) + + // Create a mock dataTransfer object with empty text + const dataTransfer = { + getData: jest.fn().mockReturnValue(""), + files: [], + } + + // Simulate drop event + fireEvent.drop(container.querySelector(".chat-text-area")!, { + dataTransfer, + preventDefault: jest.fn(), + }) + + // Verify convertToMentionPath was not called + expect(mockConvertToMentionPath).not.toHaveBeenCalled() + + // Verify setInputValue was not called + expect(setInputValue).not.toHaveBeenCalled() + }) + }) }) From 5e53d00ebcf0d2adf218a07452d0e15835bf3e64 Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Wed, 26 Feb 2025 14:23:11 -0800 Subject: [PATCH 079/145] Allow control over maxTokens for thinking models --- src/api/providers/anthropic.ts | 12 ++- src/api/providers/openrouter.ts | 13 ++- src/core/Cline.ts | 18 +++- .../__tests__/sliding-window.test.ts | 102 +++++++++++++++--- src/core/sliding-window/index.ts | 37 ++++--- src/core/webview/ClineProvider.ts | 5 + src/shared/api.ts | 11 +- src/shared/globalState.ts | 1 + .../components/settings/ThinkingBudget.tsx | 65 ++++++++--- 9 files changed, 198 insertions(+), 66 deletions(-) diff --git a/src/api/providers/anthropic.ts b/src/api/providers/anthropic.ts index ad58a1cf6b2..8c5a1795b1f 100644 --- a/src/api/providers/anthropic.ts +++ b/src/api/providers/anthropic.ts @@ -31,7 +31,7 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { let stream: AnthropicStream const cacheControl: CacheControlEphemeral = { type: "ephemeral" } let { id: modelId, info: modelInfo } = this.getModel() - const maxTokens = modelInfo.maxTokens || 8192 + const maxTokens = this.options.modelMaxTokens || modelInfo.maxTokens || 8192 let temperature = this.options.modelTemperature ?? ANTHROPIC_DEFAULT_TEMPERATURE let thinking: BetaThinkingConfigParam | undefined = undefined @@ -41,7 +41,15 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { // `claude-3-7-sonnet-20250219` model with a thinking budget. // We can handle this more elegantly in the future. modelId = "claude-3-7-sonnet-20250219" - const budgetTokens = this.options.anthropicThinking ?? Math.max(maxTokens * 0.8, 1024) + + // Clamp the thinking budget to be at most 80% of max tokens and at + // least 1024 tokens. + const maxBudgetTokens = Math.floor(maxTokens * 0.8) + const budgetTokens = Math.max( + Math.min(this.options.anthropicThinking ?? maxBudgetTokens, maxBudgetTokens), + 1024, + ) + thinking = { type: "enabled", budget_tokens: budgetTokens } temperature = 1.0 } diff --git a/src/api/providers/openrouter.ts b/src/api/providers/openrouter.ts index 0a9488e816f..69bcb0074c1 100644 --- a/src/api/providers/openrouter.ts +++ b/src/api/providers/openrouter.ts @@ -108,12 +108,19 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler { topP = 0.95 } + const maxTokens = this.options.modelMaxTokens || modelInfo.maxTokens let temperature = this.options.modelTemperature ?? 
defaultTemperature let thinking: BetaThinkingConfigParam | undefined = undefined if (modelInfo.thinking) { - const maxTokens = modelInfo.maxTokens || 8192 - const budgetTokens = this.options.anthropicThinking ?? Math.max(maxTokens * 0.8, 1024) + // Clamp the thinking budget to be at most 80% of max tokens and at + // least 1024 tokens. + const maxBudgetTokens = Math.floor((maxTokens || 8192) * 0.8) + const budgetTokens = Math.max( + Math.min(this.options.anthropicThinking ?? maxBudgetTokens, maxBudgetTokens), + 1024, + ) + thinking = { type: "enabled", budget_tokens: budgetTokens } temperature = 1.0 } @@ -271,7 +278,7 @@ export async function getOpenRouterModels() { modelInfo.supportsPromptCache = true modelInfo.cacheWritesPrice = 3.75 modelInfo.cacheReadsPrice = 0.3 - modelInfo.maxTokens = 16384 + modelInfo.maxTokens = 64_000 break case rawModel.id.startsWith("anthropic/claude-3.5-sonnet-20240620"): modelInfo.supportsPromptCache = true diff --git a/src/core/Cline.ts b/src/core/Cline.ts index 073bd109117..fb123e0584b 100644 --- a/src/core/Cline.ts +++ b/src/core/Cline.ts @@ -87,6 +87,7 @@ export type ClineOptions = { export class Cline { readonly taskId: string + readonly apiConfiguration: ApiConfiguration api: ApiHandler private terminalManager: TerminalManager private urlContentFetcher: UrlContentFetcher @@ -148,6 +149,7 @@ export class Cline { } this.taskId = crypto.randomUUID() + this.apiConfiguration = apiConfiguration this.api = buildApiHandler(apiConfiguration) this.terminalManager = new TerminalManager() this.urlContentFetcher = new UrlContentFetcher(provider.context) @@ -961,13 +963,21 @@ export class Cline { cacheWrites = 0, cacheReads = 0, }: ClineApiReqInfo = JSON.parse(previousRequest) + const totalTokens = tokensIn + tokensOut + cacheWrites + cacheReads - const trimmedMessages = truncateConversationIfNeeded( - this.apiConversationHistory, + const modelInfo = this.api.getModel().info + const maxTokens = modelInfo.thinking + ? 
this.apiConfiguration.modelMaxTokens || modelInfo.maxTokens + : modelInfo.maxTokens + const contextWindow = modelInfo.contextWindow + + const trimmedMessages = truncateConversationIfNeeded({ + messages: this.apiConversationHistory, totalTokens, - this.api.getModel().info, - ) + maxTokens, + contextWindow, + }) if (trimmedMessages !== this.apiConversationHistory) { await this.overwriteApiConversationHistory(trimmedMessages) diff --git a/src/core/sliding-window/__tests__/sliding-window.test.ts b/src/core/sliding-window/__tests__/sliding-window.test.ts index 3dcf9e5fd25..cb897aa8cb0 100644 --- a/src/core/sliding-window/__tests__/sliding-window.test.ts +++ b/src/core/sliding-window/__tests__/sliding-window.test.ts @@ -119,11 +119,21 @@ describe("getMaxTokens", () => { // Max tokens = 100000 - 50000 = 50000 // Below max tokens - no truncation - const result1 = truncateConversationIfNeeded(messages, 49999, modelInfo) + const result1 = truncateConversationIfNeeded({ + messages, + totalTokens: 49999, + contextWindow: modelInfo.contextWindow, + maxTokens: modelInfo.maxTokens, + }) expect(result1).toEqual(messages) // Above max tokens - truncate - const result2 = truncateConversationIfNeeded(messages, 50001, modelInfo) + const result2 = truncateConversationIfNeeded({ + messages, + totalTokens: 50001, + contextWindow: modelInfo.contextWindow, + maxTokens: modelInfo.maxTokens, + }) expect(result2).not.toEqual(messages) expect(result2.length).toBe(3) // Truncated with 0.5 fraction }) @@ -133,11 +143,21 @@ describe("getMaxTokens", () => { // Max tokens = 100000 - (100000 * 0.2) = 80000 // Below max tokens - no truncation - const result1 = truncateConversationIfNeeded(messages, 79999, modelInfo) + const result1 = truncateConversationIfNeeded({ + messages, + totalTokens: 79999, + contextWindow: modelInfo.contextWindow, + maxTokens: modelInfo.maxTokens, + }) expect(result1).toEqual(messages) // Above max tokens - truncate - const result2 = truncateConversationIfNeeded(messages, 80001, modelInfo) + const result2 = truncateConversationIfNeeded({ + messages, + totalTokens: 80001, + contextWindow: modelInfo.contextWindow, + maxTokens: modelInfo.maxTokens, + }) expect(result2).not.toEqual(messages) expect(result2.length).toBe(3) // Truncated with 0.5 fraction }) @@ -147,11 +167,21 @@ describe("getMaxTokens", () => { // Max tokens = 50000 - 10000 = 40000 // Below max tokens - no truncation - const result1 = truncateConversationIfNeeded(messages, 39999, modelInfo) + const result1 = truncateConversationIfNeeded({ + messages, + totalTokens: 39999, + contextWindow: modelInfo.contextWindow, + maxTokens: modelInfo.maxTokens, + }) expect(result1).toEqual(messages) // Above max tokens - truncate - const result2 = truncateConversationIfNeeded(messages, 40001, modelInfo) + const result2 = truncateConversationIfNeeded({ + messages, + totalTokens: 40001, + contextWindow: modelInfo.contextWindow, + maxTokens: modelInfo.maxTokens, + }) expect(result2).not.toEqual(messages) expect(result2.length).toBe(3) // Truncated with 0.5 fraction }) @@ -161,11 +191,21 @@ describe("getMaxTokens", () => { // Max tokens = 200000 - 30000 = 170000 // Below max tokens - no truncation - const result1 = truncateConversationIfNeeded(messages, 169999, modelInfo) + const result1 = truncateConversationIfNeeded({ + messages, + totalTokens: 169999, + contextWindow: modelInfo.contextWindow, + maxTokens: modelInfo.maxTokens, + }) expect(result1).toEqual(messages) // Above max tokens - truncate - const result2 = truncateConversationIfNeeded(messages, 
170001, modelInfo) + const result2 = truncateConversationIfNeeded({ + messages, + totalTokens: 170001, + contextWindow: modelInfo.contextWindow, + maxTokens: modelInfo.maxTokens, + }) expect(result2).not.toEqual(messages) expect(result2.length).toBe(3) // Truncated with 0.5 fraction }) @@ -194,7 +234,12 @@ describe("truncateConversationIfNeeded", () => { const maxTokens = 100000 - 30000 // 70000 const totalTokens = 69999 // Below threshold - const result = truncateConversationIfNeeded(messages, totalTokens, modelInfo) + const result = truncateConversationIfNeeded({ + messages, + totalTokens, + contextWindow: modelInfo.contextWindow, + maxTokens: modelInfo.maxTokens, + }) expect(result).toEqual(messages) // No truncation occurs }) @@ -207,7 +252,12 @@ describe("truncateConversationIfNeeded", () => { // With 4 messages after the first, 0.5 fraction means remove 2 messages const expectedResult = [messages[0], messages[3], messages[4]] - const result = truncateConversationIfNeeded(messages, totalTokens, modelInfo) + const result = truncateConversationIfNeeded({ + messages, + totalTokens, + contextWindow: modelInfo.contextWindow, + maxTokens: modelInfo.maxTokens, + }) expect(result).toEqual(expectedResult) }) @@ -218,14 +268,38 @@ describe("truncateConversationIfNeeded", () => { // Test below threshold const belowThreshold = 69999 - expect(truncateConversationIfNeeded(messages, belowThreshold, modelInfo1)).toEqual( - truncateConversationIfNeeded(messages, belowThreshold, modelInfo2), + expect( + truncateConversationIfNeeded({ + messages, + totalTokens: belowThreshold, + contextWindow: modelInfo1.contextWindow, + maxTokens: modelInfo1.maxTokens, + }), + ).toEqual( + truncateConversationIfNeeded({ + messages, + totalTokens: belowThreshold, + contextWindow: modelInfo2.contextWindow, + maxTokens: modelInfo2.maxTokens, + }), ) // Test above threshold const aboveThreshold = 70001 - expect(truncateConversationIfNeeded(messages, aboveThreshold, modelInfo1)).toEqual( - truncateConversationIfNeeded(messages, aboveThreshold, modelInfo2), + expect( + truncateConversationIfNeeded({ + messages, + totalTokens: aboveThreshold, + contextWindow: modelInfo1.contextWindow, + maxTokens: modelInfo1.maxTokens, + }), + ).toEqual( + truncateConversationIfNeeded({ + messages, + totalTokens: aboveThreshold, + contextWindow: modelInfo2.contextWindow, + maxTokens: modelInfo2.maxTokens, + }), ) }) }) diff --git a/src/core/sliding-window/index.ts b/src/core/sliding-window/index.ts index a0fff05ea55..8b646f933b9 100644 --- a/src/core/sliding-window/index.ts +++ b/src/core/sliding-window/index.ts @@ -1,7 +1,5 @@ import { Anthropic } from "@anthropic-ai/sdk" -import { ModelInfo } from "../../shared/api" - /** * Truncates a conversation by removing a fraction of the messages. * @@ -26,28 +24,29 @@ export function truncateConversation( } /** - * Conditionally truncates the conversation messages if the total token count exceeds the model's limit. + * Conditionally truncates the conversation messages if the total token count + * exceeds the model's limit. * * @param {Anthropic.Messages.MessageParam[]} messages - The conversation messages. * @param {number} totalTokens - The total number of tokens in the conversation. - * @param {ModelInfo} modelInfo - Model metadata including context window size. + * @param {number} contextWindow - The context window size. + * @param {number} maxTokens - The maximum number of tokens allowed. * @returns {Anthropic.Messages.MessageParam[]} The original or truncated conversation messages. 
*/ -export function truncateConversationIfNeeded( - messages: Anthropic.Messages.MessageParam[], - totalTokens: number, - modelInfo: ModelInfo, -): Anthropic.Messages.MessageParam[] { - return totalTokens < getMaxTokens(modelInfo) ? messages : truncateConversation(messages, 0.5) + +type TruncateOptions = { + messages: Anthropic.Messages.MessageParam[] + totalTokens: number + contextWindow: number + maxTokens?: number } -/** - * Calculates the maximum allowed tokens - * - * @param {ModelInfo} modelInfo - The model information containing the context window size. - * @returns {number} The maximum number of tokens allowed - */ -function getMaxTokens(modelInfo: ModelInfo): number { - // The buffer needs to be at least as large as `modelInfo.maxTokens`, or 20% of the context window if for some reason it's not set. - return modelInfo.contextWindow - (modelInfo.maxTokens || modelInfo.contextWindow * 0.2) +export function truncateConversationIfNeeded({ + messages, + totalTokens, + contextWindow, + maxTokens, +}: TruncateOptions): Anthropic.Messages.MessageParam[] { + const allowedTokens = contextWindow - (maxTokens || contextWindow * 0.2) + return totalTokens < allowedTokens ? messages : truncateConversation(messages, 0.5) } diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index bc6f4578683..5e6170e2eee 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -1671,6 +1671,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { requestyModelId, requestyModelInfo, modelTemperature, + modelMaxTokens, } = apiConfiguration await Promise.all([ this.updateGlobalState("apiProvider", apiProvider), @@ -1719,6 +1720,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { this.updateGlobalState("requestyModelId", requestyModelId), this.updateGlobalState("requestyModelInfo", requestyModelInfo), this.updateGlobalState("modelTemperature", modelTemperature), + this.updateGlobalState("modelMaxTokens", modelMaxTokens), ]) if (this.cline) { this.cline.api = buildApiHandler(apiConfiguration) @@ -2210,6 +2212,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { requestyModelId, requestyModelInfo, modelTemperature, + modelMaxTokens, maxOpenTabsContext, ] = await Promise.all([ this.getGlobalState("apiProvider") as Promise, @@ -2293,6 +2296,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { this.getGlobalState("requestyModelId") as Promise, this.getGlobalState("requestyModelInfo") as Promise, this.getGlobalState("modelTemperature") as Promise, + this.getGlobalState("modelMaxTokens") as Promise, this.getGlobalState("maxOpenTabsContext") as Promise, ]) @@ -2358,6 +2362,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { requestyModelId, requestyModelInfo, modelTemperature, + modelMaxTokens, }, lastShownAnnouncementId, customInstructions, diff --git a/src/shared/api.ts b/src/shared/api.ts index 5d4b8b120d7..e7e4c54db6a 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -68,6 +68,7 @@ export interface ApiHandlerOptions { requestyModelId?: string requestyModelInfo?: ModelInfo modelTemperature?: number + modelMaxTokens?: number } export type ApiConfiguration = ApiHandlerOptions & { @@ -92,19 +93,13 @@ export interface ModelInfo { thinking?: boolean } -export const THINKING_BUDGET = { - step: 1024, - min: 1024, - default: 8 * 1024, -} - // Anthropic // https://docs.anthropic.com/en/docs/about-claude/models export type AnthropicModelId = keyof typeof anthropicModels 
export const anthropicDefaultModelId: AnthropicModelId = "claude-3-7-sonnet-20250219" export const anthropicModels = { "claude-3-7-sonnet-20250219:thinking": { - maxTokens: 16384, + maxTokens: 64_000, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, @@ -116,7 +111,7 @@ export const anthropicModels = { thinking: true, }, "claude-3-7-sonnet-20250219": { - maxTokens: 16384, + maxTokens: 64_000, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, diff --git a/src/shared/globalState.ts b/src/shared/globalState.ts index 7b6b4f8274b..2cc90456a76 100644 --- a/src/shared/globalState.ts +++ b/src/shared/globalState.ts @@ -81,5 +81,6 @@ export type GlobalStateKey = | "requestyModelInfo" | "unboundModelInfo" | "modelTemperature" + | "modelMaxTokens" | "mistralCodestralUrl" | "maxOpenTabsContext" diff --git a/webview-ui/src/components/settings/ThinkingBudget.tsx b/webview-ui/src/components/settings/ThinkingBudget.tsx index efaa90dc39a..5b678744106 100644 --- a/webview-ui/src/components/settings/ThinkingBudget.tsx +++ b/webview-ui/src/components/settings/ThinkingBudget.tsx @@ -1,6 +1,8 @@ +import { useEffect } from "react" + import { Slider } from "@/components/ui" -import { ApiConfiguration, ModelInfo, THINKING_BUDGET } from "../../../../src/shared/api" +import { ApiConfiguration, ModelInfo } from "../../../../src/shared/api" interface ThinkingBudgetProps { apiConfiguration: ApiConfiguration @@ -9,21 +11,52 @@ interface ThinkingBudgetProps { } export const ThinkingBudget = ({ apiConfiguration, setApiConfigurationField, modelInfo }: ThinkingBudgetProps) => { - const budget = apiConfiguration?.anthropicThinking ?? THINKING_BUDGET.default - - return modelInfo && modelInfo.thinking ? ( -
-
Thinking Budget
-
- setApiConfigurationField("anthropicThinking", value[0])} - /> -
{budget}
+ const tokens = apiConfiguration?.modelMaxTokens || modelInfo?.maxTokens || 64_000 + const tokensMin = 8192 + const tokensMax = modelInfo?.maxTokens || 64_000 + + const thinkingTokens = apiConfiguration?.anthropicThinking || 8192 + const thinkingTokensMin = 1024 + const thinkingTokensMax = Math.floor(0.8 * tokens) + + useEffect(() => { + if (thinkingTokens > thinkingTokensMax) { + setApiConfigurationField("anthropicThinking", thinkingTokensMax) + } + }, [thinkingTokens, thinkingTokensMax, setApiConfigurationField]) + + if (!modelInfo || !modelInfo.thinking) { + return null + } + + return ( +
+
+
Max Tokens
+
+ setApiConfigurationField("modelMaxTokens", value)} + /> +
{tokens}
+
+
+
+
Max Thinking Tokens
+
+ setApiConfigurationField("anthropicThinking", value)} + /> +
{thinkingTokens}
+
- ) : null + ) } From cf69b0fff92e8b1cff23cf4b37d8e8a2f10a34dc Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Wed, 26 Feb 2025 14:26:27 -0800 Subject: [PATCH 080/145] Add changeset --- .changeset/wild-emus-dream.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/wild-emus-dream.md diff --git a/.changeset/wild-emus-dream.md b/.changeset/wild-emus-dream.md new file mode 100644 index 00000000000..19e5a4626b0 --- /dev/null +++ b/.changeset/wild-emus-dream.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Allow control over maxTokens for thinking models From dfa019e7f443bf9997ed2ac2556f597a59e4bf77 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Wed, 26 Feb 2025 17:34:39 -0500 Subject: [PATCH 081/145] Truncate results from search_files to 500 chars max --- .changeset/stale-cooks-help.md | 5 ++ src/services/ripgrep/__tests__/index.test.ts | 51 ++++++++++++++++++++ src/services/ripgrep/index.ts | 30 ++++++++++-- 3 files changed, 82 insertions(+), 4 deletions(-) create mode 100644 .changeset/stale-cooks-help.md create mode 100644 src/services/ripgrep/__tests__/index.test.ts diff --git a/.changeset/stale-cooks-help.md b/.changeset/stale-cooks-help.md new file mode 100644 index 00000000000..8c9c7147381 --- /dev/null +++ b/.changeset/stale-cooks-help.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Truncate search_file output to avoid crashing the extension diff --git a/src/services/ripgrep/__tests__/index.test.ts b/src/services/ripgrep/__tests__/index.test.ts new file mode 100644 index 00000000000..7c3549a827b --- /dev/null +++ b/src/services/ripgrep/__tests__/index.test.ts @@ -0,0 +1,51 @@ +// npx jest src/services/ripgrep/__tests__/index.test.ts + +import { describe, expect, it } from "@jest/globals" +import { truncateLine } from "../index" + +describe("Ripgrep line truncation", () => { + // The default MAX_LINE_LENGTH is 500 in the implementation + const MAX_LINE_LENGTH = 500 + + it("should truncate lines longer than MAX_LINE_LENGTH", () => { + const longLine = "a".repeat(600) // Line longer than MAX_LINE_LENGTH + const truncated = truncateLine(longLine) + + expect(truncated).toContain("[truncated...]") + expect(truncated.length).toBeLessThan(longLine.length) + expect(truncated.length).toEqual(MAX_LINE_LENGTH + " [truncated...]".length) + }) + + it("should not truncate lines shorter than MAX_LINE_LENGTH", () => { + const shortLine = "Short line of text" + const truncated = truncateLine(shortLine) + + expect(truncated).toEqual(shortLine) + expect(truncated).not.toContain("[truncated...]") + }) + + it("should correctly truncate a line at exactly MAX_LINE_LENGTH characters", () => { + const exactLine = "a".repeat(MAX_LINE_LENGTH) + const exactPlusOne = exactLine + "x" + + // Should not truncate when exactly MAX_LINE_LENGTH + expect(truncateLine(exactLine)).toEqual(exactLine) + + // Should truncate when exceeding MAX_LINE_LENGTH by even 1 character + expect(truncateLine(exactPlusOne)).toContain("[truncated...]") + }) + + it("should handle empty lines without errors", () => { + expect(truncateLine("")).toEqual("") + }) + + it("should allow custom maximum length", () => { + const customLength = 100 + const line = "a".repeat(customLength + 50) + + const truncated = truncateLine(line, customLength) + + expect(truncated.length).toEqual(customLength + " [truncated...]".length) + expect(truncated).toContain("[truncated...]") + }) +}) diff --git a/src/services/ripgrep/index.ts b/src/services/ripgrep/index.ts index b48c60b5b2e..770c897e529 100644 --- 
a/src/services/ripgrep/index.ts +++ b/src/services/ripgrep/index.ts @@ -58,7 +58,19 @@ interface SearchResult { afterContext: string[] } +// Constants const MAX_RESULTS = 300 +const MAX_LINE_LENGTH = 500 + +/** + * Truncates a line if it exceeds the maximum length + * @param line The line to truncate + * @param maxLength The maximum allowed length (defaults to MAX_LINE_LENGTH) + * @returns The truncated line, or the original line if it's shorter than maxLength + */ +export function truncateLine(line: string, maxLength: number = MAX_LINE_LENGTH): string { + return line.length > maxLength ? line.substring(0, maxLength) + " [truncated...]" : line +} async function getBinPath(vscodeAppRoot: string): Promise { const checkPath = async (pkgFolder: string) => { @@ -140,7 +152,8 @@ export async function regexSearchFiles( let output: string try { output = await execRipgrep(rgPath, args) - } catch { + } catch (error) { + console.error("Error executing ripgrep:", error) return "No results found" } const results: SearchResult[] = [] @@ -154,19 +167,28 @@ export async function regexSearchFiles( if (currentResult) { results.push(currentResult as SearchResult) } + + // Safety check: truncate extremely long lines to prevent excessive output + const matchText = parsed.data.lines.text + const truncatedMatch = truncateLine(matchText) + currentResult = { file: parsed.data.path.text, line: parsed.data.line_number, column: parsed.data.submatches[0].start, - match: parsed.data.lines.text, + match: truncatedMatch, beforeContext: [], afterContext: [], } } else if (parsed.type === "context" && currentResult) { + // Apply the same truncation logic to context lines + const contextText = parsed.data.lines.text + const truncatedContext = truncateLine(contextText) + if (parsed.data.line_number < currentResult.line!) 
{ - currentResult.beforeContext!.push(parsed.data.lines.text) + currentResult.beforeContext!.push(truncatedContext) } else { - currentResult.afterContext!.push(parsed.data.lines.text) + currentResult.afterContext!.push(truncatedContext) } } } catch (error) { From 247a50a6dcbb29512d20796b29a3240dc4b84463 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 26 Feb 2025 23:17:23 +0000 Subject: [PATCH 082/145] changeset version bump --- .changeset/fluffy-apples-attack.md | 5 ----- .changeset/orange-zoos-train.md | 5 ----- .changeset/stale-cooks-help.md | 5 ----- .changeset/tender-cycles-help.md | 5 ----- .changeset/wild-emus-dream.md | 5 ----- CHANGELOG.md | 10 ++++++++++ package-lock.json | 4 ++-- package.json | 2 +- 8 files changed, 13 insertions(+), 28 deletions(-) delete mode 100644 .changeset/fluffy-apples-attack.md delete mode 100644 .changeset/orange-zoos-train.md delete mode 100644 .changeset/stale-cooks-help.md delete mode 100644 .changeset/tender-cycles-help.md delete mode 100644 .changeset/wild-emus-dream.md diff --git a/.changeset/fluffy-apples-attack.md b/.changeset/fluffy-apples-attack.md deleted file mode 100644 index 924a1b25057..00000000000 --- a/.changeset/fluffy-apples-attack.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Handle really long text in the ChatRow similar to TaskHeader diff --git a/.changeset/orange-zoos-train.md b/.changeset/orange-zoos-train.md deleted file mode 100644 index 76c16f45671..00000000000 --- a/.changeset/orange-zoos-train.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Support multiple files in drag-and-drop diff --git a/.changeset/stale-cooks-help.md b/.changeset/stale-cooks-help.md deleted file mode 100644 index 8c9c7147381..00000000000 --- a/.changeset/stale-cooks-help.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Truncate search_file output to avoid crashing the extension diff --git a/.changeset/tender-cycles-help.md b/.changeset/tender-cycles-help.md deleted file mode 100644 index d43e423ee61..00000000000 --- a/.changeset/tender-cycles-help.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Better OpenRouter error handling diff --git a/.changeset/wild-emus-dream.md b/.changeset/wild-emus-dream.md deleted file mode 100644 index 19e5a4626b0..00000000000 --- a/.changeset/wild-emus-dream.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Allow control over maxTokens for thinking models diff --git a/CHANGELOG.md b/CHANGELOG.md index 02a4a30cbd2..0e5223231ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Roo Code Changelog +## 3.7.6 + +### Patch Changes + +- Handle really long text in the ChatRow similar to TaskHeader +- Support multiple files in drag-and-drop +- Truncate search_file output to avoid crashing the extension +- Better OpenRouter error handling +- Allow control over maxTokens for thinking models + ## [3.7.5] - Fix context window truncation math (see [#1173](https://github.com/RooVetGit/Roo-Code/issues/1173)) diff --git a/package-lock.json b/package-lock.json index a6c75bd69b3..808e2f2f107 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "roo-cline", - "version": "3.7.5", + "version": "3.7.6", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "3.7.5", + "version": "3.7.6", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.37.0", diff --git a/package.json b/package.json index 40bb6a545dd..463e9d597a2 100644 --- 
a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": "Roo Code (prev. Roo Cline)", "description": "A whole dev team of AI agents in your editor.", "publisher": "RooVeterinaryInc", - "version": "3.7.5", + "version": "3.7.6", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From 4f578dc8262e03a2a665abcd6784610cc092cdb2 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Wed, 26 Feb 2025 18:47:22 -0500 Subject: [PATCH 083/145] Update CHANGELOG.md --- CHANGELOG.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e5223231ae..13b06953354 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,14 +1,12 @@ # Roo Code Changelog -## 3.7.6 +## [3.7.6] -### Patch Changes - -- Handle really long text in the ChatRow similar to TaskHeader +- Handle really long text better in the in the ChatRow similar to TaskHeader (thanks @joemanley201!) - Support multiple files in drag-and-drop - Truncate search_file output to avoid crashing the extension -- Better OpenRouter error handling -- Allow control over maxTokens for thinking models +- Better OpenRouter error handling (no more "Provider Error") +- Add slider to control max output tokens for thinking models ## [3.7.5] From 5c5bf8502094fb87397eebadda89acd3512dcf84 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Wed, 26 Feb 2025 21:36:19 -0500 Subject: [PATCH 084/145] Stop removing commas from terminal output --- .changeset/sour-parents-hug.md | 5 +++++ src/integrations/terminal/TerminalProcess.ts | 3 --- 2 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 .changeset/sour-parents-hug.md diff --git a/.changeset/sour-parents-hug.md b/.changeset/sour-parents-hug.md new file mode 100644 index 00000000000..a24286b6bbe --- /dev/null +++ b/.changeset/sour-parents-hug.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Stop removing commas from terminal output diff --git a/src/integrations/terminal/TerminalProcess.ts b/src/integrations/terminal/TerminalProcess.ts index 5597350db3c..4e85c10575d 100644 --- a/src/integrations/terminal/TerminalProcess.ts +++ b/src/integrations/terminal/TerminalProcess.ts @@ -110,9 +110,6 @@ export class TerminalProcess extends EventEmitter { data = lines.join("\n") } - // FIXME: right now it seems that data chunks returned to us from the shell integration stream contains random commas, which from what I can tell is not the expected behavior. There has to be a better solution here than just removing all commas. - data = data.replace(/,/g, "") - // 2. Set isHot depending on the command // Set to hot to stall API requests until terminal is cool again this.isHot = true From 4806ab5420048af6526348e5b128dd4724c9fcc8 Mon Sep 17 00:00:00 2001 From: dleffel Date: Wed, 26 Feb 2025 21:34:56 -0800 Subject: [PATCH 085/145] Fix missing tooltips in several components. 
--- .../src/components/chat/Announcement.tsx | 1 + .../src/components/chat/ChatTextArea.tsx | 5 +++ webview-ui/src/components/chat/ChatView.tsx | 33 ++++++++++++++++++- webview-ui/src/components/chat/TaskHeader.tsx | 13 ++++++-- 4 files changed, 49 insertions(+), 3 deletions(-) diff --git a/webview-ui/src/components/chat/Announcement.tsx b/webview-ui/src/components/chat/Announcement.tsx index a2e96606efc..93d0c9d7500 100644 --- a/webview-ui/src/components/chat/Announcement.tsx +++ b/webview-ui/src/components/chat/Announcement.tsx @@ -25,6 +25,7 @@ const Announcement = ({ version, hideAnnouncement }: AnnouncementProps) => { diff --git a/webview-ui/src/components/chat/ChatTextArea.tsx b/webview-ui/src/components/chat/ChatTextArea.tsx index be2b2a97984..dcbe0851477 100644 --- a/webview-ui/src/components/chat/ChatTextArea.tsx +++ b/webview-ui/src/components/chat/ChatTextArea.tsx @@ -798,6 +798,7 @@ const ChatTextArea = forwardRef( { const value = e.target.value if (value === "settings-action") { @@ -915,6 +917,7 @@ const ChatTextArea = forwardRef( role="button" aria-label="enhance prompt" data-testid="enhance-prompt-button" + title="Enhance prompt with additional context" className={`input-icon-button ${ textAreaDisabled ? "disabled" : "" } codicon codicon-sparkle`} @@ -927,11 +930,13 @@ const ChatTextArea = forwardRef( className={`input-icon-button ${ shouldDisableImages ? "disabled" : "" } codicon codicon-device-camera`} + title="Add images to message" onClick={() => !shouldDisableImages && onSelectImages()} style={{ fontSize: 16.5 }} /> !textAreaDisabled && onSend()} style={{ fontSize: 15 }} /> diff --git a/webview-ui/src/components/chat/ChatView.tsx b/webview-ui/src/components/chat/ChatView.tsx index 98369cf095c..fcd1ba9a3b4 100644 --- a/webview-ui/src/components/chat/ChatView.tsx +++ b/webview-ui/src/components/chat/ChatView.tsx @@ -1077,7 +1077,8 @@ const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryVie onClick={() => { scrollToBottomSmooth() disableAutoScrollRef.current = false - }}> + }} + title="Scroll to bottom of chat">
@@ -1101,6 +1102,25 @@ const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryVie flex: secondaryButtonText ? 1 : 2, marginRight: secondaryButtonText ? "6px" : "0", }} + title={ + primaryButtonText === "Retry" + ? "Try the operation again" + : primaryButtonText === "Save" + ? "Save the file changes" + : primaryButtonText === "Approve" + ? "Approve this action" + : primaryButtonText === "Run Command" + ? "Execute this command" + : primaryButtonText === "Start New Task" + ? "Begin a new task" + : primaryButtonText === "Resume Task" + ? "Continue the current task" + : primaryButtonText === "Proceed Anyways" + ? "Continue despite warnings" + : primaryButtonText === "Proceed While Running" + ? "Continue while command executes" + : undefined + } onClick={(e) => handlePrimaryButtonClick(inputValue, selectedImages)}> {primaryButtonText} @@ -1113,6 +1133,17 @@ const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryVie flex: isStreaming ? 2 : 1, marginLeft: isStreaming ? 0 : "6px", }} + title={ + isStreaming + ? "Cancel the current operation" + : secondaryButtonText === "Start New Task" + ? "Begin a new task" + : secondaryButtonText === "Reject" + ? "Reject this action" + : secondaryButtonText === "Terminate" + ? "End the current task" + : undefined + } onClick={(e) => handleSecondaryButtonClick(inputValue, selectedImages)}> {isStreaming ? "Cancel" : secondaryButtonText} diff --git a/webview-ui/src/components/chat/TaskHeader.tsx b/webview-ui/src/components/chat/TaskHeader.tsx index 341855f796e..fb7db6f6173 100644 --- a/webview-ui/src/components/chat/TaskHeader.tsx +++ b/webview-ui/src/components/chat/TaskHeader.tsx @@ -180,7 +180,11 @@ const TaskHeader: React.FC = ({ ${totalCost?.toFixed(4)}
)} - +
@@ -348,13 +352,18 @@ export const highlightMentions = (text?: string, withShadow = true) => { const TaskActions = ({ item }: { item: HistoryItem | undefined }) => (
- {!!item?.size && item.size > 0 && (
) diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index d3e65a99ea8..51ef4fe81d6 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -52,7 +52,7 @@ const SettingsView = forwardRef(({ onDone }, alwaysAllowWrite, alwaysApproveResubmit, browserViewportSize, - checkpointsEnabled, + enableCheckpoints, diffEnabled, experiments, fuzzyMatchThreshold, @@ -143,7 +143,7 @@ const SettingsView = forwardRef(({ onDone }, vscode.postMessage({ type: "soundEnabled", bool: soundEnabled }) vscode.postMessage({ type: "soundVolume", value: soundVolume }) vscode.postMessage({ type: "diffEnabled", bool: diffEnabled }) - vscode.postMessage({ type: "checkpointsEnabled", bool: checkpointsEnabled }) + vscode.postMessage({ type: "enableCheckpoints", bool: enableCheckpoints }) vscode.postMessage({ type: "browserViewportSize", text: browserViewportSize }) vscode.postMessage({ type: "fuzzyMatchThreshold", value: fuzzyMatchThreshold ?? 1.0 }) vscode.postMessage({ type: "writeDelayMs", value: writeDelayMs }) @@ -706,6 +706,25 @@ const SettingsView = forwardRef(({ onDone },

+
+ { + setCachedStateField("enableCheckpoints", e.target.checked) + }}> + Enable automatic checkpoints + +

+ When enabled, Roo will automatically create checkpoints during task execution, making it + easy to review changes or revert to earlier states. +

+
+
(({ onDone },
)} -
-
- ⚠️ - { - setCachedStateField("checkpointsEnabled", e.target.checked) - }}> - Enable experimental checkpoints - -
-

- When enabled, Roo will save a checkpoint whenever a file in the workspace is modified, - added or deleted, letting you easily revert to a previous state. -

-
- {Object.entries(experimentConfigsMap) .filter((config) => config[0] !== "DIFF_STRATEGY") .map((config) => ( diff --git a/webview-ui/src/context/ExtensionStateContext.tsx b/webview-ui/src/context/ExtensionStateContext.tsx index ae5c5b95398..3dfc87de750 100644 --- a/webview-ui/src/context/ExtensionStateContext.tsx +++ b/webview-ui/src/context/ExtensionStateContext.tsx @@ -32,7 +32,7 @@ export interface ExtensionStateContextType extends ExtensionState { setSoundEnabled: (value: boolean) => void setSoundVolume: (value: number) => void setDiffEnabled: (value: boolean) => void - setCheckpointsEnabled: (value: boolean) => void + setEnableCheckpoints: (value: boolean) => void setBrowserViewportSize: (value: string) => void setFuzzyMatchThreshold: (value: number) => void preferredLanguage: string @@ -79,7 +79,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode soundEnabled: false, soundVolume: 0.5, diffEnabled: false, - checkpointsEnabled: false, + enableCheckpoints: true, fuzzyMatchThreshold: 1.0, preferredLanguage: "English", writeDelayMs: 1000, @@ -219,7 +219,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode setSoundEnabled: (value) => setState((prevState) => ({ ...prevState, soundEnabled: value })), setSoundVolume: (value) => setState((prevState) => ({ ...prevState, soundVolume: value })), setDiffEnabled: (value) => setState((prevState) => ({ ...prevState, diffEnabled: value })), - setCheckpointsEnabled: (value) => setState((prevState) => ({ ...prevState, checkpointsEnabled: value })), + setEnableCheckpoints: (value) => setState((prevState) => ({ ...prevState, enableCheckpoints: value })), setBrowserViewportSize: (value: string) => setState((prevState) => ({ ...prevState, browserViewportSize: value })), setFuzzyMatchThreshold: (value) => setState((prevState) => ({ ...prevState, fuzzyMatchThreshold: value })), From ea38d9ebbac80f4170f74b4c7d978e588b132d2d Mon Sep 17 00:00:00 2001 From: Aitor Oses Date: Thu, 27 Feb 2025 09:04:54 +0100 Subject: [PATCH 087/145] Enable prompt caching for Claude Sonnet 3.7 Vertex AI model --- src/shared/api.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shared/api.ts b/src/shared/api.ts index 5cda3330318..cd6aead1a59 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -441,7 +441,7 @@ export const vertexModels = { contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, - supportsPromptCache: false, + supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, }, From 0b583ed15ee80acdd853c6e994c1694f3d4f0cca Mon Sep 17 00:00:00 2001 From: cte Date: Thu, 27 Feb 2025 02:34:14 -0800 Subject: [PATCH 088/145] Fix AnthropicHandler#completePrompt --- src/api/providers/anthropic.ts | 94 +++++++++++++++++----------------- 1 file changed, 47 insertions(+), 47 deletions(-) diff --git a/src/api/providers/anthropic.ts b/src/api/providers/anthropic.ts index 8c5a1795b1f..eca81eab2e2 100644 --- a/src/api/providers/anthropic.ts +++ b/src/api/providers/anthropic.ts @@ -30,29 +30,7 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { let stream: AnthropicStream const cacheControl: CacheControlEphemeral = { type: "ephemeral" } - let { id: modelId, info: modelInfo } = this.getModel() - const maxTokens = this.options.modelMaxTokens || modelInfo.maxTokens || 8192 - let temperature = this.options.modelTemperature ?? 
ANTHROPIC_DEFAULT_TEMPERATURE - let thinking: BetaThinkingConfigParam | undefined = undefined - - // Anthropic "Thinking" models require a temperature of 1.0. - if (modelId === "claude-3-7-sonnet-20250219:thinking") { - // The `:thinking` variant is a virtual identifier for the - // `claude-3-7-sonnet-20250219` model with a thinking budget. - // We can handle this more elegantly in the future. - modelId = "claude-3-7-sonnet-20250219" - - // Clamp the thinking budget to be at most 80% of max tokens and at - // least 1024 tokens. - const maxBudgetTokens = Math.floor(maxTokens * 0.8) - const budgetTokens = Math.max( - Math.min(this.options.anthropicThinking ?? maxBudgetTokens, maxBudgetTokens), - 1024, - ) - - thinking = { type: "enabled", budget_tokens: budgetTokens } - temperature = 1.0 - } + let { id: modelId, temperature, maxTokens, thinking } = this.getModel() switch (modelId) { case "claude-3-7-sonnet-20250219": @@ -202,40 +180,62 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { } } - getModel(): { id: AnthropicModelId; info: ModelInfo } { + getModel() { const modelId = this.options.apiModelId + let temperature = this.options.modelTemperature ?? ANTHROPIC_DEFAULT_TEMPERATURE + let thinking: BetaThinkingConfigParam | undefined = undefined if (modelId && modelId in anthropicModels) { - const id = modelId as AnthropicModelId - return { id, info: anthropicModels[id] } - } + let id = modelId as AnthropicModelId + const info: ModelInfo = anthropicModels[id] - return { id: anthropicDefaultModelId, info: anthropicModels[anthropicDefaultModelId] } - } + // The `:thinking` variant is a virtual identifier for the + // `claude-3-7-sonnet-20250219` model with a thinking budget. + // We can handle this more elegantly in the future. + if (id === "claude-3-7-sonnet-20250219:thinking") { + id = "claude-3-7-sonnet-20250219" + } - async completePrompt(prompt: string): Promise { - try { - const response = await this.client.messages.create({ - model: this.getModel().id, - max_tokens: this.getModel().info.maxTokens || 8192, - temperature: this.options.modelTemperature ?? ANTHROPIC_DEFAULT_TEMPERATURE, - messages: [{ role: "user", content: prompt }], - stream: false, - }) + const maxTokens = this.options.modelMaxTokens || info.maxTokens || 8192 - const content = response.content[0] + if (info.thinking) { + // Anthropic "Thinking" models require a temperature of 1.0. + temperature = 1.0 - if (content.type === "text") { - return content.text - } + // Clamp the thinking budget to be at most 80% of max tokens and at + // least 1024 tokens. + const maxBudgetTokens = Math.floor(maxTokens * 0.8) + const budgetTokens = Math.max( + Math.min(this.options.anthropicThinking ?? 
maxBudgetTokens, maxBudgetTokens), + 1024, + ) - return "" - } catch (error) { - if (error instanceof Error) { - throw new Error(`Anthropic completion error: ${error.message}`) + thinking = { type: "enabled", budget_tokens: budgetTokens } } - throw error + return { id, info, temperature, maxTokens, thinking } } + + const id = anthropicDefaultModelId + const info: ModelInfo = anthropicModels[id] + const maxTokens = this.options.modelMaxTokens || info.maxTokens || 8192 + + return { id, info, temperature, maxTokens, thinking } + } + + async completePrompt(prompt: string) { + let { id: modelId, temperature, maxTokens, thinking } = this.getModel() + + const message = await this.client.messages.create({ + model: modelId, + max_tokens: maxTokens, + temperature, + thinking, + messages: [{ role: "user", content: prompt }], + stream: false, + }) + + const content = message.content.find(({ type }) => type === "text") + return content?.type === "text" ? content.text : "" } } From d66b5d2db62f0a6cb8650b8f465d14cf77bbcd36 Mon Sep 17 00:00:00 2001 From: cte Date: Thu, 27 Feb 2025 02:40:39 -0800 Subject: [PATCH 089/145] Fix tests --- src/api/providers/__tests__/anthropic.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/providers/__tests__/anthropic.test.ts b/src/api/providers/__tests__/anthropic.test.ts index ff7bdb40549..82e098f65fb 100644 --- a/src/api/providers/__tests__/anthropic.test.ts +++ b/src/api/providers/__tests__/anthropic.test.ts @@ -153,7 +153,7 @@ describe("AnthropicHandler", () => { }) it("should handle API errors", async () => { - mockCreate.mockRejectedValueOnce(new Error("API Error")) + mockCreate.mockRejectedValueOnce(new Error("Anthropic completion error: API Error")) await expect(handler.completePrompt("Test prompt")).rejects.toThrow("Anthropic completion error: API Error") }) From 210afc681e799ade14fa2886e07cce0cb6aa2496 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Thu, 27 Feb 2025 09:44:50 -0500 Subject: [PATCH 090/145] v3.7.7 --- .changeset/gorgeous-feet-dress.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/gorgeous-feet-dress.md diff --git a/.changeset/gorgeous-feet-dress.md b/.changeset/gorgeous-feet-dress.md new file mode 100644 index 00000000000..fe2183052d3 --- /dev/null +++ b/.changeset/gorgeous-feet-dress.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +v3.7.7 From dc83617b4d2da06b830e848476a1c5d9179a361a Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Thu, 27 Feb 2025 09:51:05 -0500 Subject: [PATCH 091/145] Revert "Stop removing commas from terminal output" --- .changeset/sour-parents-hug.md | 5 ----- src/integrations/terminal/TerminalProcess.ts | 3 +++ 2 files changed, 3 insertions(+), 5 deletions(-) delete mode 100644 .changeset/sour-parents-hug.md diff --git a/.changeset/sour-parents-hug.md b/.changeset/sour-parents-hug.md deleted file mode 100644 index a24286b6bbe..00000000000 --- a/.changeset/sour-parents-hug.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Stop removing commas from terminal output diff --git a/src/integrations/terminal/TerminalProcess.ts b/src/integrations/terminal/TerminalProcess.ts index 4e85c10575d..5597350db3c 100644 --- a/src/integrations/terminal/TerminalProcess.ts +++ b/src/integrations/terminal/TerminalProcess.ts @@ -110,6 +110,9 @@ export class TerminalProcess extends EventEmitter { data = lines.join("\n") } + // FIXME: right now it seems that data chunks returned to us from the shell integration stream contains random commas, which from what I can tell is 
not the expected behavior. There has to be a better solution here than just removing all commas. + data = data.replace(/,/g, "") + // 2. Set isHot depending on the command // Set to hot to stall API requests until terminal is cool again this.isHot = true From 8612ab574be39f863329c1b93b32d257bd67450f Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 27 Feb 2025 15:56:03 +0000 Subject: [PATCH 092/145] changeset version bump --- .changeset/eighty-cheetahs-fetch.md | 5 ----- .changeset/gorgeous-feet-dress.md | 5 ----- CHANGELOG.md | 7 +++++++ package-lock.json | 4 ++-- package.json | 2 +- 5 files changed, 10 insertions(+), 13 deletions(-) delete mode 100644 .changeset/eighty-cheetahs-fetch.md delete mode 100644 .changeset/gorgeous-feet-dress.md diff --git a/.changeset/eighty-cheetahs-fetch.md b/.changeset/eighty-cheetahs-fetch.md deleted file mode 100644 index ca103880c84..00000000000 --- a/.changeset/eighty-cheetahs-fetch.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Graduate checkpoints out of beta diff --git a/.changeset/gorgeous-feet-dress.md b/.changeset/gorgeous-feet-dress.md deleted file mode 100644 index fe2183052d3..00000000000 --- a/.changeset/gorgeous-feet-dress.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -v3.7.7 diff --git a/CHANGELOG.md b/CHANGELOG.md index 13b06953354..ff5d8d6aaf2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # Roo Code Changelog +## 3.7.7 + +### Patch Changes + +- Graduate checkpoints out of beta +- v3.7.7 + ## [3.7.6] - Handle really long text better in the in the ChatRow similar to TaskHeader (thanks @joemanley201!) diff --git a/package-lock.json b/package-lock.json index 808e2f2f107..c1f748983fc 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "roo-cline", - "version": "3.7.6", + "version": "3.7.7", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "3.7.6", + "version": "3.7.7", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.37.0", diff --git a/package.json b/package.json index 463e9d597a2..8441488bac3 100644 --- a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": "Roo Code (prev. 
Roo Cline)", "description": "A whole dev team of AI agents in your editor.", "publisher": "RooVeterinaryInc", - "version": "3.7.6", + "version": "3.7.7", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From 4786815fe8bccf47af9202ff8cb99c6f1ef8ad16 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Thu, 27 Feb 2025 11:02:13 -0500 Subject: [PATCH 093/145] Update CHANGELOG.md --- CHANGELOG.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ff5d8d6aaf2..d0cf8f79c3a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,10 @@ # Roo Code Changelog -## 3.7.7 - -### Patch Changes +## [3.7.7] - Graduate checkpoints out of beta -- v3.7.7 +- Fix enhance prompt button when using Thinking Sonnet +- Add tooltips to make what buttons do more obvious ## [3.7.6] From eec1769b6b5883d3179e2d1a370ed01b83078286 Mon Sep 17 00:00:00 2001 From: Catalin Lupuleti Date: Thu, 27 Feb 2025 18:56:28 +0000 Subject: [PATCH 094/145] Added cache costs for Claude Sonnet 3.7 via Vertex AI --- src/shared/api.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/shared/api.ts b/src/shared/api.ts index e7e4c54db6a..d2b4ed728fb 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -444,6 +444,8 @@ export const vertexModels = { supportsPromptCache: false, inputPrice: 3.0, outputPrice: 15.0, + cacheWritesPrice: 3.75, + cacheReadsPrice: 0.3, }, "claude-3-5-sonnet-v2@20241022": { maxTokens: 8192, From 1f0211ee6418752201b7d9b34ffb12608ba4a54d Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Sun, 23 Feb 2025 20:52:10 -0600 Subject: [PATCH 095/145] Allow users to set custom system prompts --- src/__mocks__/fs/promises.ts | 1 - src/__mocks__/jest.setup.ts | 30 +++ .../__tests__/custom-system-prompt.test.ts | 172 ++++++++++++++++++ .../prompts/sections/custom-system-prompt.ts | 60 ++++++ src/core/prompts/system.ts | 15 ++ .../src/components/prompts/PromptsView.tsx | 40 ++++ 6 files changed, 317 insertions(+), 1 deletion(-) create mode 100644 src/core/prompts/__tests__/custom-system-prompt.test.ts create mode 100644 src/core/prompts/sections/custom-system-prompt.ts diff --git a/src/__mocks__/fs/promises.ts b/src/__mocks__/fs/promises.ts index d5f076247a6..e496a7fa510 100644 --- a/src/__mocks__/fs/promises.ts +++ b/src/__mocks__/fs/promises.ts @@ -140,7 +140,6 @@ const mockFs = { currentPath += "/" + parts[parts.length - 1] mockDirectories.add(currentPath) return Promise.resolve() - return Promise.resolve() }), access: jest.fn().mockImplementation(async (path: string) => { diff --git a/src/__mocks__/jest.setup.ts b/src/__mocks__/jest.setup.ts index 6bd00e95673..836279bfe45 100644 --- a/src/__mocks__/jest.setup.ts +++ b/src/__mocks__/jest.setup.ts @@ -15,3 +15,33 @@ jest.mock("../utils/logging", () => ({ }), }, })) + +// Add toPosix method to String prototype for all tests, mimicking src/utils/path.ts +// This is needed because the production code expects strings to have this method +// Note: In production, this is added via import in the entry point (extension.ts) +export {} + +declare global { + interface String { + toPosix(): string + } +} + +// Implementation that matches src/utils/path.ts +function toPosixPath(p: string) { + // Extended-Length Paths in Windows start with "\\?\" to allow longer paths + // and bypass usual parsing. If detected, we return the path unmodified. 
+ const isExtendedLengthPath = p.startsWith("\\\\?\\") + + if (isExtendedLengthPath) { + return p + } + + return p.replace(/\\/g, "/") +} + +if (!String.prototype.toPosix) { + String.prototype.toPosix = function (this: string): string { + return toPosixPath(this) + } +} diff --git a/src/core/prompts/__tests__/custom-system-prompt.test.ts b/src/core/prompts/__tests__/custom-system-prompt.test.ts new file mode 100644 index 00000000000..7594c13e6d9 --- /dev/null +++ b/src/core/prompts/__tests__/custom-system-prompt.test.ts @@ -0,0 +1,172 @@ +import { SYSTEM_PROMPT } from "../system" +import { defaultModeSlug, modes } from "../../../shared/modes" +import * as vscode from "vscode" +import * as fs from "fs/promises" + +// Mock the fs/promises module +jest.mock("fs/promises", () => ({ + readFile: jest.fn(), + mkdir: jest.fn().mockResolvedValue(undefined), + access: jest.fn().mockResolvedValue(undefined), +})) + +// Get the mocked fs module +const mockedFs = fs as jest.Mocked + +// Mock the fileExistsAtPath function +jest.mock("../../../utils/fs", () => ({ + fileExistsAtPath: jest.fn().mockResolvedValue(true), + createDirectoriesForFile: jest.fn().mockResolvedValue([]), +})) + +// Create a mock ExtensionContext with relative paths instead of absolute paths +const mockContext = { + extensionPath: "mock/extension/path", + globalStoragePath: "mock/storage/path", + storagePath: "mock/storage/path", + logPath: "mock/log/path", + subscriptions: [], + workspaceState: { + get: () => undefined, + update: () => Promise.resolve(), + }, + globalState: { + get: () => undefined, + update: () => Promise.resolve(), + setKeysForSync: () => {}, + }, + extensionUri: { fsPath: "mock/extension/path" }, + globalStorageUri: { fsPath: "mock/settings/path" }, + asAbsolutePath: (relativePath: string) => `mock/extension/path/${relativePath}`, + extension: { + packageJSON: { + version: "1.0.0", + }, + }, +} as unknown as vscode.ExtensionContext + +describe("File-Based Custom System Prompt", () => { + const experiments = {} + + beforeEach(() => { + // Reset mocks before each test + jest.clearAllMocks() + + // Default behavior: file doesn't exist + mockedFs.readFile.mockRejectedValue({ code: "ENOENT" }) + }) + + it("should use default generation when no file-based system prompt is found", async () => { + const customModePrompts = { + [defaultModeSlug]: { + roleDefinition: "Test role definition", + }, + } + + const prompt = await SYSTEM_PROMPT( + mockContext, + "test/path", // Using a relative path without leading slash + false, + undefined, + undefined, + undefined, + defaultModeSlug, + customModePrompts, + undefined, + undefined, + undefined, + undefined, + experiments, + true, + ) + + // Should contain default sections + expect(prompt).toContain("TOOL USE") + expect(prompt).toContain("CAPABILITIES") + expect(prompt).toContain("MODES") + expect(prompt).toContain("Test role definition") + }) + + it("should use file-based custom system prompt when available", async () => { + // Mock the readFile to return content from a file + const fileCustomSystemPrompt = "Custom system prompt from file" + // When called with utf-8 encoding, return a string + mockedFs.readFile.mockImplementation((filePath, options) => { + if (filePath.toString().includes(`.roo/system-prompt-${defaultModeSlug}`) && options === "utf-8") { + return Promise.resolve(fileCustomSystemPrompt) + } + return Promise.reject({ code: "ENOENT" }) + }) + + const prompt = await SYSTEM_PROMPT( + mockContext, + "test/path", // Using a relative path without leading slash + 
false, + undefined, + undefined, + undefined, + defaultModeSlug, + undefined, + undefined, + undefined, + undefined, + undefined, + experiments, + true, + ) + + // Should contain role definition and file-based system prompt + expect(prompt).toContain(modes[0].roleDefinition) + expect(prompt).toContain(fileCustomSystemPrompt) + + // Should not contain any of the default sections + expect(prompt).not.toContain("TOOL USE") + expect(prompt).not.toContain("CAPABILITIES") + expect(prompt).not.toContain("MODES") + }) + + it("should combine file-based system prompt with role definition and custom instructions", async () => { + // Mock the readFile to return content from a file + const fileCustomSystemPrompt = "Custom system prompt from file" + mockedFs.readFile.mockImplementation((filePath, options) => { + if (filePath.toString().includes(`.roo/system-prompt-${defaultModeSlug}`) && options === "utf-8") { + return Promise.resolve(fileCustomSystemPrompt) + } + return Promise.reject({ code: "ENOENT" }) + }) + + // Define custom role definition + const customRoleDefinition = "Custom role definition" + const customModePrompts = { + [defaultModeSlug]: { + roleDefinition: customRoleDefinition, + }, + } + + const prompt = await SYSTEM_PROMPT( + mockContext, + "test/path", // Using a relative path without leading slash + false, + undefined, + undefined, + undefined, + defaultModeSlug, + customModePrompts, + undefined, + undefined, + undefined, + undefined, + experiments, + true, + ) + + // Should contain custom role definition and file-based system prompt + expect(prompt).toContain(customRoleDefinition) + expect(prompt).toContain(fileCustomSystemPrompt) + + // Should not contain any of the default sections + expect(prompt).not.toContain("TOOL USE") + expect(prompt).not.toContain("CAPABILITIES") + expect(prompt).not.toContain("MODES") + }) +}) diff --git a/src/core/prompts/sections/custom-system-prompt.ts b/src/core/prompts/sections/custom-system-prompt.ts new file mode 100644 index 00000000000..eca2b98b8d8 --- /dev/null +++ b/src/core/prompts/sections/custom-system-prompt.ts @@ -0,0 +1,60 @@ +import fs from "fs/promises" +import path from "path" +import { Mode } from "../../../shared/modes" +import { fileExistsAtPath } from "../../../utils/fs" + +/** + * Safely reads a file, returning an empty string if the file doesn't exist + */ +async function safeReadFile(filePath: string): Promise { + try { + const content = await fs.readFile(filePath, "utf-8") + // When reading with "utf-8" encoding, content should be a string + return content.trim() + } catch (err) { + const errorCode = (err as NodeJS.ErrnoException).code + if (!errorCode || !["ENOENT", "EISDIR"].includes(errorCode)) { + throw err + } + return "" + } +} + +/** + * Get the path to a system prompt file for a specific mode + */ +export function getSystemPromptFilePath(cwd: string, mode: Mode): string { + return path.join(cwd, ".roo", `system-prompt-${mode}`) +} + +/** + * Loads custom system prompt from a file at .roo/system-prompt-[mode slug] + * If the file doesn't exist, returns an empty string + */ +export async function loadSystemPromptFile(cwd: string, mode: Mode): Promise { + const filePath = getSystemPromptFilePath(cwd, mode) + return safeReadFile(filePath) +} + +/** + * Ensures the .roo directory exists, creating it if necessary + */ +export async function ensureRooDirectory(cwd: string): Promise { + const rooDir = path.join(cwd, ".roo") + + // Check if directory already exists + if (await fileExistsAtPath(rooDir)) { + return + } + + // Create 
the directory + try { + await fs.mkdir(rooDir, { recursive: true }) + } catch (err) { + // If directory already exists (race condition), ignore the error + const errorCode = (err as NodeJS.ErrnoException).code + if (errorCode !== "EEXIST") { + throw err + } + } +} diff --git a/src/core/prompts/system.ts b/src/core/prompts/system.ts index 91bbd073870..90791f63586 100644 --- a/src/core/prompts/system.ts +++ b/src/core/prompts/system.ts @@ -23,6 +23,7 @@ import { getModesSection, addCustomInstructions, } from "./sections" +import { loadSystemPromptFile } from "./sections/custom-system-prompt" import fs from "fs/promises" import path from "path" @@ -119,11 +120,25 @@ export const SYSTEM_PROMPT = async ( return undefined } + // Try to load custom system prompt from file + const fileCustomSystemPrompt = await loadSystemPromptFile(cwd, mode) + // Check if it's a custom mode const promptComponent = getPromptComponent(customModePrompts?.[mode]) + // Get full mode config from custom modes or fall back to built-in modes const currentMode = getModeBySlug(mode, customModes) || modes.find((m) => m.slug === mode) || modes[0] + // If a file-based custom system prompt exists, use it + if (fileCustomSystemPrompt) { + const roleDefinition = promptComponent?.roleDefinition || currentMode.roleDefinition + return `${roleDefinition} + +${fileCustomSystemPrompt} + +${await addCustomInstructions(promptComponent?.customInstructions || currentMode.customInstructions || "", globalCustomInstructions || "", cwd, mode, { preferredLanguage })}` + } + // If diff is disabled, don't pass the diffStrategy const effectiveDiffStrategy = diffEnabled ? diffStrategy : undefined diff --git a/webview-ui/src/components/prompts/PromptsView.tsx b/webview-ui/src/components/prompts/PromptsView.tsx index 061fa789de4..2bfafeff5c6 100644 --- a/webview-ui/src/components/prompts/PromptsView.tsx +++ b/webview-ui/src/components/prompts/PromptsView.tsx @@ -88,6 +88,7 @@ const PromptsView = ({ onDone }: PromptsViewProps) => { const [showConfigMenu, setShowConfigMenu] = useState(false) const [isCreateModeDialogOpen, setIsCreateModeDialogOpen] = useState(false) const [activeSupportTab, setActiveSupportTab] = useState("ENHANCE") + const [isSystemPromptDisclosureOpen, setIsSystemPromptDisclosureOpen] = useState(false) // Direct update functions const updateAgentPrompt = useCallback( @@ -971,6 +972,45 @@ const PromptsView = ({ onDone }: PromptsViewProps) => {
+ + {/* Custom System Prompt Disclosure */} +
+ + + {isSystemPromptDisclosureOpen && ( +
+ You can completely replace the system prompt for this mode (aside from the role + definition and custom instructions) by creating a file at{" "} + { + const currentMode = getCurrentMode() + if (!currentMode) return + + // Open or create an empty file + vscode.postMessage({ + type: "openFile", + text: `./.roo/system-prompt-${currentMode.slug}`, + values: { + create: true, + content: "", + }, + }) + }}> + .roo/system-prompt-{getCurrentMode()?.slug || "code"} + {" "} + in your workspace. This is a very advanced feature that bypasses built-in safeguards and + consistency checks (especially around tool usage), so be careful! +
+ )} +
Date: Thu, 27 Feb 2025 15:18:54 -0500 Subject: [PATCH 096/145] Add gpt-4.5-preview --- .changeset/flat-avocados-carry.md | 5 +++++ src/api/providers/__tests__/openai-native.test.ts | 2 +- src/shared/api.ts | 10 +++++++++- 3 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 .changeset/flat-avocados-carry.md diff --git a/.changeset/flat-avocados-carry.md b/.changeset/flat-avocados-carry.md new file mode 100644 index 00000000000..f0128f21e0d --- /dev/null +++ b/.changeset/flat-avocados-carry.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Add gpt-4.5-preview diff --git a/src/api/providers/__tests__/openai-native.test.ts b/src/api/providers/__tests__/openai-native.test.ts index d6a855849c5..eda744c335c 100644 --- a/src/api/providers/__tests__/openai-native.test.ts +++ b/src/api/providers/__tests__/openai-native.test.ts @@ -357,7 +357,7 @@ describe("OpenAiNativeHandler", () => { const modelInfo = handler.getModel() expect(modelInfo.id).toBe(mockOptions.apiModelId) expect(modelInfo.info).toBeDefined() - expect(modelInfo.info.maxTokens).toBe(4096) + expect(modelInfo.info.maxTokens).toBe(16384) expect(modelInfo.info.contextWindow).toBe(128_000) }) diff --git a/src/shared/api.ts b/src/shared/api.ts index 442282d5876..47b023ce6fd 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -678,8 +678,16 @@ export const openAiNativeModels = { inputPrice: 1.1, outputPrice: 4.4, }, + "gpt-4.5-preview": { + maxTokens: 16_384, + contextWindow: 128_000, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 75, + outputPrice: 150, + }, "gpt-4o": { - maxTokens: 4_096, + maxTokens: 16_384, contextWindow: 128_000, supportsImages: true, supportsPromptCache: false, From 820ebc97c5251e04194b014bd60ab51e08de1481 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 27 Feb 2025 20:41:23 +0000 Subject: [PATCH 097/145] changeset version bump --- .changeset/flat-avocados-carry.md | 5 ----- CHANGELOG.md | 6 ++++++ package-lock.json | 4 ++-- package.json | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) delete mode 100644 .changeset/flat-avocados-carry.md diff --git a/.changeset/flat-avocados-carry.md b/.changeset/flat-avocados-carry.md deleted file mode 100644 index f0128f21e0d..00000000000 --- a/.changeset/flat-avocados-carry.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -"roo-cline": patch ---- - -Add gpt-4.5-preview diff --git a/CHANGELOG.md b/CHANGELOG.md index d0cf8f79c3a..e3aa95d448e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Roo Code Changelog +## 3.7.8 + +### Patch Changes + +- Add gpt-4.5-preview + ## [3.7.7] - Graduate checkpoints out of beta diff --git a/package-lock.json b/package-lock.json index c1f748983fc..e7d7718b754 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "roo-cline", - "version": "3.7.7", + "version": "3.7.8", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "roo-cline", - "version": "3.7.7", + "version": "3.7.8", "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.37.0", diff --git a/package.json b/package.json index 8441488bac3..a4a2298a48f 100644 --- a/package.json +++ b/package.json @@ -3,7 +3,7 @@ "displayName": "Roo Code (prev. 
Roo Cline)", "description": "A whole dev team of AI agents in your editor.", "publisher": "RooVeterinaryInc", - "version": "3.7.7", + "version": "3.7.8", "icon": "assets/icons/rocket.png", "galleryBanner": { "color": "#617A91", From ca7d746990ace8208b5f444ae6f16ea0f25525a1 Mon Sep 17 00:00:00 2001 From: R00-B0T Date: Thu, 27 Feb 2025 20:41:50 +0000 Subject: [PATCH 098/145] Updating CHANGELOG.md format --- CHANGELOG.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e3aa95d448e..fe8156abf79 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,8 +1,6 @@ # Roo Code Changelog -## 3.7.8 - -### Patch Changes +## [3.7.8] - Add gpt-4.5-preview From 75e7ef728d0c8512a2f8835a3139bba194f8b327 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Thu, 27 Feb 2025 16:09:28 -0500 Subject: [PATCH 099/145] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe8156abf79..9622ce0c99e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,9 @@ ## [3.7.8] +- Add Vertex AI prompt caching support for Claude models (thanks @aitoroses and @lupuletic!) - Add gpt-4.5-preview +- Add an advanced feature to customize the system prompt ## [3.7.7] From 3514f6506b5b0e24919bad29e65b8eba11afced5 Mon Sep 17 00:00:00 2001 From: Catalin Lupuleti Date: Thu, 27 Feb 2025 21:56:56 +0000 Subject: [PATCH 100/145] Added support for Claude Sonnet 3.7 thinking via Vertex AI --- package-lock.json | 10 +- package.json | 2 +- src/api/providers/vertex.ts | 110 +++++++++++++++--- src/core/webview/ClineProvider.ts | 5 + src/shared/api.ts | 14 +++ src/shared/globalState.ts | 2 + .../src/components/settings/ApiOptions.tsx | 3 + .../components/settings/ThinkingBudget.tsx | 30 +++-- 8 files changed, 143 insertions(+), 33 deletions(-) diff --git a/package-lock.json b/package-lock.json index c1f748983fc..547f20a9305 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,7 +10,7 @@ "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.37.0", - "@anthropic-ai/vertex-sdk": "^0.4.1", + "@anthropic-ai/vertex-sdk": "^0.7.0", "@aws-sdk/client-bedrock-runtime": "^3.706.0", "@google/generative-ai": "^0.18.0", "@mistralai/mistralai": "^1.3.6", @@ -150,11 +150,11 @@ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" }, "node_modules/@anthropic-ai/vertex-sdk": { - "version": "0.4.3", - "resolved": "https://registry.npmjs.org/@anthropic-ai/vertex-sdk/-/vertex-sdk-0.4.3.tgz", - "integrity": "sha512-2Uef0C5P2Hx+T88RnUSRA3u4aZqmqnrRSOb2N64ozgKPiSUPTM5JlggAq2b32yWMj5d3MLYa6spJXKMmHXOcoA==", + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/@anthropic-ai/vertex-sdk/-/vertex-sdk-0.7.0.tgz", + "integrity": "sha512-zNm3hUXgYmYDTyveIxOyxbcnh5VXFkrLo4bSnG6LAfGzW7k3k2iCNDSVKtR9qZrK2BCid7JtVu7jsEKaZ/9dSw==", "dependencies": { - "@anthropic-ai/sdk": ">=0.14 <1", + "@anthropic-ai/sdk": ">=0.35 <1", "google-auth-library": "^9.4.2" } }, diff --git a/package.json b/package.json index 8441488bac3..35db01621ab 100644 --- a/package.json +++ b/package.json @@ -305,7 +305,7 @@ "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", "@anthropic-ai/sdk": "^0.37.0", - "@anthropic-ai/vertex-sdk": "^0.4.1", + "@anthropic-ai/vertex-sdk": "^0.7.0", "@aws-sdk/client-bedrock-runtime": "^3.706.0", "@google/generative-ai": "^0.18.0", "@mistralai/mistralai": "^1.3.6", diff --git a/src/api/providers/vertex.ts b/src/api/providers/vertex.ts index 
70562766c3b..69fb7d26f78 100644 --- a/src/api/providers/vertex.ts +++ b/src/api/providers/vertex.ts @@ -2,6 +2,7 @@ import { Anthropic } from "@anthropic-ai/sdk" import { AnthropicVertex } from "@anthropic-ai/vertex-sdk" import { Stream as AnthropicStream } from "@anthropic-ai/sdk/streaming" import { ApiHandler, SingleCompletionHandler } from "../" +import { BetaThinkingConfigParam } from "@anthropic-ai/sdk/resources/beta" import { ApiHandlerOptions, ModelInfo, vertexDefaultModelId, VertexModelId, vertexModels } from "../../shared/api" import { ApiStream } from "../transform/stream" @@ -70,15 +71,25 @@ interface VertexMessageStreamEvent { usage?: { output_tokens: number } - content_block?: { - type: "text" - text: string - } + content_block?: + | { + type: "text" + text: string + } + | { + type: "thinking" + thinking: string + } index?: number - delta?: { - type: "text_delta" - text: string - } + delta?: + | { + type: "text_delta" + text: string + } + | { + type: "thinking_delta" + thinking: string + } } // https://docs.anthropic.com/en/api/claude-on-vertex-ai @@ -145,6 +156,7 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { const model = this.getModel() + let { id, info, temperature, maxTokens, thinking } = model const useCache = model.info.supportsPromptCache // Find indices of user messages that we want to cache @@ -158,9 +170,10 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { // Create the stream with appropriate caching configuration const params = { - model: model.id, - max_tokens: model.info.maxTokens || 8192, - temperature: this.options.modelTemperature ?? 0, + model: id, + max_tokens: maxTokens, + temperature, + thinking, // Cache the system prompt if caching is enabled system: useCache ? [ @@ -220,6 +233,19 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { } break } + case "thinking": { + if (chunk.index! > 0) { + yield { + type: "reasoning", + text: "\n", + } + } + yield { + type: "reasoning", + text: (chunk.content_block as any).thinking, + } + break + } } break } @@ -232,6 +258,13 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { } break } + case "thinking_delta": { + yield { + type: "reasoning", + text: (chunk.delta as any).thinking, + } + break + } } break } @@ -239,24 +272,63 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { } } - getModel(): { id: VertexModelId; info: ModelInfo } { + getModel(): { + id: VertexModelId + info: ModelInfo + temperature: number + maxTokens: number + thinking?: BetaThinkingConfigParam + } { const modelId = this.options.apiModelId + let temperature = this.options.modelTemperature ?? 
0 + let thinking: BetaThinkingConfigParam | undefined = undefined + if (modelId && modelId in vertexModels) { const id = modelId as VertexModelId - return { id, info: vertexModels[id] } + const info: ModelInfo = vertexModels[id] + + // The `:thinking` variant is a virtual identifier for thinking-enabled models + // Similar to how it's handled in the Anthropic provider + let actualId = id + if (id.endsWith(":thinking")) { + actualId = id.replace(":thinking", "") as VertexModelId + } + + const maxTokens = this.options.modelMaxTokens || info.maxTokens || 8192 + + if (info.thinking) { + temperature = 1.0 // Thinking requires temperature 1.0 + const maxBudgetTokens = Math.floor(maxTokens * 0.8) + const budgetTokens = Math.max( + Math.min( + this.options.vertexThinking ?? this.options.anthropicThinking ?? maxBudgetTokens, + maxBudgetTokens, + ), + 1024, + ) + thinking = { type: "enabled", budget_tokens: budgetTokens } + } + + return { id: actualId, info, temperature, maxTokens, thinking } } - return { id: vertexDefaultModelId, info: vertexModels[vertexDefaultModelId] } + + const id = vertexDefaultModelId + const info = vertexModels[id] + const maxTokens = this.options.modelMaxTokens || info.maxTokens || 8192 + + return { id, info, temperature, maxTokens, thinking } } async completePrompt(prompt: string): Promise { try { - const model = this.getModel() - const useCache = model.info.supportsPromptCache + let { id, info, temperature, maxTokens, thinking } = this.getModel() + const useCache = info.supportsPromptCache const params = { - model: model.id, - max_tokens: model.info.maxTokens || 8192, - temperature: this.options.modelTemperature ?? 0, + model: id, + max_tokens: maxTokens, + temperature, + thinking, system: "", // No system prompt needed for single completions messages: [ { diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index 633c7d7293d..5417e54ff73 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -1652,6 +1652,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { lmStudioBaseUrl, anthropicBaseUrl, anthropicThinking, + vertexThinking, geminiApiKey, openAiNativeApiKey, deepSeekApiKey, @@ -1701,6 +1702,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { this.updateGlobalState("lmStudioBaseUrl", lmStudioBaseUrl), this.updateGlobalState("anthropicBaseUrl", anthropicBaseUrl), this.updateGlobalState("anthropicThinking", anthropicThinking), + this.updateGlobalState("vertexThinking", vertexThinking), this.storeSecret("geminiApiKey", geminiApiKey), this.storeSecret("openAiNativeApiKey", openAiNativeApiKey), this.storeSecret("deepSeekApiKey", deepSeekApiKey), @@ -2158,6 +2160,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { lmStudioBaseUrl, anthropicBaseUrl, anthropicThinking, + vertexThinking, geminiApiKey, openAiNativeApiKey, deepSeekApiKey, @@ -2242,6 +2245,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { this.getGlobalState("lmStudioBaseUrl") as Promise, this.getGlobalState("anthropicBaseUrl") as Promise, this.getGlobalState("anthropicThinking") as Promise, + this.getGlobalState("vertexThinking") as Promise, this.getSecret("geminiApiKey") as Promise, this.getSecret("openAiNativeApiKey") as Promise, this.getSecret("deepSeekApiKey") as Promise, @@ -2343,6 +2347,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { lmStudioBaseUrl, anthropicBaseUrl, anthropicThinking, + vertexThinking, geminiApiKey, openAiNativeApiKey, 
deepSeekApiKey, diff --git a/src/shared/api.ts b/src/shared/api.ts index 442282d5876..f048761d0f3 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -41,6 +41,7 @@ export interface ApiHandlerOptions { awsUseProfile?: boolean vertexProjectId?: string vertexRegion?: string + vertexThinking?: number openAiBaseUrl?: string openAiApiKey?: string openAiModelId?: string @@ -436,6 +437,18 @@ export const openRouterDefaultModelInfo: ModelInfo = { export type VertexModelId = keyof typeof vertexModels export const vertexDefaultModelId: VertexModelId = "claude-3-7-sonnet@20250219" export const vertexModels = { + "claude-3-7-sonnet@20250219:thinking": { + maxTokens: 64000, + contextWindow: 200_000, + supportsImages: true, + supportsComputerUse: true, + supportsPromptCache: true, + inputPrice: 3.0, + outputPrice: 15.0, + cacheWritesPrice: 3.75, + cacheReadsPrice: 0.3, + thinking: true, + }, "claude-3-7-sonnet@20250219": { maxTokens: 8192, contextWindow: 200_000, @@ -446,6 +459,7 @@ export const vertexModels = { outputPrice: 15.0, cacheWritesPrice: 3.75, cacheReadsPrice: 0.3, + thinking: false, }, "claude-3-5-sonnet-v2@20241022": { maxTokens: 8192, diff --git a/src/shared/globalState.ts b/src/shared/globalState.ts index 0863b34db22..05b868a450c 100644 --- a/src/shared/globalState.ts +++ b/src/shared/globalState.ts @@ -24,6 +24,7 @@ export type GlobalStateKey = | "awsUseProfile" | "vertexProjectId" | "vertexRegion" + | "vertexThinking" | "lastShownAnnouncementId" | "customInstructions" | "alwaysAllowReadOnly" @@ -43,6 +44,7 @@ export type GlobalStateKey = | "lmStudioBaseUrl" | "anthropicBaseUrl" | "anthropicThinking" + | "vertexThinking" | "azureApiVersion" | "openAiStreamingEnabled" | "openRouterModelId" diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index c30035cef01..42ac5cdcb30 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -7,6 +7,7 @@ import * as vscodemodels from "vscode" import { ApiConfiguration, ModelInfo, + ApiProvider, anthropicDefaultModelId, anthropicModels, azureOpenAiDefaultApiVersion, @@ -1380,9 +1381,11 @@ const ApiOptions = ({ />
(field: K, value: ApiConfiguration[K]) => void modelInfo?: ModelInfo + provider?: ApiProvider } -export const ThinkingBudget = ({ apiConfiguration, setApiConfigurationField, modelInfo }: ThinkingBudgetProps) => { +export const ThinkingBudget = ({ + apiConfiguration, + setApiConfigurationField, + modelInfo, + provider, +}: ThinkingBudgetProps) => { + const isVertexProvider = provider === "vertex" + const budgetField = isVertexProvider ? "vertexThinking" : "anthropicThinking" + const tokens = apiConfiguration?.modelMaxTokens || modelInfo?.maxTokens || 64_000 const tokensMin = 8192 const tokensMax = modelInfo?.maxTokens || 64_000 - const thinkingTokens = apiConfiguration?.anthropicThinking || 8192 + // Get the appropriate thinking tokens based on provider + const thinkingTokens = useMemo(() => { + const value = isVertexProvider ? apiConfiguration?.vertexThinking : apiConfiguration?.anthropicThinking + return value || Math.min(Math.floor(0.8 * tokens), 8192) + }, [apiConfiguration, isVertexProvider, tokens]) + const thinkingTokensMin = 1024 const thinkingTokensMax = Math.floor(0.8 * tokens) useEffect(() => { if (thinkingTokens > thinkingTokensMax) { - setApiConfigurationField("anthropicThinking", thinkingTokensMax) + setApiConfigurationField(budgetField, thinkingTokensMax) } - }, [thinkingTokens, thinkingTokensMax, setApiConfigurationField]) + }, [thinkingTokens, thinkingTokensMax, setApiConfigurationField, budgetField]) - if (!modelInfo || !modelInfo.thinking) { + if (!modelInfo?.thinking) { return null } @@ -52,7 +66,7 @@ export const ThinkingBudget = ({ apiConfiguration, setApiConfigurationField, mod max={thinkingTokensMax} step={1024} value={[thinkingTokens]} - onValueChange={([value]) => setApiConfigurationField("anthropicThinking", value)} + onValueChange={([value]) => setApiConfigurationField(budgetField, value)} />
{thinkingTokens}
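For reference between patches: the Anthropic and Vertex handlers above apply the same clamp whenever a ":thinking" model variant is selected. A minimal TypeScript sketch of that calculation follows; the standalone helper name is illustrative only (the diffs inline this logic in each handler's getModel()), but the constants match the changes: temperature is forced to 1.0, and the thinking budget is capped at 80% of max tokens with a 1024-token floor.

// Illustrative sketch only; mirrors the clamp inlined in the handlers above.
function clampThinkingBudget(maxTokens: number, requestedBudget?: number) {
	const maxBudgetTokens = Math.floor(maxTokens * 0.8)
	const budgetTokens = Math.max(Math.min(requestedBudget ?? maxBudgetTokens, maxBudgetTokens), 1024)
	// Anthropic "thinking" models require temperature 1.0.
	return { thinking: { type: "enabled", budget_tokens: budgetTokens }, temperature: 1.0 }
}

// Example: clampThinkingBudget(16384, 4096) yields budget_tokens = 4096,
// while clampThinkingBudget(1000) falls back to the 1024-token minimum.
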
From 5eba1d53fbeef6f71f027d8317d9f99d120e8026 Mon Sep 17 00:00:00 2001 From: Catalin Lupuleti Date: Thu, 27 Feb 2025 22:15:17 +0000 Subject: [PATCH 101/145] Added tests for Claude Sonnet Thinking --- src/api/providers/__tests__/vertex.test.ts | 250 ++++++++++++++++++ .../settings/__tests__/ApiOptions.test.tsx | 57 +++- .../__tests__/ThinkingBudget.test.tsx | 145 ++++++++++ 3 files changed, 451 insertions(+), 1 deletion(-) create mode 100644 webview-ui/src/components/settings/__tests__/ThinkingBudget.test.tsx diff --git a/src/api/providers/__tests__/vertex.test.ts b/src/api/providers/__tests__/vertex.test.ts index 6e81fd771b7..076f902ca2b 100644 --- a/src/api/providers/__tests__/vertex.test.ts +++ b/src/api/providers/__tests__/vertex.test.ts @@ -2,6 +2,7 @@ import { Anthropic } from "@anthropic-ai/sdk" import { AnthropicVertex } from "@anthropic-ai/vertex-sdk" +import { BetaThinkingConfigParam } from "@anthropic-ai/sdk/resources/beta" import { VertexHandler } from "../vertex" import { ApiStreamChunk } from "../../transform/stream" @@ -431,6 +432,138 @@ describe("VertexHandler", () => { }) }) + describe("thinking functionality", () => { + const mockMessages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: "Hello", + }, + ] + + const systemPrompt = "You are a helpful assistant" + + it("should handle thinking content blocks and deltas", async () => { + const mockStream = [ + { + type: "message_start", + message: { + usage: { + input_tokens: 10, + output_tokens: 0, + }, + }, + }, + { + type: "content_block_start", + index: 0, + content_block: { + type: "thinking", + thinking: "Let me think about this...", + }, + }, + { + type: "content_block_delta", + delta: { + type: "thinking_delta", + thinking: " I need to consider all options.", + }, + }, + { + type: "content_block_start", + index: 1, + content_block: { + type: "text", + text: "Here's my answer:", + }, + }, + ] + + // Setup async iterator for mock stream + const asyncIterator = { + async *[Symbol.asyncIterator]() { + for (const chunk of mockStream) { + yield chunk + } + }, + } + + const mockCreate = jest.fn().mockResolvedValue(asyncIterator) + ;(handler["client"].messages as any).create = mockCreate + + const stream = handler.createMessage(systemPrompt, mockMessages) + const chunks: ApiStreamChunk[] = [] + + for await (const chunk of stream) { + chunks.push(chunk) + } + + // Verify thinking content is processed correctly + const reasoningChunks = chunks.filter((chunk) => chunk.type === "reasoning") + expect(reasoningChunks).toHaveLength(2) + expect(reasoningChunks[0].text).toBe("Let me think about this...") + expect(reasoningChunks[1].text).toBe(" I need to consider all options.") + + // Verify text content is processed correctly + const textChunks = chunks.filter((chunk) => chunk.type === "text") + expect(textChunks).toHaveLength(2) // One for the text block, one for the newline + expect(textChunks[0].text).toBe("\n") + expect(textChunks[1].text).toBe("Here's my answer:") + }) + + it("should handle multiple thinking blocks with line breaks", async () => { + const mockStream = [ + { + type: "content_block_start", + index: 0, + content_block: { + type: "thinking", + thinking: "First thinking block", + }, + }, + { + type: "content_block_start", + index: 1, + content_block: { + type: "thinking", + thinking: "Second thinking block", + }, + }, + ] + + const asyncIterator = { + async *[Symbol.asyncIterator]() { + for (const chunk of mockStream) { + yield chunk + } + }, + } + + const mockCreate = 
jest.fn().mockResolvedValue(asyncIterator) + ;(handler["client"].messages as any).create = mockCreate + + const stream = handler.createMessage(systemPrompt, mockMessages) + const chunks: ApiStreamChunk[] = [] + + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks.length).toBe(3) + expect(chunks[0]).toEqual({ + type: "reasoning", + text: "First thinking block", + }) + expect(chunks[1]).toEqual({ + type: "reasoning", + text: "\n", + }) + expect(chunks[2]).toEqual({ + type: "reasoning", + text: "Second thinking block", + }) + }) + }) + describe("completePrompt", () => { it("should complete prompt successfully", async () => { const result = await handler.completePrompt("Test prompt") @@ -500,4 +633,121 @@ describe("VertexHandler", () => { expect(modelInfo.id).toBe("claude-3-7-sonnet@20250219") // Default model }) }) + + describe("thinking model configuration", () => { + it("should configure thinking for models with :thinking suffix", () => { + const thinkingHandler = new VertexHandler({ + apiModelId: "claude-3-7-sonnet@20250219:thinking", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + modelMaxTokens: 16384, + vertexThinking: 4096, + }) + + const modelInfo = thinkingHandler.getModel() + + // Verify thinking configuration + expect(modelInfo.id).toBe("claude-3-7-sonnet@20250219") + expect(modelInfo.thinking).toBeDefined() + const thinkingConfig = modelInfo.thinking as { type: "enabled"; budget_tokens: number } + expect(thinkingConfig.type).toBe("enabled") + expect(thinkingConfig.budget_tokens).toBe(4096) + expect(modelInfo.temperature).toBe(1.0) // Thinking requires temperature 1.0 + }) + + it("should calculate thinking budget correctly", () => { + // Test with explicit thinking budget + const handlerWithBudget = new VertexHandler({ + apiModelId: "claude-3-7-sonnet@20250219:thinking", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + modelMaxTokens: 16384, + vertexThinking: 5000, + }) + + expect((handlerWithBudget.getModel().thinking as any).budget_tokens).toBe(5000) + + // Test with default thinking budget (80% of max tokens) + const handlerWithDefaultBudget = new VertexHandler({ + apiModelId: "claude-3-7-sonnet@20250219:thinking", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + modelMaxTokens: 10000, + }) + + expect((handlerWithDefaultBudget.getModel().thinking as any).budget_tokens).toBe(8000) // 80% of 10000 + + // Test with minimum thinking budget (should be at least 1024) + const handlerWithSmallMaxTokens = new VertexHandler({ + apiModelId: "claude-3-7-sonnet@20250219:thinking", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + modelMaxTokens: 1000, // This would result in 800 tokens for thinking, but minimum is 1024 + }) + + expect((handlerWithSmallMaxTokens.getModel().thinking as any).budget_tokens).toBe(1024) + }) + + it("should use anthropicThinking value if vertexThinking is not provided", () => { + const handler = new VertexHandler({ + apiModelId: "claude-3-7-sonnet@20250219:thinking", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + modelMaxTokens: 16384, + anthropicThinking: 6000, // Should be used as fallback + }) + + expect((handler.getModel().thinking as any).budget_tokens).toBe(6000) + }) + + it("should pass thinking configuration to API", async () => { + const thinkingHandler = new VertexHandler({ + apiModelId: "claude-3-7-sonnet@20250219:thinking", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + modelMaxTokens: 16384, + vertexThinking: 
4096, + }) + + const mockCreate = jest.fn().mockImplementation(async (options) => { + if (!options.stream) { + return { + id: "test-completion", + content: [{ type: "text", text: "Test response" }], + role: "assistant", + model: options.model, + usage: { + input_tokens: 10, + output_tokens: 5, + }, + } + } + return { + async *[Symbol.asyncIterator]() { + yield { + type: "message_start", + message: { + usage: { + input_tokens: 10, + output_tokens: 5, + }, + }, + } + }, + } + }) + ;(thinkingHandler["client"].messages as any).create = mockCreate + + await thinkingHandler + .createMessage("You are a helpful assistant", [{ role: "user", content: "Hello" }]) + .next() + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + thinking: { type: "enabled", budget_tokens: 4096 }, + temperature: 1.0, // Thinking requires temperature 1.0 + }), + ) + }) + }) }) diff --git a/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx b/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx index 73394bae104..65ae1370035 100644 --- a/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx +++ b/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx @@ -46,6 +46,21 @@ jest.mock("../TemperatureControl", () => ({ ), })) +// Mock ThinkingBudget component +jest.mock("../ThinkingBudget", () => ({ + ThinkingBudget: ({ apiConfiguration, setApiConfigurationField, modelInfo, provider }: any) => + modelInfo?.thinking ? ( +
+ +
+ ) : null, +})) + describe("ApiOptions", () => { const renderApiOptions = (props = {}) => { render( @@ -72,5 +87,45 @@ describe("ApiOptions", () => { expect(screen.queryByTestId("temperature-control")).not.toBeInTheDocument() }) - //TODO: More test cases needed + describe("thinking functionality", () => { + it("should show ThinkingBudget for Anthropic models that support thinking", () => { + renderApiOptions({ + apiConfiguration: { + apiProvider: "anthropic", + apiModelId: "claude-3-7-sonnet-20250219:thinking", + }, + }) + + expect(screen.getByTestId("thinking-budget")).toBeInTheDocument() + expect(screen.getByTestId("thinking-budget")).toHaveAttribute("data-provider", "anthropic") + }) + + it("should show ThinkingBudget for Vertex models that support thinking", () => { + renderApiOptions({ + apiConfiguration: { + apiProvider: "vertex", + apiModelId: "claude-3-7-sonnet@20250219:thinking", + }, + }) + + expect(screen.getByTestId("thinking-budget")).toBeInTheDocument() + expect(screen.getByTestId("thinking-budget")).toHaveAttribute("data-provider", "vertex") + }) + + it("should not show ThinkingBudget for models that don't support thinking", () => { + renderApiOptions({ + apiConfiguration: { + apiProvider: "anthropic", + apiModelId: "claude-3-opus-20240229", + modelInfo: { thinking: false }, // Non-thinking model + }, + }) + + expect(screen.queryByTestId("thinking-budget")).not.toBeInTheDocument() + }) + + // Note: We don't need to test the actual ThinkingBudget component functionality here + // since we have separate tests for that component. We just need to verify that + // it's included in the ApiOptions component when appropriate. + }) }) diff --git a/webview-ui/src/components/settings/__tests__/ThinkingBudget.test.tsx b/webview-ui/src/components/settings/__tests__/ThinkingBudget.test.tsx new file mode 100644 index 00000000000..54f6b1037b4 --- /dev/null +++ b/webview-ui/src/components/settings/__tests__/ThinkingBudget.test.tsx @@ -0,0 +1,145 @@ +import React from "react" +import { render, screen, fireEvent } from "@testing-library/react" +import { ThinkingBudget } from "../ThinkingBudget" +import { ApiProvider, ModelInfo } from "../../../../../src/shared/api" + +// Mock Slider component +jest.mock("@/components/ui", () => ({ + Slider: ({ value, onValueChange, min, max }: any) => ( + onValueChange([parseInt(e.target.value)])} + /> + ), +})) + +describe("ThinkingBudget", () => { + const mockModelInfo: ModelInfo = { + thinking: true, + maxTokens: 16384, + contextWindow: 200000, + supportsPromptCache: true, + supportsImages: true, + } + const defaultProps = { + apiConfiguration: {}, + setApiConfigurationField: jest.fn(), + modelInfo: mockModelInfo, + provider: "anthropic" as ApiProvider, + } + + beforeEach(() => { + jest.clearAllMocks() + }) + + it("should render nothing when model doesn't support thinking", () => { + const { container } = render( + , + ) + + expect(container.firstChild).toBeNull() + }) + + it("should render sliders when model supports thinking", () => { + render() + + expect(screen.getAllByTestId("slider")).toHaveLength(2) + }) + + it("should use anthropicThinking field for Anthropic provider", () => { + const setApiConfigurationField = jest.fn() + + render( + , + ) + + const sliders = screen.getAllByTestId("slider") + fireEvent.change(sliders[1], { target: { value: "5000" } }) + + expect(setApiConfigurationField).toHaveBeenCalledWith("anthropicThinking", 5000) + }) + + it("should use vertexThinking field for Vertex provider", () => { + const setApiConfigurationField = 
jest.fn() + + render( + , + ) + + const sliders = screen.getAllByTestId("slider") + fireEvent.change(sliders[1], { target: { value: "5000" } }) + + expect(setApiConfigurationField).toHaveBeenCalledWith("vertexThinking", 5000) + }) + + it("should cap thinking tokens at 80% of max tokens", () => { + const setApiConfigurationField = jest.fn() + + render( + , + ) + + // Effect should trigger and cap the value + expect(setApiConfigurationField).toHaveBeenCalledWith("anthropicThinking", 8000) // 80% of 10000 + }) + + it("should use default thinking tokens if not provided", () => { + render() + + // Default is 80% of max tokens, capped at 8192 + const sliders = screen.getAllByTestId("slider") + expect(sliders[1]).toHaveValue("8000") // 80% of 10000 + }) + + it("should use min thinking tokens of 1024", () => { + render() + + const sliders = screen.getAllByTestId("slider") + expect(sliders[1].getAttribute("min")).toBe("1024") + }) + + it("should update max tokens when slider changes", () => { + const setApiConfigurationField = jest.fn() + + render( + , + ) + + const sliders = screen.getAllByTestId("slider") + fireEvent.change(sliders[0], { target: { value: "12000" } }) + + expect(setApiConfigurationField).toHaveBeenCalledWith("modelMaxTokens", 12000) + }) +}) From 2b3d23ebd750bfaf19efd6fbcc5bbcc8f1cb3aef Mon Sep 17 00:00:00 2001 From: Catalin Lupuleti <105351510+lupuletic@users.noreply.github.com> Date: Thu, 27 Feb 2025 22:17:09 +0000 Subject: [PATCH 102/145] Update src/shared/globalState.ts Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> --- src/shared/globalState.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/shared/globalState.ts b/src/shared/globalState.ts index 05b868a450c..6e29e038357 100644 --- a/src/shared/globalState.ts +++ b/src/shared/globalState.ts @@ -44,7 +44,6 @@ export type GlobalStateKey = | "lmStudioBaseUrl" | "anthropicBaseUrl" | "anthropicThinking" - | "vertexThinking" | "azureApiVersion" | "openAiStreamingEnabled" | "openRouterModelId" From 87b70cef83bafcf8ea4751de165ba41c35b38ba2 Mon Sep 17 00:00:00 2001 From: Catalin Lupuleti Date: Thu, 27 Feb 2025 22:20:35 +0000 Subject: [PATCH 103/145] Removed unnecessary comment --- src/shared/globalState.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/shared/globalState.ts b/src/shared/globalState.ts index 6e29e038357..05b868a450c 100644 --- a/src/shared/globalState.ts +++ b/src/shared/globalState.ts @@ -44,6 +44,7 @@ export type GlobalStateKey = | "lmStudioBaseUrl" | "anthropicBaseUrl" | "anthropicThinking" + | "vertexThinking" | "azureApiVersion" | "openAiStreamingEnabled" | "openRouterModelId" From dd4fb6b3097430f98345e85d4c563e29baade089 Mon Sep 17 00:00:00 2001 From: Catalin Lupuleti <105351510+lupuletic@users.noreply.github.com> Date: Thu, 27 Feb 2025 22:45:12 +0000 Subject: [PATCH 104/145] Update src/shared/globalState.ts Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> --- src/shared/globalState.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/shared/globalState.ts b/src/shared/globalState.ts index 05b868a450c..6e29e038357 100644 --- a/src/shared/globalState.ts +++ b/src/shared/globalState.ts @@ -44,7 +44,6 @@ export type GlobalStateKey = | "lmStudioBaseUrl" | "anthropicBaseUrl" | "anthropicThinking" - | "vertexThinking" | "azureApiVersion" | "openAiStreamingEnabled" | "openRouterModelId" From 8cbce2ded08e107454dee7d2eec256973d3e85e0 Mon Sep 17 00:00:00 2001 From: cte Date: Thu, 27 Feb 2025 16:06:47 -0800 Subject: [PATCH 
105/145] Add provider-agnostic modelMaxThinkingTokens setting --- src/api/providers/__tests__/vertex.test.ts | 18 +++--------------- src/api/providers/anthropic.ts | 2 +- src/api/providers/openrouter.ts | 2 +- src/api/providers/vertex.ts | 5 +---- src/core/webview/ClineProvider.ts | 15 +++++---------- .../__tests__/checkExistApiConfig.test.ts | 2 +- src/shared/api.ts | 3 +-- src/shared/globalState.ts | 3 +-- .../src/components/settings/ThinkingBudget.tsx | 13 +++++-------- .../settings/__tests__/ApiOptions.test.tsx | 7 +------ .../settings/__tests__/ThinkingBudget.test.tsx | 16 ++++++++-------- 11 files changed, 28 insertions(+), 58 deletions(-) diff --git a/src/api/providers/__tests__/vertex.test.ts b/src/api/providers/__tests__/vertex.test.ts index 076f902ca2b..9cf92f0a16b 100644 --- a/src/api/providers/__tests__/vertex.test.ts +++ b/src/api/providers/__tests__/vertex.test.ts @@ -641,7 +641,7 @@ describe("VertexHandler", () => { vertexProjectId: "test-project", vertexRegion: "us-central1", modelMaxTokens: 16384, - vertexThinking: 4096, + modelMaxThinkingTokens: 4096, }) const modelInfo = thinkingHandler.getModel() @@ -662,7 +662,7 @@ describe("VertexHandler", () => { vertexProjectId: "test-project", vertexRegion: "us-central1", modelMaxTokens: 16384, - vertexThinking: 5000, + modelMaxThinkingTokens: 5000, }) expect((handlerWithBudget.getModel().thinking as any).budget_tokens).toBe(5000) @@ -688,25 +688,13 @@ describe("VertexHandler", () => { expect((handlerWithSmallMaxTokens.getModel().thinking as any).budget_tokens).toBe(1024) }) - it("should use anthropicThinking value if vertexThinking is not provided", () => { - const handler = new VertexHandler({ - apiModelId: "claude-3-7-sonnet@20250219:thinking", - vertexProjectId: "test-project", - vertexRegion: "us-central1", - modelMaxTokens: 16384, - anthropicThinking: 6000, // Should be used as fallback - }) - - expect((handler.getModel().thinking as any).budget_tokens).toBe(6000) - }) - it("should pass thinking configuration to API", async () => { const thinkingHandler = new VertexHandler({ apiModelId: "claude-3-7-sonnet@20250219:thinking", vertexProjectId: "test-project", vertexRegion: "us-central1", modelMaxTokens: 16384, - vertexThinking: 4096, + modelMaxThinkingTokens: 4096, }) const mockCreate = jest.fn().mockImplementation(async (options) => { diff --git a/src/api/providers/anthropic.ts b/src/api/providers/anthropic.ts index eca81eab2e2..fc0b99c59b1 100644 --- a/src/api/providers/anthropic.ts +++ b/src/api/providers/anthropic.ts @@ -206,7 +206,7 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler { // least 1024 tokens. const maxBudgetTokens = Math.floor(maxTokens * 0.8) const budgetTokens = Math.max( - Math.min(this.options.anthropicThinking ?? maxBudgetTokens, maxBudgetTokens), + Math.min(this.options.modelMaxThinkingTokens ?? maxBudgetTokens, maxBudgetTokens), 1024, ) diff --git a/src/api/providers/openrouter.ts b/src/api/providers/openrouter.ts index 69bcb0074c1..82c02e20a7d 100644 --- a/src/api/providers/openrouter.ts +++ b/src/api/providers/openrouter.ts @@ -117,7 +117,7 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler { // least 1024 tokens. const maxBudgetTokens = Math.floor((maxTokens || 8192) * 0.8) const budgetTokens = Math.max( - Math.min(this.options.anthropicThinking ?? maxBudgetTokens, maxBudgetTokens), + Math.min(this.options.modelMaxThinkingTokens ?? 
maxBudgetTokens, maxBudgetTokens), 1024, ) diff --git a/src/api/providers/vertex.ts b/src/api/providers/vertex.ts index 69fb7d26f78..a25fad07ee8 100644 --- a/src/api/providers/vertex.ts +++ b/src/api/providers/vertex.ts @@ -300,10 +300,7 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { temperature = 1.0 // Thinking requires temperature 1.0 const maxBudgetTokens = Math.floor(maxTokens * 0.8) const budgetTokens = Math.max( - Math.min( - this.options.vertexThinking ?? this.options.anthropicThinking ?? maxBudgetTokens, - maxBudgetTokens, - ), + Math.min(this.options.modelMaxThinkingTokens ?? maxBudgetTokens, maxBudgetTokens), 1024, ) thinking = { type: "enabled", budget_tokens: budgetTokens } diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index 5417e54ff73..7b6f2c89719 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -1651,8 +1651,6 @@ export class ClineProvider implements vscode.WebviewViewProvider { lmStudioModelId, lmStudioBaseUrl, anthropicBaseUrl, - anthropicThinking, - vertexThinking, geminiApiKey, openAiNativeApiKey, deepSeekApiKey, @@ -1673,6 +1671,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { requestyModelInfo, modelTemperature, modelMaxTokens, + modelMaxThinkingTokens, } = apiConfiguration await Promise.all([ this.updateGlobalState("apiProvider", apiProvider), @@ -1701,8 +1700,6 @@ export class ClineProvider implements vscode.WebviewViewProvider { this.updateGlobalState("lmStudioModelId", lmStudioModelId), this.updateGlobalState("lmStudioBaseUrl", lmStudioBaseUrl), this.updateGlobalState("anthropicBaseUrl", anthropicBaseUrl), - this.updateGlobalState("anthropicThinking", anthropicThinking), - this.updateGlobalState("vertexThinking", vertexThinking), this.storeSecret("geminiApiKey", geminiApiKey), this.storeSecret("openAiNativeApiKey", openAiNativeApiKey), this.storeSecret("deepSeekApiKey", deepSeekApiKey), @@ -1723,6 +1720,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { this.updateGlobalState("requestyModelInfo", requestyModelInfo), this.updateGlobalState("modelTemperature", modelTemperature), this.updateGlobalState("modelMaxTokens", modelMaxTokens), + this.updateGlobalState("anthropicThinking", modelMaxThinkingTokens), ]) if (this.cline) { this.cline.api = buildApiHandler(apiConfiguration) @@ -2159,8 +2157,6 @@ export class ClineProvider implements vscode.WebviewViewProvider { lmStudioModelId, lmStudioBaseUrl, anthropicBaseUrl, - anthropicThinking, - vertexThinking, geminiApiKey, openAiNativeApiKey, deepSeekApiKey, @@ -2216,6 +2212,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { requestyModelInfo, modelTemperature, modelMaxTokens, + modelMaxThinkingTokens, maxOpenTabsContext, ] = await Promise.all([ this.getGlobalState("apiProvider") as Promise, @@ -2244,8 +2241,6 @@ export class ClineProvider implements vscode.WebviewViewProvider { this.getGlobalState("lmStudioModelId") as Promise, this.getGlobalState("lmStudioBaseUrl") as Promise, this.getGlobalState("anthropicBaseUrl") as Promise, - this.getGlobalState("anthropicThinking") as Promise, - this.getGlobalState("vertexThinking") as Promise, this.getSecret("geminiApiKey") as Promise, this.getSecret("openAiNativeApiKey") as Promise, this.getSecret("deepSeekApiKey") as Promise, @@ -2301,6 +2296,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { this.getGlobalState("requestyModelInfo") as Promise, this.getGlobalState("modelTemperature") as 
Promise, this.getGlobalState("modelMaxTokens") as Promise, + this.getGlobalState("anthropicThinking") as Promise, this.getGlobalState("maxOpenTabsContext") as Promise, ]) @@ -2346,8 +2342,6 @@ export class ClineProvider implements vscode.WebviewViewProvider { lmStudioModelId, lmStudioBaseUrl, anthropicBaseUrl, - anthropicThinking, - vertexThinking, geminiApiKey, openAiNativeApiKey, deepSeekApiKey, @@ -2368,6 +2362,7 @@ export class ClineProvider implements vscode.WebviewViewProvider { requestyModelInfo, modelTemperature, modelMaxTokens, + modelMaxThinkingTokens, }, lastShownAnnouncementId, customInstructions, diff --git a/src/shared/__tests__/checkExistApiConfig.test.ts b/src/shared/__tests__/checkExistApiConfig.test.ts index 62517d69584..c99ddddbc45 100644 --- a/src/shared/__tests__/checkExistApiConfig.test.ts +++ b/src/shared/__tests__/checkExistApiConfig.test.ts @@ -32,7 +32,7 @@ describe("checkExistKey", () => { apiKey: "test-key", apiProvider: undefined, anthropicBaseUrl: undefined, - anthropicThinking: undefined, + modelMaxThinkingTokens: undefined, } expect(checkExistKey(config)).toBe(true) }) diff --git a/src/shared/api.ts b/src/shared/api.ts index b36781d630a..f88bb5e8b51 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -21,7 +21,6 @@ export interface ApiHandlerOptions { apiModelId?: string apiKey?: string // anthropic anthropicBaseUrl?: string - anthropicThinking?: number vsCodeLmModelSelector?: vscode.LanguageModelChatSelector glamaModelId?: string glamaModelInfo?: ModelInfo @@ -41,7 +40,6 @@ export interface ApiHandlerOptions { awsUseProfile?: boolean vertexProjectId?: string vertexRegion?: string - vertexThinking?: number openAiBaseUrl?: string openAiApiKey?: string openAiModelId?: string @@ -70,6 +68,7 @@ export interface ApiHandlerOptions { requestyModelInfo?: ModelInfo modelTemperature?: number modelMaxTokens?: number + modelMaxThinkingTokens?: number } export type ApiConfiguration = ApiHandlerOptions & { diff --git a/src/shared/globalState.ts b/src/shared/globalState.ts index 6e29e038357..aabc77cc01b 100644 --- a/src/shared/globalState.ts +++ b/src/shared/globalState.ts @@ -24,7 +24,6 @@ export type GlobalStateKey = | "awsUseProfile" | "vertexProjectId" | "vertexRegion" - | "vertexThinking" | "lastShownAnnouncementId" | "customInstructions" | "alwaysAllowReadOnly" @@ -43,7 +42,6 @@ export type GlobalStateKey = | "lmStudioModelId" | "lmStudioBaseUrl" | "anthropicBaseUrl" - | "anthropicThinking" | "azureApiVersion" | "openAiStreamingEnabled" | "openRouterModelId" @@ -83,5 +81,6 @@ export type GlobalStateKey = | "unboundModelInfo" | "modelTemperature" | "modelMaxTokens" + | "anthropicThinking" // TODO: Rename to `modelMaxThinkingTokens`. | "mistralCodestralUrl" | "maxOpenTabsContext" diff --git a/webview-ui/src/components/settings/ThinkingBudget.tsx b/webview-ui/src/components/settings/ThinkingBudget.tsx index d21e1fb7ead..557a69538d5 100644 --- a/webview-ui/src/components/settings/ThinkingBudget.tsx +++ b/webview-ui/src/components/settings/ThinkingBudget.tsx @@ -17,27 +17,24 @@ export const ThinkingBudget = ({ modelInfo, provider, }: ThinkingBudgetProps) => { - const isVertexProvider = provider === "vertex" - const budgetField = isVertexProvider ? 
"vertexThinking" : "anthropicThinking" - const tokens = apiConfiguration?.modelMaxTokens || modelInfo?.maxTokens || 64_000 const tokensMin = 8192 const tokensMax = modelInfo?.maxTokens || 64_000 // Get the appropriate thinking tokens based on provider const thinkingTokens = useMemo(() => { - const value = isVertexProvider ? apiConfiguration?.vertexThinking : apiConfiguration?.anthropicThinking + const value = apiConfiguration?.modelMaxThinkingTokens return value || Math.min(Math.floor(0.8 * tokens), 8192) - }, [apiConfiguration, isVertexProvider, tokens]) + }, [apiConfiguration, tokens]) const thinkingTokensMin = 1024 const thinkingTokensMax = Math.floor(0.8 * tokens) useEffect(() => { if (thinkingTokens > thinkingTokensMax) { - setApiConfigurationField(budgetField, thinkingTokensMax) + setApiConfigurationField("modelMaxThinkingTokens", thinkingTokensMax) } - }, [thinkingTokens, thinkingTokensMax, setApiConfigurationField, budgetField]) + }, [thinkingTokens, thinkingTokensMax, setApiConfigurationField]) if (!modelInfo?.thinking) { return null @@ -66,7 +63,7 @@ export const ThinkingBudget = ({ max={thinkingTokensMax} step={1024} value={[thinkingTokens]} - onValueChange={([value]) => setApiConfigurationField(budgetField, value)} + onValueChange={([value]) => setApiConfigurationField("modelMaxThinkingTokens", value)} />
{thinkingTokens}
diff --git a/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx b/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx index 65ae1370035..06ed95585ac 100644 --- a/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx +++ b/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx @@ -51,12 +51,7 @@ jest.mock("../ThinkingBudget", () => ({ ThinkingBudget: ({ apiConfiguration, setApiConfigurationField, modelInfo, provider }: any) => modelInfo?.thinking ? (
-
+
) : null, })) diff --git a/webview-ui/src/components/settings/__tests__/ThinkingBudget.test.tsx b/webview-ui/src/components/settings/__tests__/ThinkingBudget.test.tsx index 54f6b1037b4..212316ea9ac 100644 --- a/webview-ui/src/components/settings/__tests__/ThinkingBudget.test.tsx +++ b/webview-ui/src/components/settings/__tests__/ThinkingBudget.test.tsx @@ -60,13 +60,13 @@ describe("ThinkingBudget", () => { expect(screen.getAllByTestId("slider")).toHaveLength(2) }) - it("should use anthropicThinking field for Anthropic provider", () => { + it("should use modelMaxThinkingTokens field for Anthropic provider", () => { const setApiConfigurationField = jest.fn() render( , @@ -75,16 +75,16 @@ describe("ThinkingBudget", () => { const sliders = screen.getAllByTestId("slider") fireEvent.change(sliders[1], { target: { value: "5000" } }) - expect(setApiConfigurationField).toHaveBeenCalledWith("anthropicThinking", 5000) + expect(setApiConfigurationField).toHaveBeenCalledWith("modelMaxThinkingTokens", 5000) }) - it("should use vertexThinking field for Vertex provider", () => { + it("should use modelMaxThinkingTokens field for Vertex provider", () => { const setApiConfigurationField = jest.fn() render( , @@ -93,7 +93,7 @@ describe("ThinkingBudget", () => { const sliders = screen.getAllByTestId("slider") fireEvent.change(sliders[1], { target: { value: "5000" } }) - expect(setApiConfigurationField).toHaveBeenCalledWith("vertexThinking", 5000) + expect(setApiConfigurationField).toHaveBeenCalledWith("modelMaxThinkingTokens", 5000) }) it("should cap thinking tokens at 80% of max tokens", () => { @@ -102,13 +102,13 @@ describe("ThinkingBudget", () => { render( , ) // Effect should trigger and cap the value - expect(setApiConfigurationField).toHaveBeenCalledWith("anthropicThinking", 8000) // 80% of 10000 + expect(setApiConfigurationField).toHaveBeenCalledWith("modelMaxThinkingTokens", 8000) // 80% of 10000 }) it("should use default thinking tokens if not provided", () => { From 21fed4cb799ff8397d5d1f1348252156dfcbbf71 Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Thu, 27 Feb 2025 22:26:13 -0800 Subject: [PATCH 106/145] Delete task confirmation enhancements --- .changeset/chilly-bugs-pay.md | 5 ++ webview-ui/src/components/chat/TaskHeader.tsx | 84 ++++++++++++++----- .../components/history/DeleteTaskDialog.tsx | 42 ++++++---- .../src/components/history/HistoryView.tsx | 30 +++---- .../history/__tests__/HistoryView.test.tsx | 60 +++++++++---- 5 files changed, 148 insertions(+), 73 deletions(-) create mode 100644 .changeset/chilly-bugs-pay.md diff --git a/.changeset/chilly-bugs-pay.md b/.changeset/chilly-bugs-pay.md new file mode 100644 index 00000000000..b30f8241ef7 --- /dev/null +++ b/.changeset/chilly-bugs-pay.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Delete task confirmation enhancements diff --git a/webview-ui/src/components/chat/TaskHeader.tsx b/webview-ui/src/components/chat/TaskHeader.tsx index fb7db6f6173..319a9aeccd3 100644 --- a/webview-ui/src/components/chat/TaskHeader.tsx +++ b/webview-ui/src/components/chat/TaskHeader.tsx @@ -3,15 +3,18 @@ import { useWindowSize } from "react-use" import { VSCodeButton } from "@vscode/webview-ui-toolkit/react" import prettyBytes from "pretty-bytes" +import { vscode } from "@/utils/vscode" +import { formatLargeNumber } from "@/utils/format" +import { Button } from "@/components/ui" + import { ClineMessage } from "../../../../src/shared/ExtensionMessage" +import { mentionRegexGlobal } from "../../../../src/shared/context-mentions" +import { HistoryItem 
} from "../../../../src/shared/HistoryItem" + import { useExtensionState } from "../../context/ExtensionStateContext" -import { vscode } from "../../utils/vscode" import Thumbnails from "../common/Thumbnails" -import { mentionRegexGlobal } from "../../../../src/shared/context-mentions" -import { formatLargeNumber } from "../../utils/format" import { normalizeApiConfiguration } from "../settings/ApiOptions" -import { Button } from "../ui" -import { HistoryItem } from "../../../../src/shared/HistoryItem" +import { DeleteTaskDialog } from "../history/DeleteTaskDialog" interface TaskHeaderProps { task: ClineMessage @@ -46,7 +49,21 @@ const TaskHeader: React.FC = ({ const contextWindow = selectedModelInfo?.contextWindow || 1 /* - When dealing with event listeners in React components that depend on state variables, we face a challenge. We want our listener to always use the most up-to-date version of a callback function that relies on current state, but we don't want to constantly add and remove event listeners as that function updates. This scenario often arises with resize listeners or other window events. Simply adding the listener in a useEffect with an empty dependency array risks using stale state, while including the callback in the dependencies can lead to unnecessary re-registrations of the listener. There are react hook libraries that provide a elegant solution to this problem by utilizing the useRef hook to maintain a reference to the latest callback function without triggering re-renders or effect re-runs. This approach ensures that our event listener always has access to the most current state while minimizing performance overhead and potential memory leaks from multiple listener registrations. + When dealing with event listeners in React components that depend on state + variables, we face a challenge. We want our listener to always use the most + up-to-date version of a callback function that relies on current state, but + we don't want to constantly add and remove event listeners as that function + updates. This scenario often arises with resize listeners or other window + events. Simply adding the listener in a useEffect with an empty dependency + array risks using stale state, while including the callback in the + dependencies can lead to unnecessary re-registrations of the listener. There + are react hook libraries that provide a elegant solution to this problem by + utilizing the useRef hook to maintain a reference to the latest callback + function without triggering re-renders or effect re-runs. This approach + ensures that our event listener always has access to the most current state + while minimizing performance overhead and potential memory leaks from + multiple listener registrations. + Sources - https://usehooks-ts.com/react-hook/use-event-listener - https://streamich.github.io/react-use/?path=/story/sensors-useevent--docs @@ -350,27 +367,48 @@ export const highlightMentions = (text?: string, withShadow = true) => { }) } -const TaskActions = ({ item }: { item: HistoryItem | undefined }) => ( -
-
- {!!item?.size && item.size > 0 && (
+const TaskActions = ({ item }: { item: HistoryItem | undefined }) => {
+ const [deleteTaskId, setDeleteTaskId] = useState<string | null>(null)
+
+ return (
- )}
-
-) + {!!item?.size && item.size > 0 && ( + <> + + {deleteTaskId && ( + !open && setDeleteTaskId(null)} + open + /> + )} + + )} +
+ ) +} const ContextWindowProgress = ({ contextWindow, contextTokens }: { contextWindow: number; contextTokens: number }) => ( <> diff --git a/webview-ui/src/components/history/DeleteTaskDialog.tsx b/webview-ui/src/components/history/DeleteTaskDialog.tsx index b40adeae3de..31d85abd370 100644 --- a/webview-ui/src/components/history/DeleteTaskDialog.tsx +++ b/webview-ui/src/components/history/DeleteTaskDialog.tsx @@ -1,4 +1,7 @@ -import React from "react" +import { useCallback, useEffect } from "react" +import { useKeyPress } from "react-use" +import { AlertDialogProps } from "@radix-ui/react-alert-dialog" + import { AlertDialog, AlertDialogAction, @@ -8,25 +11,36 @@ import { AlertDialogFooter, AlertDialogHeader, AlertDialogTitle, -} from "@/components/ui/alert-dialog" -import { Button } from "@/components/ui" + Button, +} from "@/components/ui" + import { vscode } from "@/utils/vscode" -interface DeleteTaskDialogProps { +interface DeleteTaskDialogProps extends AlertDialogProps { taskId: string - open: boolean - onOpenChange: (open: boolean) => void } -export const DeleteTaskDialog = ({ taskId, open, onOpenChange }: DeleteTaskDialogProps) => { - const handleDelete = () => { - vscode.postMessage({ type: "deleteTaskWithId", text: taskId }) - onOpenChange(false) - } +export const DeleteTaskDialog = ({ taskId, ...props }: DeleteTaskDialogProps) => { + const [isEnterPressed] = useKeyPress("Enter") + + const { onOpenChange } = props + + const onDelete = useCallback(() => { + if (taskId) { + vscode.postMessage({ type: "deleteTaskWithId", text: taskId }) + onOpenChange?.(false) + } + }, [taskId, onOpenChange]) + + useEffect(() => { + if (taskId && isEnterPressed) { + onDelete() + } + }, [taskId, isEnterPressed, onDelete]) return ( - - + + onOpenChange?.(false)}> Delete Task @@ -38,7 +52,7 @@ export const DeleteTaskDialog = ({ taskId, open, onOpenChange }: DeleteTaskDialo - diff --git a/webview-ui/src/components/history/HistoryView.tsx b/webview-ui/src/components/history/HistoryView.tsx index ca60e1fcb89..49d71e5ddd9 100644 --- a/webview-ui/src/components/history/HistoryView.tsx +++ b/webview-ui/src/components/history/HistoryView.tsx @@ -38,13 +38,7 @@ const HistoryView = ({ onDone }: HistoryViewProps) => { vscode.postMessage({ type: "showTaskWithId", text: id }) } - const [deleteDialogOpen, setDeleteDialogOpen] = useState(false) - const [taskToDelete, setTaskToDelete] = useState(null) - - const handleDeleteHistoryItem = (id: string) => { - setTaskToDelete(id) - setDeleteDialogOpen(true) - } + const [deleteTaskId, setDeleteTaskId] = useState(null) const formatDate = (timestamp: number) => { const date = new Date(timestamp) @@ -230,10 +224,15 @@ const HistoryView = ({ onDone }: HistoryViewProps) => {
- {taskToDelete && ( - { - setDeleteDialogOpen(open) - if (!open) { - setTaskToDelete(null) - } - }} - /> + {deleteTaskId && ( + !open && setDeleteTaskId(null)} open /> )}
) diff --git a/webview-ui/src/components/history/__tests__/HistoryView.test.tsx b/webview-ui/src/components/history/__tests__/HistoryView.test.tsx index 12b0181af6b..4b761d6fc4b 100644 --- a/webview-ui/src/components/history/__tests__/HistoryView.test.tsx +++ b/webview-ui/src/components/history/__tests__/HistoryView.test.tsx @@ -135,26 +135,54 @@ describe("HistoryView", () => { }) }) - it("handles task deletion", async () => { - const onDone = jest.fn() - render() + describe("task deletion", () => { + it("shows confirmation dialog on regular click", () => { + const onDone = jest.fn() + render() + + // Find and hover over first task + const taskContainer = screen.getByTestId("virtuoso-item-1") + fireEvent.mouseEnter(taskContainer) + + // Click delete button to open confirmation dialog + const deleteButton = within(taskContainer).getByTitle("Delete Task (Shift + Click to skip confirmation)") + fireEvent.click(deleteButton) + + // Verify dialog is shown + const dialog = screen.getByRole("alertdialog") + expect(dialog).toBeInTheDocument() + + // Find and click the confirm delete button in the dialog + const confirmDeleteButton = within(dialog).getByRole("button", { name: /delete/i }) + fireEvent.click(confirmDeleteButton) + + // Verify vscode message was sent + expect(vscode.postMessage).toHaveBeenCalledWith({ + type: "deleteTaskWithId", + text: "1", + }) + }) - // Find and hover over first task - const taskContainer = screen.getByTestId("virtuoso-item-1") - fireEvent.mouseEnter(taskContainer) + it("deletes immediately on shift-click without confirmation", () => { + const onDone = jest.fn() + render() - // Click delete button to open confirmation dialog - const deleteButton = within(taskContainer).getByTitle("Delete Task") - fireEvent.click(deleteButton) + // Find and hover over first task + const taskContainer = screen.getByTestId("virtuoso-item-1") + fireEvent.mouseEnter(taskContainer) - // Find and click the confirm delete button in the dialog - const confirmDeleteButton = screen.getByRole("button", { name: /delete/i }) - fireEvent.click(confirmDeleteButton) + // Shift-click delete button + const deleteButton = within(taskContainer).getByTitle("Delete Task (Shift + Click to skip confirmation)") + fireEvent.click(deleteButton, { shiftKey: true }) - // Verify vscode message was sent - expect(vscode.postMessage).toHaveBeenCalledWith({ - type: "deleteTaskWithId", - text: "1", + // Verify no dialog is shown + expect(screen.queryByRole("alertdialog")).not.toBeInTheDocument() + + // Verify vscode message was sent + expect(vscode.postMessage).toHaveBeenCalledWith({ + type: "deleteTaskWithId", + text: "1", + }) }) }) From b3fd1a2e232f059b9ba6d480795445624ff8206c Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Fri, 28 Feb 2025 09:08:58 -0800 Subject: [PATCH 107/145] Prettier thinking blocks --- webview-ui/src/components/chat/ChatRow.tsx | 59 +++------ .../src/components/chat/ReasoningBlock.tsx | 117 +++++++++++------- webview-ui/src/index.css | 2 + 3 files changed, 89 insertions(+), 89 deletions(-) diff --git a/webview-ui/src/components/chat/ChatRow.tsx b/webview-ui/src/components/chat/ChatRow.tsx index 4017ccf318e..1533bba3a8f 100644 --- a/webview-ui/src/components/chat/ChatRow.tsx +++ b/webview-ui/src/components/chat/ChatRow.tsx @@ -16,7 +16,7 @@ import { vscode } from "../../utils/vscode" import CodeAccordian, { removeLeadingNonAlphanumeric } from "../common/CodeAccordian" import CodeBlock, { CODE_BLOCK_BG_COLOR } from "../common/CodeBlock" import MarkdownBlock from "../common/MarkdownBlock" 
-import ReasoningBlock from "./ReasoningBlock" +import { ReasoningBlock } from "./ReasoningBlock" import Thumbnails from "../common/Thumbnails" import McpResourceRow from "../mcp/McpResourceRow" import McpToolRow from "../mcp/McpToolRow" @@ -25,12 +25,12 @@ import { CheckpointSaved } from "./checkpoints/CheckpointSaved" interface ChatRowProps { message: ClineMessage - isExpanded: boolean - onToggleExpand: () => void lastModifiedMessage?: ClineMessage + isExpanded: boolean isLast: boolean - onHeightChange: (isTaller: boolean) => void isStreaming: boolean + onToggleExpand: () => void + onHeightChange: (isTaller: boolean) => void } interface ChatRowContentProps extends Omit {} @@ -43,10 +43,7 @@ const ChatRow = memo( const prevHeightRef = useRef(0) const [chatrow, { height }] = useSize( -
+
, ) @@ -75,33 +72,32 @@ export default ChatRow export const ChatRowContent = ({ message, - isExpanded, - onToggleExpand, lastModifiedMessage, + isExpanded, isLast, isStreaming, + onToggleExpand, }: ChatRowContentProps) => { const { mcpServers, alwaysAllowMcp, currentCheckpoint } = useExtensionState() - const [reasoningCollapsed, setReasoningCollapsed] = useState(false) + const [reasoningCollapsed, setReasoningCollapsed] = useState(true) - // Auto-collapse reasoning when new messages arrive - useEffect(() => { - if (!isLast && message.say === "reasoning") { - setReasoningCollapsed(true) - } - }, [isLast, message.say]) const [cost, apiReqCancelReason, apiReqStreamingFailedMessage] = useMemo(() => { if (message.text !== null && message.text !== undefined && message.say === "api_req_started") { const info: ClineApiReqInfo = JSON.parse(message.text) return [info.cost, info.cancelReason, info.streamingFailedMessage] } + return [undefined, undefined, undefined] }, [message.text, message.say]) - // when resuming task, last wont be api_req_failed but a resume_task message, so api_req_started will show loading spinner. that's why we just remove the last api_req_started that failed without streaming anything + + // When resuming task, last wont be api_req_failed but a resume_task + // message, so api_req_started will show loading spinner. That's why we just + // remove the last api_req_started that failed without streaming anything. const apiRequestFailedMessage = isLast && lastModifiedMessage?.ask === "api_req_failed" // if request is retried then the latest message is a api_req_retried ? lastModifiedMessage?.text : undefined + const isCommandExecuting = isLast && lastModifiedMessage?.ask === "command" && lastModifiedMessage?.text?.includes(COMMAND_OUTPUT_STRING) @@ -428,32 +424,6 @@ export const ChatRowContent = ({ /> ) - // case "inspectSite": - // const isInspecting = - // isLast && lastModifiedMessage?.say === "inspect_site_result" && !lastModifiedMessage?.images - // return ( - // <> - //
- // {isInspecting ? : toolIcon("inspect")} - // - // {message.type === "ask" ? ( - // <>Roo wants to inspect this website: - // ) : ( - // <>Roo is inspecting this website: - // )} - // - //
- //
- // - //
- // - // ) case "switchMode": return ( <> @@ -501,6 +471,7 @@ export const ChatRowContent = ({ return ( setReasoningCollapsed(!reasoningCollapsed)} /> diff --git a/webview-ui/src/components/chat/ReasoningBlock.tsx b/webview-ui/src/components/chat/ReasoningBlock.tsx index 0c9971f2690..fa128990924 100644 --- a/webview-ui/src/components/chat/ReasoningBlock.tsx +++ b/webview-ui/src/components/chat/ReasoningBlock.tsx @@ -1,70 +1,97 @@ -import React, { useEffect, useRef } from "react" -import { CODE_BLOCK_BG_COLOR } from "../common/CodeBlock" +import { useCallback, useEffect, useRef, useState } from "react" +import { CaretDownIcon, CaretUpIcon, CounterClockwiseClockIcon } from "@radix-ui/react-icons" + import MarkdownBlock from "../common/MarkdownBlock" +import { useMount } from "react-use" interface ReasoningBlockProps { content: string + elapsed?: number isCollapsed?: boolean onToggleCollapse?: () => void - autoHeight?: boolean } -const ReasoningBlock: React.FC = ({ - content, - isCollapsed = false, - onToggleCollapse, - autoHeight = false, -}) => { +export const ReasoningBlock = ({ content, elapsed, isCollapsed = false, onToggleCollapse }: ReasoningBlockProps) => { const contentRef = useRef(null) + const elapsedRef = useRef(0) + const [thought, setThought] = useState() + const [prevThought, setPrevThought] = useState("Thinking") + const [isTransitioning, setIsTransitioning] = useState(false) + const cursorRef = useRef(0) + const queueRef = useRef([]) - // Scroll to bottom when content updates useEffect(() => { if (contentRef.current && !isCollapsed) { contentRef.current.scrollTop = contentRef.current.scrollHeight } }, [content, isCollapsed]) + useEffect(() => { + if (elapsed) { + elapsedRef.current = elapsed + } + }, [elapsed]) + + // Process the transition queue. + const processNextTransition = useCallback(() => { + const nextThought = queueRef.current.pop() + queueRef.current = [] + + if (nextThought) { + setIsTransitioning(true) + } + + setTimeout(() => { + if (nextThought) { + setPrevThought(nextThought) + setIsTransitioning(false) + } + + setTimeout(() => processNextTransition(), 500) + }, 200) + }, []) + + useMount(() => { + processNextTransition() + }) + + useEffect(() => { + if (content.length - cursorRef.current > 160) { + setThought("... " + content.slice(cursorRef.current)) + cursorRef.current = content.length + } + }, [content]) + + useEffect(() => { + if (thought && thought !== prevThought) { + queueRef.current.push(thought) + } + }, [thought, prevThought]) + return ( -
+
- Reasoning - + className="flex items-center justify-between gap-1 px-3 py-2 cursor-pointer text-muted-foreground" + onClick={onToggleCollapse}> +
+ {prevThought} +
+
+ {elapsedRef.current > 1000 && ( + <> + +
{Math.round(elapsedRef.current / 1000)}s
+ + )} + {isCollapsed ? : } +
{!isCollapsed && ( -
-
- -
+
+
)}
) } - -export default ReasoningBlock diff --git a/webview-ui/src/index.css b/webview-ui/src/index.css index 53025be01a6..74c8463b372 100644 --- a/webview-ui/src/index.css +++ b/webview-ui/src/index.css @@ -64,6 +64,8 @@ --color-vscode-editor-foreground: var(--vscode-editor-foreground); --color-vscode-editor-background: var(--vscode-editor-background); + --color-vscode-editorGroup-border: var(--vscode-editorGroup-border); + --color-vscode-button-foreground: var(--vscode-button-foreground); --color-vscode-button-background: var(--vscode-button-background); --color-vscode-button-secondaryForeground: var(--vscode-button-secondaryForeground); From 360e47d641e9acbb33586342a079efaf150ef85f Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Fri, 28 Feb 2025 09:09:42 -0800 Subject: [PATCH 108/145] Add changeset --- .changeset/young-hornets-taste.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/young-hornets-taste.md diff --git a/.changeset/young-hornets-taste.md b/.changeset/young-hornets-taste.md new file mode 100644 index 00000000000..1b9c3d94e80 --- /dev/null +++ b/.changeset/young-hornets-taste.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Prettier thinking blocks From 9b30065231061dfbcae9d8ef6e14082fe9064757 Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Fri, 28 Feb 2025 08:25:04 -0800 Subject: [PATCH 109/145] Make the copy action on history and history preview consistent --- .../src/components/history/CopyButton.tsx | 32 +++++ .../src/components/history/ExportButton.tsx | 16 +++ .../src/components/history/HistoryPreview.tsx | 124 ++++-------------- .../src/components/history/HistoryView.tsx | 51 +------ webview-ui/src/index.css | 2 + webview-ui/src/utils/__tests__/format.test.ts | 51 +++++++ webview-ui/src/utils/format.ts | 15 +++ 7 files changed, 147 insertions(+), 144 deletions(-) create mode 100644 webview-ui/src/components/history/CopyButton.tsx create mode 100644 webview-ui/src/components/history/ExportButton.tsx create mode 100644 webview-ui/src/utils/__tests__/format.test.ts diff --git a/webview-ui/src/components/history/CopyButton.tsx b/webview-ui/src/components/history/CopyButton.tsx new file mode 100644 index 00000000000..0e693b44703 --- /dev/null +++ b/webview-ui/src/components/history/CopyButton.tsx @@ -0,0 +1,32 @@ +import { useCallback } from "react" + +import { useClipboard } from "@/components/ui/hooks" +import { Button } from "@/components/ui" +import { cn } from "@/lib/utils" + +type CopyButtonProps = { + itemTask: string +} + +export const CopyButton = ({ itemTask }: CopyButtonProps) => { + const { isCopied, copy } = useClipboard() + + const onCopy = useCallback( + (e: React.MouseEvent) => { + e.stopPropagation() + !isCopied && copy(itemTask) + }, + [isCopied, copy, itemTask], + ) + + return ( + + ) +} diff --git a/webview-ui/src/components/history/ExportButton.tsx b/webview-ui/src/components/history/ExportButton.tsx new file mode 100644 index 00000000000..6617e475bdd --- /dev/null +++ b/webview-ui/src/components/history/ExportButton.tsx @@ -0,0 +1,16 @@ +import { vscode } from "@/utils/vscode" +import { Button } from "@/components/ui" + +export const ExportButton = ({ itemId }: { itemId: string }) => ( + +) diff --git a/webview-ui/src/components/history/HistoryPreview.tsx b/webview-ui/src/components/history/HistoryPreview.tsx index b2898fc6a8d..bf53845da7e 100644 --- a/webview-ui/src/components/history/HistoryPreview.tsx +++ b/webview-ui/src/components/history/HistoryPreview.tsx @@ -1,9 +1,11 @@ -import { VSCodeButton } from 
"@vscode/webview-ui-toolkit/react" -import { useExtensionState } from "../../context/ExtensionStateContext" -import { vscode } from "../../utils/vscode" import { memo } from "react" -import { formatLargeNumber } from "../../utils/format" -import { useCopyToClipboard } from "../../utils/clipboard" + +import { vscode } from "@/utils/vscode" +import { formatLargeNumber, formatDate } from "@/utils/format" +import { Button } from "@/components/ui" + +import { useExtensionState } from "../../context/ExtensionStateContext" +import { CopyButton } from "./CopyButton" type HistoryPreviewProps = { showHistoryView: () => void @@ -11,52 +13,15 @@ type HistoryPreviewProps = { const HistoryPreview = ({ showHistoryView }: HistoryPreviewProps) => { const { taskHistory } = useExtensionState() - const { showCopyFeedback, copyWithFeedback } = useCopyToClipboard() + const handleHistorySelect = (id: string) => { vscode.postMessage({ type: "showTaskWithId", text: id }) } - const formatDate = (timestamp: number) => { - const date = new Date(timestamp) - return date - ?.toLocaleString("en-US", { - month: "long", - day: "numeric", - hour: "numeric", - minute: "2-digit", - hour12: true, - }) - .replace(", ", " ") - .replace(" at", ",") - .toUpperCase() - } - return (
- {showCopyFeedback &&
Prompt Copied to Clipboard
} -
{ display: "flex", alignItems: "center", }}> - - - Recent Tasks - + + Recent Tasks
- -
+
{taskHistory .filter((item) => item.ts && item.task) .slice(0, 3) @@ -103,48 +57,25 @@ const HistoryPreview = ({ showHistoryView }: HistoryPreviewProps) => { key={item.id} className="history-preview-item" onClick={() => handleHistorySelect(item.id)}> -
-
- +
+
+ {formatDate(item.ts)} - +
{item.task}
-
+
Tokens: ↑{formatLargeNumber(item.tokensIn || 0)} ↓ {formatLargeNumber(item.tokensOut || 0)} @@ -168,21 +99,14 @@ const HistoryPreview = ({ showHistoryView }: HistoryPreviewProps) => {
))} -
- +
diff --git a/webview-ui/src/components/history/HistoryView.tsx b/webview-ui/src/components/history/HistoryView.tsx index 49d71e5ddd9..d50a569c8d9 100644 --- a/webview-ui/src/components/history/HistoryView.tsx +++ b/webview-ui/src/components/history/HistoryView.tsx @@ -5,12 +5,14 @@ import prettyBytes from "pretty-bytes" import { Virtuoso } from "react-virtuoso" import { VSCodeButton, VSCodeTextField, VSCodeRadioGroup, VSCodeRadio } from "@vscode/webview-ui-toolkit/react" +import { vscode } from "@/utils/vscode" +import { formatLargeNumber, formatDate } from "@/utils/format" +import { highlightFzfMatch } from "@/utils/highlight" +import { Button } from "@/components/ui" + import { useExtensionState } from "../../context/ExtensionStateContext" -import { vscode } from "../../utils/vscode" -import { formatLargeNumber } from "../../utils/format" -import { highlightFzfMatch } from "../../utils/highlight" -import { useCopyToClipboard } from "../../utils/clipboard" -import { Button } from "../ui" +import { ExportButton } from "./ExportButton" +import { CopyButton } from "./CopyButton" type HistoryViewProps = { onDone: () => void @@ -40,21 +42,6 @@ const HistoryView = ({ onDone }: HistoryViewProps) => { const [deleteTaskId, setDeleteTaskId] = useState(null) - const formatDate = (timestamp: number) => { - const date = new Date(timestamp) - return date - ?.toLocaleString("en-US", { - month: "long", - day: "numeric", - hour: "numeric", - minute: "2-digit", - hour12: true, - }) - .replace(", ", " ") - .replace(" at", ",") - .toUpperCase() - } - const presentableTasks = useMemo(() => { return taskHistory.filter((item) => item.ts && item.task) }, [taskHistory]) @@ -409,28 +396,4 @@ const HistoryView = ({ onDone }: HistoryViewProps) => { ) } -const CopyButton = ({ itemTask }: { itemTask: string }) => { - const { showCopyFeedback, copyWithFeedback } = useCopyToClipboard() - - return ( - - ) -} - -const ExportButton = ({ itemId }: { itemId: string }) => ( - -) - export default memo(HistoryView) diff --git a/webview-ui/src/index.css b/webview-ui/src/index.css index 53025be01a6..0e80d1b0c62 100644 --- a/webview-ui/src/index.css +++ b/webview-ui/src/index.css @@ -23,6 +23,8 @@ @theme { --font-display: var(--vscode-font-family); + + --text-xs: calc(var(--vscode-font-size) * 0.85); --text-sm: calc(var(--vscode-font-size) * 0.9); --text-base: var(--vscode-font-size); --text-lg: calc(var(--vscode-font-size) * 1.1); diff --git a/webview-ui/src/utils/__tests__/format.test.ts b/webview-ui/src/utils/__tests__/format.test.ts new file mode 100644 index 00000000000..7377874fd01 --- /dev/null +++ b/webview-ui/src/utils/__tests__/format.test.ts @@ -0,0 +1,51 @@ +// npx jest src/utils/__tests__/format.test.ts + +import { formatDate } from "../format" + +describe("formatDate", () => { + it("formats a timestamp correctly", () => { + // January 15, 2023, 10:30 AM + const timestamp = new Date(2023, 0, 15, 10, 30).getTime() + const result = formatDate(timestamp) + + expect(result).toBe("JANUARY 15, 10:30 AM") + }) + + it("handles different months correctly", () => { + // February 28, 2023, 3:45 PM + const timestamp1 = new Date(2023, 1, 28, 15, 45).getTime() + expect(formatDate(timestamp1)).toBe("FEBRUARY 28, 3:45 PM") + + // December 31, 2023, 11:59 PM + const timestamp2 = new Date(2023, 11, 31, 23, 59).getTime() + expect(formatDate(timestamp2)).toBe("DECEMBER 31, 11:59 PM") + }) + + it("handles AM/PM correctly", () => { + // Morning time - 7:05 AM + const morningTimestamp = new Date(2023, 5, 15, 7, 5).getTime() + 
expect(formatDate(morningTimestamp)).toBe("JUNE 15, 7:05 AM") + + // Noon - 12:00 PM + const noonTimestamp = new Date(2023, 5, 15, 12, 0).getTime() + expect(formatDate(noonTimestamp)).toBe("JUNE 15, 12:00 PM") + + // Evening time - 8:15 PM + const eveningTimestamp = new Date(2023, 5, 15, 20, 15).getTime() + expect(formatDate(eveningTimestamp)).toBe("JUNE 15, 8:15 PM") + }) + + it("handles single-digit minutes with leading zeros", () => { + // 9:05 AM + const timestamp = new Date(2023, 3, 10, 9, 5).getTime() + expect(formatDate(timestamp)).toBe("APRIL 10, 9:05 AM") + }) + + it("converts the result to uppercase", () => { + const timestamp = new Date(2023, 8, 21, 16, 45).getTime() + const result = formatDate(timestamp) + + expect(result).toBe(result.toUpperCase()) + expect(result).toBe("SEPTEMBER 21, 4:45 PM") + }) +}) diff --git a/webview-ui/src/utils/format.ts b/webview-ui/src/utils/format.ts index 2e473c9b8ac..12e99962051 100644 --- a/webview-ui/src/utils/format.ts +++ b/webview-ui/src/utils/format.ts @@ -10,3 +10,18 @@ export function formatLargeNumber(num: number): string { } return num.toString() } + +export const formatDate = (timestamp: number) => { + const date = new Date(timestamp) + return date + .toLocaleString("en-US", { + month: "long", + day: "numeric", + hour: "numeric", + minute: "2-digit", + hour12: true, + }) + .replace(", ", " ") + .replace(" at", ",") + .toUpperCase() +} From 31d2d17847abdb30cf1f7430aa5ef63df9f3eea9 Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Fri, 28 Feb 2025 01:15:44 -0800 Subject: [PATCH 110/145] Fix maxTokens defaults for Claude 3.7 Sonnet models --- .changeset/tasty-grapes-suffer.md | 5 ++++ src/api/providers/openrouter.ts | 2 +- src/shared/api.ts | 6 ++--- .../src/components/settings/ApiOptions.tsx | 2 -- .../components/settings/ThinkingBudget.tsx | 12 +++------ .../settings/__tests__/ApiOptions.test.tsx | 2 -- .../__tests__/ThinkingBudget.test.tsx | 26 +++---------------- 7 files changed, 15 insertions(+), 40 deletions(-) create mode 100644 .changeset/tasty-grapes-suffer.md diff --git a/.changeset/tasty-grapes-suffer.md b/.changeset/tasty-grapes-suffer.md new file mode 100644 index 00000000000..7382b38c77f --- /dev/null +++ b/.changeset/tasty-grapes-suffer.md @@ -0,0 +1,5 @@ +--- +"roo-cline": patch +--- + +Fix maxTokens defaults for Claude 3.7 Sonnet models diff --git a/src/api/providers/openrouter.ts b/src/api/providers/openrouter.ts index 82c02e20a7d..70c40c2c279 100644 --- a/src/api/providers/openrouter.ts +++ b/src/api/providers/openrouter.ts @@ -278,7 +278,7 @@ export async function getOpenRouterModels() { modelInfo.supportsPromptCache = true modelInfo.cacheWritesPrice = 3.75 modelInfo.cacheReadsPrice = 0.3 - modelInfo.maxTokens = 64_000 + modelInfo.maxTokens = rawModel.id === "anthropic/claude-3.7-sonnet:thinking" ? 
64_000 : 16_384 break case rawModel.id.startsWith("anthropic/claude-3.5-sonnet-20240620"): modelInfo.supportsPromptCache = true diff --git a/src/shared/api.ts b/src/shared/api.ts index f88bb5e8b51..99e2986e882 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -111,7 +111,7 @@ export const anthropicModels = { thinking: true, }, "claude-3-7-sonnet-20250219": { - maxTokens: 64_000, + maxTokens: 16_384, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, @@ -437,7 +437,7 @@ export type VertexModelId = keyof typeof vertexModels export const vertexDefaultModelId: VertexModelId = "claude-3-7-sonnet@20250219" export const vertexModels = { "claude-3-7-sonnet@20250219:thinking": { - maxTokens: 64000, + maxTokens: 64_000, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, @@ -449,7 +449,7 @@ export const vertexModels = { thinking: true, }, "claude-3-7-sonnet@20250219": { - maxTokens: 8192, + maxTokens: 16_384, contextWindow: 200_000, supportsImages: true, supportsComputerUse: true, diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 42ac5cdcb30..6f0dba9f00e 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -7,7 +7,6 @@ import * as vscodemodels from "vscode" import { ApiConfiguration, ModelInfo, - ApiProvider, anthropicDefaultModelId, anthropicModels, azureOpenAiDefaultApiVersion, @@ -1385,7 +1384,6 @@ const ApiOptions = ({ apiConfiguration={apiConfiguration} setApiConfigurationField={setApiConfigurationField} modelInfo={selectedModelInfo} - provider={selectedProvider as ApiProvider} /> (field: K, value: ApiConfiguration[K]) => void modelInfo?: ModelInfo - provider?: ApiProvider } -export const ThinkingBudget = ({ - apiConfiguration, - setApiConfigurationField, - modelInfo, - provider, -}: ThinkingBudgetProps) => { - const tokens = apiConfiguration?.modelMaxTokens || modelInfo?.maxTokens || 64_000 +export const ThinkingBudget = ({ apiConfiguration, setApiConfigurationField, modelInfo }: ThinkingBudgetProps) => { + const tokens = apiConfiguration?.modelMaxTokens || 16_384 const tokensMin = 8192 const tokensMax = modelInfo?.maxTokens || 64_000 diff --git a/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx b/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx index 06ed95585ac..0b1fb284987 100644 --- a/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx +++ b/webview-ui/src/components/settings/__tests__/ApiOptions.test.tsx @@ -92,7 +92,6 @@ describe("ApiOptions", () => { }) expect(screen.getByTestId("thinking-budget")).toBeInTheDocument() - expect(screen.getByTestId("thinking-budget")).toHaveAttribute("data-provider", "anthropic") }) it("should show ThinkingBudget for Vertex models that support thinking", () => { @@ -104,7 +103,6 @@ describe("ApiOptions", () => { }) expect(screen.getByTestId("thinking-budget")).toBeInTheDocument() - expect(screen.getByTestId("thinking-budget")).toHaveAttribute("data-provider", "vertex") }) it("should not show ThinkingBudget for models that don't support thinking", () => { diff --git a/webview-ui/src/components/settings/__tests__/ThinkingBudget.test.tsx b/webview-ui/src/components/settings/__tests__/ThinkingBudget.test.tsx index 212316ea9ac..1e14e945249 100644 --- a/webview-ui/src/components/settings/__tests__/ThinkingBudget.test.tsx +++ b/webview-ui/src/components/settings/__tests__/ThinkingBudget.test.tsx @@ -1,7 +1,6 @@ -import React from 
"react" import { render, screen, fireEvent } from "@testing-library/react" import { ThinkingBudget } from "../ThinkingBudget" -import { ApiProvider, ModelInfo } from "../../../../../src/shared/api" +import { ModelInfo } from "../../../../../src/shared/api" // Mock Slider component jest.mock("@/components/ui", () => ({ @@ -25,11 +24,11 @@ describe("ThinkingBudget", () => { supportsPromptCache: true, supportsImages: true, } + const defaultProps = { apiConfiguration: {}, setApiConfigurationField: jest.fn(), modelInfo: mockModelInfo, - provider: "anthropic" as ApiProvider, } beforeEach(() => { @@ -60,25 +59,7 @@ describe("ThinkingBudget", () => { expect(screen.getAllByTestId("slider")).toHaveLength(2) }) - it("should use modelMaxThinkingTokens field for Anthropic provider", () => { - const setApiConfigurationField = jest.fn() - - render( - , - ) - - const sliders = screen.getAllByTestId("slider") - fireEvent.change(sliders[1], { target: { value: "5000" } }) - - expect(setApiConfigurationField).toHaveBeenCalledWith("modelMaxThinkingTokens", 5000) - }) - - it("should use modelMaxThinkingTokens field for Vertex provider", () => { + it("should update modelMaxThinkingTokens", () => { const setApiConfigurationField = jest.fn() render( @@ -86,7 +67,6 @@ describe("ThinkingBudget", () => { {...defaultProps} apiConfiguration={{ modelMaxThinkingTokens: 4096 }} setApiConfigurationField={setApiConfigurationField} - provider="vertex" />, ) From 0a81853d84e43ca92718166db6d07ea984889a39 Mon Sep 17 00:00:00 2001 From: yansheng3 Date: Fri, 28 Feb 2025 19:06:43 +0800 Subject: [PATCH 111/145] Optimize Task ID Initialization Order Signed-off-by: yansheng3 --- src/core/Cline.ts | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/core/Cline.ts b/src/core/Cline.ts index 00897eecf4a..3127d3d38bf 100644 --- a/src/core/Cline.ts +++ b/src/core/Cline.ts @@ -148,7 +148,8 @@ export class Cline { throw new Error("Either historyItem or task/images must be provided") } - this.taskId = crypto.randomUUID() + this.taskId = historyItem ? historyItem.id : crypto.randomUUID() + this.apiConfiguration = apiConfiguration this.api = buildApiHandler(apiConfiguration) this.terminalManager = new TerminalManager() @@ -161,10 +162,6 @@ export class Cline { this.diffViewProvider = new DiffViewProvider(cwd) this.enableCheckpoints = enableCheckpoints ?? false - if (historyItem) { - this.taskId = historyItem.id - } - // Initialize diffStrategy based on current state this.updateDiffStrategy(Experiments.isEnabled(experiments ?? {}, EXPERIMENT_IDS.DIFF_STRATEGY)) From 2773d391535db0f03dedc1df13af809b3f23c500 Mon Sep 17 00:00:00 2001 From: samir-nimbly <112695483+samir-nimbly@users.noreply.github.com> Date: Fri, 28 Feb 2025 20:05:31 +0530 Subject: [PATCH 112/145] fix: dropdown hover color fix for white themes --- webview-ui/src/components/chat/ContextMenu.tsx | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/webview-ui/src/components/chat/ContextMenu.tsx b/webview-ui/src/components/chat/ContextMenu.tsx index 2bb7a8ee68f..20bd5222f6d 100644 --- a/webview-ui/src/components/chat/ContextMenu.tsx +++ b/webview-ui/src/components/chat/ContextMenu.tsx @@ -187,10 +187,12 @@ const ContextMenu: React.FC = ({ display: "flex", alignItems: "center", justifyContent: "space-between", - backgroundColor: - index === selectedIndex && isOptionSelectable(option) - ? "var(--vscode-list-activeSelectionBackground)" - : "", + ...(index === selectedIndex && isOptionSelectable(option) + ? 
{ + backgroundColor: "var(--vscode-list-activeSelectionBackground)", + color: "var(--vscode-list-activeSelectionForeground)", + } + : {}), }} onMouseEnter={() => isOptionSelectable(option) && setSelectedIndex(index)}>
Date: Fri, 28 Feb 2025 10:30:37 -0500 Subject: [PATCH 113/145] Update pull_request_template.md --- .github/pull_request_template.md | 42 +++++++++++++++----------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 7ee8bb98ad5..85b5c9063c5 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,37 +1,35 @@ - +## Context -## Description + -## Type of change +## Implementation - + - +## Screenshots -## Checklist: +| before | after | +| ------ | ----- | +| | | - +## How to Test -- [ ] My code follows the patterns of this project -- [ ] I have performed a self-review of my own code -- [ ] I have commented my code, particularly in hard-to-understand areas -- [ ] I have made corresponding changes to the documentation + +A "How To Test" section can look something like this: -## Related Issues +- Sign in with a user with tracks +- Activate `show_awesome_cat_gifs` feature (add `?feature.show_awesome_cat_gifs=1` to your URL) +- You should see a GIF with cats dancing - +--> -## Reviewers +## Get in Touch - + From 87785bb7504710ddda21bb3693090e6a546a77ff Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Fri, 28 Feb 2025 10:35:38 -0500 Subject: [PATCH 114/145] Update pull_request_template.md --- .github/pull_request_template.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 85b5c9063c5..de7e461cb9c 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -32,4 +32,4 @@ A "How To Test" section can look something like this: ## Get in Touch - + From 4b66ce8255b594a94903b3859a29ad25244c3f3f Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Fri, 28 Feb 2025 09:38:48 -0800 Subject: [PATCH 115/145] Improve terminal command execution output parsing --- src/core/Cline.ts | 28 +- src/integrations/terminal/TerminalManager.ts | 193 +++++++- src/integrations/terminal/TerminalProcess.ts | 421 ++++++++++++------ src/integrations/terminal/TerminalRegistry.ts | 30 ++ .../__tests__/TerminalProcess.test.ts | 157 +++---- .../__tests__/TerminalRegistry.test.ts | 5 +- src/shared/combineCommandSequences.ts | 2 +- 7 files changed, 585 insertions(+), 251 deletions(-) diff --git a/src/core/Cline.ts b/src/core/Cline.ts index 00897eecf4a..fab181b86e4 100644 --- a/src/core/Cline.ts +++ b/src/core/Cline.ts @@ -22,7 +22,7 @@ import { everyLineHasLineNumbers, truncateOutput, } from "../integrations/misc/extract-text" -import { TerminalManager } from "../integrations/terminal/TerminalManager" +import { TerminalManager, ExitCodeDetails } from "../integrations/terminal/TerminalManager" import { UrlContentFetcher } from "../services/browser/UrlContentFetcher" import { listFiles } from "../services/glob/list-files" import { regexSearchFiles } from "../services/ripgrep" @@ -834,10 +834,21 @@ export class Cline { }) let completed = false - process.once("completed", () => { + let exitDetails: ExitCodeDetails | undefined + process.once("completed", (output?: string) => { + // Use provided output if available, otherwise keep existing result. 
+ if (output) { + lines = output.split("\n") + } completed = true }) + process.once("shell_execution_complete", (id: number, details: ExitCodeDetails) => { + if (id === terminalInfo.id) { + exitDetails = details + } + }) + process.once("no_shell_integration", async () => { await this.say("shell_integration_warning") }) @@ -869,7 +880,18 @@ export class Cline { } if (completed) { - return [false, `Command executed.${result.length > 0 ? `\nOutput:\n${result}` : ""}`] + let exitStatus = "No exit code available" + if (exitDetails !== undefined) { + if (exitDetails.signal) { + exitStatus = `Process terminated by signal ${exitDetails.signal} (${exitDetails.signalName})` + if (exitDetails.coreDumpPossible) { + exitStatus += " - core dump possible" + } + } else { + exitStatus = `Exit code: ${exitDetails.exitCode}` + } + } + return [false, `Command executed. ${exitStatus}${result.length > 0 ? `\nOutput:\n${result}` : ""}`] } else { return [ false, diff --git a/src/integrations/terminal/TerminalManager.ts b/src/integrations/terminal/TerminalManager.ts index d5496e20fb9..a55f7867d40 100644 --- a/src/integrations/terminal/TerminalManager.ts +++ b/src/integrations/terminal/TerminalManager.ts @@ -70,6 +70,15 @@ Interestingly, some environments like Cursor enable these APIs even without the This approach allows us to leverage advanced features when available while ensuring broad compatibility. */ declare module "vscode" { + // https://github.com/microsoft/vscode/blob/f0417069c62e20f3667506f4b7e53ca0004b4e3e/src/vscode-dts/vscode.d.ts#L7442 + // interface Terminal { + // shellIntegration?: { + // cwd?: vscode.Uri + // executeCommand?: (command: string) => { + // read: () => AsyncIterable + // } + // } + // } // https://github.com/microsoft/vscode/blob/f0417069c62e20f3667506f4b7e53ca0004b4e3e/src/vscode-dts/vscode.d.ts#L10794 interface Window { onDidStartTerminalShellExecution?: ( @@ -77,17 +86,19 @@ declare module "vscode" { thisArgs?: any, disposables?: vscode.Disposable[], ) => vscode.Disposable + onDidEndTerminalShellExecution?: ( + listener: (e: { terminal: vscode.Terminal; exitCode?: number; shellType?: string }) => any, + thisArgs?: any, + disposables?: vscode.Disposable[], + ) => vscode.Disposable } } -// Extend the Terminal type to include our custom properties -type ExtendedTerminal = vscode.Terminal & { - shellIntegration?: { - cwd?: vscode.Uri - executeCommand?: (command: string) => { - read: () => AsyncIterable - } - } +export interface ExitCodeDetails { + exitCode: number | undefined + signal?: number | undefined + signalName?: string + coreDumpPossible?: boolean } export class TerminalManager { @@ -95,18 +106,156 @@ export class TerminalManager { private processes: Map = new Map() private disposables: vscode.Disposable[] = [] + private interpretExitCode(exitCode: number | undefined): ExitCodeDetails { + if (exitCode === undefined) { + return { exitCode } + } + + if (exitCode <= 128) { + return { exitCode } + } + + const signal = exitCode - 128 + const signals: Record = { + // Standard signals + 1: "SIGHUP", + 2: "SIGINT", + 3: "SIGQUIT", + 4: "SIGILL", + 5: "SIGTRAP", + 6: "SIGABRT", + 7: "SIGBUS", + 8: "SIGFPE", + 9: "SIGKILL", + 10: "SIGUSR1", + 11: "SIGSEGV", + 12: "SIGUSR2", + 13: "SIGPIPE", + 14: "SIGALRM", + 15: "SIGTERM", + 16: "SIGSTKFLT", + 17: "SIGCHLD", + 18: "SIGCONT", + 19: "SIGSTOP", + 20: "SIGTSTP", + 21: "SIGTTIN", + 22: "SIGTTOU", + 23: "SIGURG", + 24: "SIGXCPU", + 25: "SIGXFSZ", + 26: "SIGVTALRM", + 27: "SIGPROF", + 28: "SIGWINCH", + 29: "SIGIO", + 30: "SIGPWR", + 31: 
"SIGSYS", + + // Real-time signals base + 34: "SIGRTMIN", + + // SIGRTMIN+n signals + 35: "SIGRTMIN+1", + 36: "SIGRTMIN+2", + 37: "SIGRTMIN+3", + 38: "SIGRTMIN+4", + 39: "SIGRTMIN+5", + 40: "SIGRTMIN+6", + 41: "SIGRTMIN+7", + 42: "SIGRTMIN+8", + 43: "SIGRTMIN+9", + 44: "SIGRTMIN+10", + 45: "SIGRTMIN+11", + 46: "SIGRTMIN+12", + 47: "SIGRTMIN+13", + 48: "SIGRTMIN+14", + 49: "SIGRTMIN+15", + + // SIGRTMAX-n signals + 50: "SIGRTMAX-14", + 51: "SIGRTMAX-13", + 52: "SIGRTMAX-12", + 53: "SIGRTMAX-11", + 54: "SIGRTMAX-10", + 55: "SIGRTMAX-9", + 56: "SIGRTMAX-8", + 57: "SIGRTMAX-7", + 58: "SIGRTMAX-6", + 59: "SIGRTMAX-5", + 60: "SIGRTMAX-4", + 61: "SIGRTMAX-3", + 62: "SIGRTMAX-2", + 63: "SIGRTMAX-1", + 64: "SIGRTMAX", + } + + // These signals may produce core dumps: + // SIGQUIT, SIGILL, SIGABRT, SIGBUS, SIGFPE, SIGSEGV + const coreDumpPossible = new Set([3, 4, 6, 7, 8, 11]) + + return { + exitCode, + signal, + signalName: signals[signal] || `Unknown Signal (${signal})`, + coreDumpPossible: coreDumpPossible.has(signal), + } + } + constructor() { - let disposable: vscode.Disposable | undefined + let startDisposable: vscode.Disposable | undefined + let endDisposable: vscode.Disposable | undefined try { - disposable = (vscode.window as vscode.Window).onDidStartTerminalShellExecution?.(async (e) => { - // Creating a read stream here results in a more consistent output. This is most obvious when running the `date` command. - e?.execution?.read() + // onDidStartTerminalShellExecution + startDisposable = (vscode.window as vscode.Window).onDidStartTerminalShellExecution?.(async (e) => { + // Get a handle to the stream as early as possible: + const stream = e?.execution.read() + const terminalInfo = TerminalRegistry.getTerminalInfoByTerminal(e.terminal) + if (stream && terminalInfo) { + const process = this.processes.get(terminalInfo.id) + if (process) { + terminalInfo.stream = stream + terminalInfo.running = true + terminalInfo.streamClosed = false + process.emit("stream_available", terminalInfo.id, stream) + } + } else { + console.error("[TerminalManager] Stream failed, not registered for terminal") + } + + console.info("[TerminalManager] Shell execution started:", { + hasExecution: !!e?.execution, + command: e?.execution?.commandLine?.value, + terminalId: terminalInfo?.id, + }) + }) + + // onDidEndTerminalShellExecution + endDisposable = (vscode.window as vscode.Window).onDidEndTerminalShellExecution?.(async (e) => { + const exitDetails = this.interpretExitCode(e?.exitCode) + console.info("[TerminalManager] Shell execution ended:", { + ...exitDetails, + }) + + // Signal completion to any waiting processes + for (const id of this.terminalIds) { + const info = TerminalRegistry.getTerminal(id) + if (info && info.terminal === e.terminal) { + info.running = false + const process = this.processes.get(id) + if (process) { + process.emit("shell_execution_complete", id, exitDetails) + } + break + } + } }) } catch (error) { - // console.error("Error setting up onDidEndTerminalShellExecution", error) + console.error("[TerminalManager] Error setting up shell execution handlers:", error) + } + if (startDisposable) { + this.disposables.push(startDisposable) } - if (disposable) { - this.disposables.push(disposable) + if (endDisposable) { + this.disposables.push(endDisposable) } } @@ -140,19 +289,16 @@ export class TerminalManager { }) // if shell integration is already active, run the command immediately - const terminal = terminalInfo.terminal as ExtendedTerminal - if (terminal.shellIntegration) { + if 
(terminalInfo.terminal.shellIntegration) { process.waitForShellIntegration = false - process.run(terminal, command) + process.run(terminalInfo.terminal, command) } else { // docs recommend waiting 3s for shell integration to activate - pWaitFor(() => (terminalInfo.terminal as ExtendedTerminal).shellIntegration !== undefined, { - timeout: 4000, - }).finally(() => { + pWaitFor(() => terminalInfo.terminal.shellIntegration !== undefined, { timeout: 4000 }).finally(() => { const existingProcess = this.processes.get(terminalInfo.id) if (existingProcess && existingProcess.waitForShellIntegration) { existingProcess.waitForShellIntegration = false - existingProcess.run(terminal, command) + existingProcess.run(terminalInfo.terminal, command) } }) } @@ -168,8 +314,7 @@ export class TerminalManager { if (t.busy) { return false } - const terminal = t.terminal as ExtendedTerminal - const terminalCwd = terminal.shellIntegration?.cwd // one of cline's commands could have changed the cwd of the terminal + const terminalCwd = t.terminal.shellIntegration?.cwd // one of cline's commands could have changed the cwd of the terminal if (!terminalCwd) { return false } diff --git a/src/integrations/terminal/TerminalProcess.ts b/src/integrations/terminal/TerminalProcess.ts index 5597350db3c..99ef215e784 100644 --- a/src/integrations/terminal/TerminalProcess.ts +++ b/src/integrations/terminal/TerminalProcess.ts @@ -1,13 +1,24 @@ import { EventEmitter } from "events" import stripAnsi from "strip-ansi" import * as vscode from "vscode" +import { inspect } from "util" + +import { ExitCodeDetails } from "./TerminalManager" +import { TerminalInfo, TerminalRegistry } from "./TerminalRegistry" export interface TerminalProcessEvents { line: [line: string] continue: [] - completed: [] + completed: [output?: string] error: [error: Error] no_shell_integration: [] + /** + * Emitted when a shell execution completes + * @param id The terminal ID + * @param exitDetails Contains exit code and signal information if process was terminated by signal + */ + shell_execution_complete: [id: number, exitDetails: ExitCodeDetails] + stream_available: [id: number, stream: AsyncIterable] } // how long to wait after a process outputs anything before we consider it "cool" again @@ -17,104 +28,99 @@ const PROCESS_HOT_TIMEOUT_COMPILING = 15_000 export class TerminalProcess extends EventEmitter { waitForShellIntegration: boolean = true private isListening: boolean = true - private buffer: string = "" + private terminalInfo: TerminalInfo | undefined + private lastEmitTime_ms: number = 0 private fullOutput: string = "" private lastRetrievedIndex: number = 0 isHot: boolean = false private hotTimer: NodeJS.Timeout | null = null - // constructor() { - // super() - async run(terminal: vscode.Terminal, command: string) { if (terminal.shellIntegration && terminal.shellIntegration.executeCommand) { - const execution = terminal.shellIntegration.executeCommand(command) - const stream = execution.read() - // todo: need to handle errors - let isFirstChunk = true - let didOutputNonCommand = false - let didEmitEmptyLine = false - for await (let data of stream) { - // 1. Process chunk and remove artifacts - if (isFirstChunk) { - /* - The first chunk we get from this stream needs to be processed to be more human readable, ie remove vscode's custom escape sequences and identifiers, removing duplicate first char bug, etc. 
- */ - - // bug where sometimes the command output makes its way into vscode shell integration metadata - /* - ]633 is a custom sequence number used by VSCode shell integration: - - OSC 633 ; A ST - Mark prompt start - - OSC 633 ; B ST - Mark prompt end - - OSC 633 ; C ST - Mark pre-execution (start of command output) - - OSC 633 ; D [; ] ST - Mark execution finished with optional exit code - - OSC 633 ; E ; [; ] ST - Explicitly set command line with optional nonce - */ - // if you print this data you might see something like "eecho hello worldo hello world;5ba85d14-e92a-40c4-b2fd-71525581eeb0]633;C" but this is actually just a bunch of escape sequences, ignore up to the first ;C - /* ddateb15026-6a64-40db-b21f-2a621a9830f0]633;CTue Sep 17 06:37:04 EDT 2024 % ]633;D;0]633;P;Cwd=/Users/saoud/Repositories/test */ - // Gets output between ]633;C (command start) and ]633;D (command end) - const outputBetweenSequences = this.removeLastLineArtifacts( - data.match(/\]633;C([\s\S]*?)\]633;D/)?.[1] || "", - ).trim() - - // Once we've retrieved any potential output between sequences, we can remove everything up to end of the last sequence - // https://code.visualstudio.com/docs/terminal/shell-integration#_vs-code-custom-sequences-osc-633-st - const vscodeSequenceRegex = /\x1b\]633;.[^\x07]*\x07/g - const lastMatch = [...data.matchAll(vscodeSequenceRegex)].pop() - if (lastMatch && lastMatch.index !== undefined) { - data = data.slice(lastMatch.index + lastMatch[0].length) - } - // Place output back after removing vscode sequences - if (outputBetweenSequences) { - data = outputBetweenSequences + "\n" + data - } - // remove ansi - data = stripAnsi(data) - // Split data by newlines - let lines = data ? data.split("\n") : [] - // Remove non-human readable characters from the first line - if (lines.length > 0) { - lines[0] = lines[0].replace(/[^\x20-\x7E]/g, "") - } - // Check if first two characters are the same, if so remove the first character - if (lines.length > 0 && lines[0].length >= 2 && lines[0][0] === lines[0][1]) { - lines[0] = lines[0].slice(1) - } - // Remove everything up to the first alphanumeric character for first two lines - if (lines.length > 0) { - lines[0] = lines[0].replace(/^[^a-zA-Z0-9]*/, "") + // Get terminal info to access stream + const terminalInfo = TerminalRegistry.getTerminalInfoByTerminal(terminal) + if (!terminalInfo) { + console.error("[TerminalProcess] Terminal not found in registry") + this.emit("no_shell_integration") + this.emit("completed") + this.emit("continue") + return + } + + // When executeCommand() is called, onDidStartTerminalShellExecution will fire in TerminalManager + // which creates a new stream via execution.read() and emits 'stream_available' + const streamAvailable = new Promise>((resolve) => { + this.once("stream_available", (id: number, stream: AsyncIterable) => { + if (id === terminalInfo.id) { + resolve(stream) } - if (lines.length > 1) { - lines[1] = lines[1].replace(/^[^a-zA-Z0-9]*/, "") + }) + }) + + // Create promise that resolves when shell execution completes for this terminal + const shellExecutionComplete = new Promise((resolve) => { + this.once("shell_execution_complete", (id: number, exitDetails: ExitCodeDetails) => { + if (id === terminalInfo.id) { + resolve(exitDetails) } - // Join lines back - data = lines.join("\n") - isFirstChunk = false - } else { - data = stripAnsi(data) - } + }) + }) + + // getUnretrievedOutput needs to know if streamClosed, so store this for later + this.terminalInfo = terminalInfo + + // Execute command + 
terminal.shellIntegration.executeCommand(command) + this.isHot = true + + // Wait for stream to be available + const stream = await streamAvailable - // first few chunks could be the command being echoed back, so we must ignore - // note this means that 'echo' commands wont work - if (!didOutputNonCommand) { - const lines = data.split("\n") - for (let i = 0; i < lines.length; i++) { - if (command.includes(lines[i].trim())) { - lines.splice(i, 1) - i-- // Adjust index after removal - } else { - didOutputNonCommand = true - break - } + let preOutput = "" + let commandOutputStarted = false + + /* + * Extract clean output from raw accumulated output. FYI: + * ]633 is a custom sequence number used by VSCode shell integration: + * - OSC 633 ; A ST - Mark prompt start + * - OSC 633 ; B ST - Mark prompt end + * - OSC 633 ; C ST - Mark pre-execution (start of command output) + * - OSC 633 ; D [; ] ST - Mark execution finished with optional exit code + * - OSC 633 ; E ; [; ] ST - Explicitly set command line with optional nonce + */ + + // Process stream data + for await (let data of stream) { + // Check for command output start marker + if (!commandOutputStarted) { + preOutput += data + const match = this.matchAfterVsceStartMarkers(data) + if (match !== undefined) { + commandOutputStarted = true + data = match + this.fullOutput = "" // Reset fullOutput when command actually starts + } else { + continue } - data = lines.join("\n") } - // FIXME: right now it seems that data chunks returned to us from the shell integration stream contains random commas, which from what I can tell is not the expected behavior. There has to be a better solution here than just removing all commas. - data = data.replace(/,/g, "") + // Command output started, accumulate data without filtering. + // notice to future programmers: do not add escape sequence + // filtering here: fullOutput cannot change in length (see getUnretrievedOutput), + // and chunks may not be complete so you cannot rely on detecting or removing escape sequences mid-stream. + this.fullOutput += data - // 2. Set isHot depending on the command - // Set to hot to stall API requests until terminal is cool again + // For non-immediately returning commands we want to show loading spinner + // right away but this wouldnt happen until it emits a line break, so + // as soon as we get any output we emit to let webview know to show spinner + const now = Date.now() + if (this.isListening && (now - this.lastEmitTime_ms > 100 || this.lastEmitTime_ms === 0)) { + this.emitRemainingBufferIfListening() + this.lastEmitTime_ms = now + } + + // 2. Set isHot depending on the command. + // This stalls API requests until terminal is cool again. this.isHot = true if (this.hotTimer) { clearTimeout(this.hotTimer) @@ -144,21 +150,37 @@ export class TerminalProcess extends EventEmitter { }, isCompiling ? 
PROCESS_HOT_TIMEOUT_COMPILING : PROCESS_HOT_TIMEOUT_NORMAL, ) + } - // For non-immediately returning commands we want to show loading spinner right away but this wouldnt happen until it emits a line break, so as soon as we get any output we emit "" to let webview know to show spinner - if (!didEmitEmptyLine && !this.fullOutput && data) { - this.emit("line", "") // empty line to indicate start of command output stream - didEmitEmptyLine = true - } + // Set streamClosed immediately after stream ends + if (this.terminalInfo) { + this.terminalInfo.streamClosed = true + } - this.fullOutput += data - if (this.isListening) { - this.emitIfEol(data) - this.lastRetrievedIndex = this.fullOutput.length - this.buffer.length - } + // Wait for shell execution to complete and handle exit details + const exitDetails = await shellExecutionComplete + this.isHot = false + + if (commandOutputStarted) { + // Emit any remaining output before completing + this.emitRemainingBufferIfListening() + } else { + console.error( + "[Terminal Process] VSCE output start escape sequence (]633;C or ]133;C) not received! VSCE Bug? preOutput: " + + inspect(preOutput, { colors: false, breakLength: Infinity }), + ) + } + + // console.debug("[Terminal Process] raw output: " + inspect(output, { colors: false, breakLength: Infinity })) + + // fullOutput begins after C marker so we only need to trim off D marker + // (if D exists, see VSCode bug# 237208): + const match = this.matchBeforeVsceEndMarkers(this.fullOutput) + if (match !== undefined) { + this.fullOutput = match } - this.emitRemainingBufferIfListening() + // console.debug(`[Terminal Process] processed output via ${matchSource}: ` + inspect(output, { colors: false, breakLength: Infinity })) // for now we don't want this delaying requests since we don't send diagnostics automatically anymore (previous: "even though the command is finished, we still want to consider it 'hot' in case so that api request stalls to let diagnostics catch up") if (this.hotTimer) { @@ -166,7 +188,7 @@ export class TerminalProcess extends EventEmitter { } this.isHot = false - this.emit("completed") + this.emit("completed", this.removeEscapeSequences(this.fullOutput)) this.emit("continue") } else { terminal.sendText(command, true) @@ -182,29 +204,12 @@ export class TerminalProcess extends EventEmitter { } } - // Inspired by https://github.com/sindresorhus/execa/blob/main/lib/transform/split.js - private emitIfEol(chunk: string) { - this.buffer += chunk - let lineEndIndex: number - while ((lineEndIndex = this.buffer.indexOf("\n")) !== -1) { - let line = this.buffer.slice(0, lineEndIndex).trimEnd() // removes trailing \r - // Remove \r if present (for Windows-style line endings) - // if (line.endsWith("\r")) { - // line = line.slice(0, -1) - // } - this.emit("line", line) - this.buffer = this.buffer.slice(lineEndIndex + 1) - } - } - private emitRemainingBufferIfListening() { - if (this.buffer && this.isListening) { - const remainingBuffer = this.removeLastLineArtifacts(this.buffer) - if (remainingBuffer) { + if (this.isListening) { + const remainingBuffer = this.getUnretrievedOutput() + if (remainingBuffer !== "") { this.emit("line", remainingBuffer) } - this.buffer = "" - this.lastRetrievedIndex = this.fullOutput.length } } @@ -215,22 +220,180 @@ export class TerminalProcess extends EventEmitter { this.emit("continue") } + // Returns complete lines with their carriage returns. + // The final line may lack a carriage return if the program didn't send one. 
getUnretrievedOutput(): string { - const unretrieved = this.fullOutput.slice(this.lastRetrievedIndex) - this.lastRetrievedIndex = this.fullOutput.length - return this.removeLastLineArtifacts(unretrieved) + // Get raw unretrieved output + let outputToProcess = this.fullOutput.slice(this.lastRetrievedIndex) + + // Check for VSCE command end markers + const index633 = outputToProcess.indexOf("\x1b]633;D") + const index133 = outputToProcess.indexOf("\x1b]133;D") + let endIndex = -1 + + if (index633 !== -1 && index133 !== -1) { + endIndex = Math.min(index633, index133) + } else if (index633 !== -1) { + endIndex = index633 + } else if (index133 !== -1) { + endIndex = index133 + } + + // If no end markers were found yet (possibly due to VSCode bug#237208): + // For active streams: return only complete lines (up to last \n). + // For closed streams: return all remaining content. + if (endIndex === -1) { + if (!this.terminalInfo?.streamClosed) { + // Stream still running - only process complete lines + endIndex = outputToProcess.lastIndexOf("\n") + if (endIndex === -1) { + // No complete lines + return "" + } + + // Include carriage return + endIndex++ + } else { + // Stream closed - process all remaining output + endIndex = outputToProcess.length + } + } + + // Update index and slice output + this.lastRetrievedIndex += endIndex + outputToProcess = outputToProcess.slice(0, endIndex) + + // Clean and return output + return this.removeEscapeSequences(outputToProcess) } - // some processing to remove artifacts like '%' at the end of the buffer (it seems that since vsode uses % at the beginning of newlines in terminal, it makes its way into the stream) - // This modification will remove '%', '$', '#', or '>' followed by optional whitespace - removeLastLineArtifacts(output: string) { - const lines = output.trimEnd().split("\n") - if (lines.length > 0) { - const lastLine = lines[lines.length - 1] - // Remove prompt characters and trailing whitespace from the last line - lines[lines.length - 1] = lastLine.replace(/[%$#>]\s*$/, "") + private stringIndexMatch( + data: string, + prefix?: string, + suffix?: string, + bell: string = "\x07", + ): string | undefined { + let startIndex: number + let endIndex: number + let prefixLength: number + + if (prefix === undefined) { + startIndex = 0 + prefixLength = 0 + } else { + startIndex = data.indexOf(prefix) + if (startIndex === -1) { + return undefined + } + if (bell.length > 0) { + // Find the bell character after the prefix + const bellIndex = data.indexOf(bell, startIndex + prefix.length) + if (bellIndex === -1) { + return undefined + } + + const distanceToBell = bellIndex - startIndex + + prefixLength = distanceToBell + bell.length + } else { + prefixLength = prefix.length + } } - return lines.join("\n").trimEnd() + + const contentStart = startIndex + prefixLength + + if (suffix === undefined) { + // When suffix is undefined, match to end + endIndex = data.length + } else { + endIndex = data.indexOf(suffix, contentStart) + if (endIndex === -1) { + return undefined + } + } + + return data.slice(contentStart, endIndex) + } + + // Removes ANSI escape sequences and VSCode-specific terminal control codes from output. + // While stripAnsi handles most ANSI codes, VSCode's shell integration adds custom + // escape sequences (OSC 633) that need special handling. These sequences control + // terminal features like marking command start/end and setting prompts. 
+ // + // This method could be extended to handle other escape sequences, but any additions + // should be carefully considered to ensure they only remove control codes and don't + // alter the actual content or behavior of the output stream. + private removeEscapeSequences(str: string): string { + return stripAnsi(str.replace(/\x1b\]633;[^\x07]+\x07/gs, "").replace(/\x1b\]133;[^\x07]+\x07/gs, "")) + } + + /** + * Helper function to match VSCode shell integration start markers (C). + * Looks for content after ]633;C or ]133;C markers. + * If both exist, takes the content after the last marker found. + */ + private matchAfterVsceStartMarkers(data: string): string | undefined { + return this.matchVsceMarkers(data, "\x1b]633;C", "\x1b]133;C", undefined, undefined) + } + + /** + * Helper function to match VSCode shell integration end markers (D). + * Looks for content before ]633;D or ]133;D markers. + * If both exist, takes the content before the first marker found. + */ + private matchBeforeVsceEndMarkers(data: string): string | undefined { + return this.matchVsceMarkers(data, undefined, undefined, "\x1b]633;D", "\x1b]133;D") + } + + /** + * Handles VSCode shell integration markers for command output: + * + * For C (Command Start): + * - Looks for content after ]633;C or ]133;C markers + * - These markers indicate the start of command output + * - If both exist, takes the content after the last marker found + * - This ensures we get the actual command output after any shell integration prefixes + * + * For D (Command End): + * - Looks for content before ]633;D or ]133;D markers + * - These markers indicate command completion + * - If both exist, takes the content before the first marker found + * - This ensures we don't include shell integration suffixes in the output + * + * In both cases, checks 633 first since it's more commonly used in VSCode shell integration + * + * @param data The string to search for markers in + * @param prefix633 The 633 marker to match after (for C markers) + * @param prefix133 The 133 marker to match after (for C markers) + * @param suffix633 The 633 marker to match before (for D markers) + * @param suffix133 The 133 marker to match before (for D markers) + * @returns The content between/after markers, or undefined if no markers found + * + * Note: Always makes exactly 2 calls to stringIndexMatch regardless of match results. + * Using string indexOf matching is ~500x faster than regular expressions, so even + * matching twice is still very efficient comparatively. + */ + private matchVsceMarkers( + data: string, + prefix633: string | undefined, + prefix133: string | undefined, + suffix633: string | undefined, + suffix133: string | undefined, + ): string | undefined { + // Support both VSCode shell integration markers (633 and 133) + // Check 633 first since it's more commonly used in VSCode shell integration + let match133: string | undefined + const match633 = this.stringIndexMatch(data, prefix633, suffix633) + + // Must check explicitly for undefined because stringIndexMatch can return empty strings + // that are valid matches (e.g., when a marker exists but has no content between markers) + if (match633 !== undefined) { + match133 = this.stringIndexMatch(match633, prefix133, suffix133) + } else { + match133 = this.stringIndexMatch(data, prefix133, suffix133) + } + + return match133 !== undefined ? 
match133 : match633 } } diff --git a/src/integrations/terminal/TerminalRegistry.ts b/src/integrations/terminal/TerminalRegistry.ts index 2fb49e48257..69a21d94fde 100644 --- a/src/integrations/terminal/TerminalRegistry.ts +++ b/src/integrations/terminal/TerminalRegistry.ts @@ -5,6 +5,9 @@ export interface TerminalInfo { busy: boolean lastCommand: string id: number + stream?: AsyncIterable + running: boolean + streamClosed: boolean } // Although vscode.window.terminals provides a list of all open terminals, there's no way to know whether they're busy or not (exitStatus does not provide useful information for most commands). In order to prevent creating too many terminals, we need to keep track of terminals through the life of the extension, as well as session specific terminals for the life of a task (to get latest unretrieved output). @@ -20,34 +23,61 @@ export class TerminalRegistry { iconPath: new vscode.ThemeIcon("rocket"), env: { PAGER: "cat", + + // VSCode bug#237208: Command output can be lost due to a race between completion + // sequences and consumers. Add 50ms delay via PROMPT_COMMAND to ensure the + // \x1b]633;D escape sequence arrives after command output is processed. + PROMPT_COMMAND: "sleep 0.050", + + // VTE must be disabled because it prevents the prompt command above from executing + // See https://wiki.gnome.org/Apps/Terminal/VTE + VTE_VERSION: "0", }, }) + const newInfo: TerminalInfo = { terminal, busy: false, lastCommand: "", id: this.nextTerminalId++, + running: false, + streamClosed: false, } + this.terminals.push(newInfo) return newInfo } static getTerminal(id: number): TerminalInfo | undefined { const terminalInfo = this.terminals.find((t) => t.id === id) + if (terminalInfo && this.isTerminalClosed(terminalInfo.terminal)) { this.removeTerminal(id) return undefined } + return terminalInfo } static updateTerminal(id: number, updates: Partial) { const terminal = this.getTerminal(id) + if (terminal) { Object.assign(terminal, updates) } } + static getTerminalInfoByTerminal(terminal: vscode.Terminal): TerminalInfo | undefined { + const terminalInfo = this.terminals.find((t) => t.terminal === terminal) + + if (terminalInfo && this.isTerminalClosed(terminalInfo.terminal)) { + this.removeTerminal(terminalInfo.id) + return undefined + } + + return terminalInfo + } + static removeTerminal(id: number) { this.terminals = this.terminals.filter((t) => t.id !== id) } diff --git a/src/integrations/terminal/__tests__/TerminalProcess.test.ts b/src/integrations/terminal/__tests__/TerminalProcess.test.ts index 9ccbaef920e..11c0339f27e 100644 --- a/src/integrations/terminal/__tests__/TerminalProcess.test.ts +++ b/src/integrations/terminal/__tests__/TerminalProcess.test.ts @@ -1,9 +1,24 @@ -import { TerminalProcess, mergePromise } from "../TerminalProcess" +// npx jest src/integrations/terminal/__tests__/TerminalProcess.test.ts + import * as vscode from "vscode" -import { EventEmitter } from "events" -// Mock vscode -jest.mock("vscode") +import { TerminalProcess, mergePromise } from "../TerminalProcess" +import { TerminalInfo, TerminalRegistry } from "../TerminalRegistry" + +// Mock vscode.window.createTerminal +const mockCreateTerminal = jest.fn() + +jest.mock("vscode", () => ({ + window: { + createTerminal: (...args: any[]) => { + mockCreateTerminal(...args) + return { + exitStatus: undefined, + } + }, + }, + ThemeIcon: jest.fn(), +})) describe("TerminalProcess", () => { let terminalProcess: TerminalProcess @@ -14,6 +29,7 @@ describe("TerminalProcess", () => { } } > + let 
mockTerminalInfo: TerminalInfo let mockExecution: any let mockStream: AsyncIterableIterator @@ -25,7 +41,7 @@ describe("TerminalProcess", () => { shellIntegration: { executeCommand: jest.fn(), }, - name: "Mock Terminal", + name: "Roo Code", processId: Promise.resolve(123), creationOptions: {}, exitStatus: undefined, @@ -42,27 +58,39 @@ describe("TerminalProcess", () => { } > + mockTerminalInfo = { + terminal: mockTerminal, + busy: false, + lastCommand: "", + id: 1, + running: false, + streamClosed: false, + } + + TerminalRegistry["terminals"].push(mockTerminalInfo) + // Reset event listeners terminalProcess.removeAllListeners() }) describe("run", () => { it("handles shell integration commands correctly", async () => { - const lines: string[] = [] - terminalProcess.on("line", (line) => { - // Skip empty lines used for loading spinner - if (line !== "") { - lines.push(line) + let lines: string[] = [] + + terminalProcess.on("completed", (output) => { + if (output) { + lines = output.split("\n") } }) - // Mock stream data with shell integration sequences + // Mock stream data with shell integration sequences. mockStream = (async function* () { - // The first chunk contains the command start sequence + yield "\x1b]633;C\x07" // The first chunk contains the command start sequence with bell character. yield "Initial output\n" yield "More output\n" - // The last chunk contains the command end sequence yield "Final output" + yield "\x1b]633;D\x07" // The last chunk contains the command end sequence with bell character. + terminalProcess.emit("shell_execution_complete", mockTerminalInfo.id, { exitCode: 0 }) })() mockExecution = { @@ -71,12 +99,9 @@ describe("TerminalProcess", () => { mockTerminal.shellIntegration.executeCommand.mockReturnValue(mockExecution) - const completedPromise = new Promise((resolve) => { - terminalProcess.once("completed", resolve) - }) - - await terminalProcess.run(mockTerminal, "test command") - await completedPromise + const runPromise = terminalProcess.run(mockTerminal, "test command") + terminalProcess.emit("stream_available", mockTerminalInfo.id, mockStream) + await runPromise expect(lines).toEqual(["Initial output", "More output", "Final output"]) expect(terminalProcess.isHot).toBe(false) @@ -99,95 +124,41 @@ describe("TerminalProcess", () => { }) it("sets hot state for compiling commands", async () => { - const lines: string[] = [] - terminalProcess.on("line", (line) => { - if (line !== "") { - lines.push(line) + let lines: string[] = [] + + terminalProcess.on("completed", (output) => { + if (output) { + lines = output.split("\n") } }) - // Create a promise that resolves when the first chunk is processed - const firstChunkProcessed = new Promise((resolve) => { - terminalProcess.on("line", () => resolve()) + const completePromise = new Promise((resolve) => { + terminalProcess.on("shell_execution_complete", () => resolve()) }) mockStream = (async function* () { + yield "\x1b]633;C\x07" // The first chunk contains the command start sequence with bell character. yield "compiling...\n" - // Wait to ensure hot state check happens after first chunk - await new Promise((resolve) => setTimeout(resolve, 10)) yield "still compiling...\n" yield "done" + yield "\x1b]633;D\x07" // The last chunk contains the command end sequence with bell character. 
+ terminalProcess.emit("shell_execution_complete", mockTerminalInfo.id, { exitCode: 0 }) })() - mockExecution = { + mockTerminal.shellIntegration.executeCommand.mockReturnValue({ read: jest.fn().mockReturnValue(mockStream), - } - - mockTerminal.shellIntegration.executeCommand.mockReturnValue(mockExecution) + }) - // Start the command execution const runPromise = terminalProcess.run(mockTerminal, "npm run build") + terminalProcess.emit("stream_available", mockTerminalInfo.id, mockStream) - // Wait for the first chunk to be processed - await firstChunkProcessed - - // Hot state should be true while compiling expect(terminalProcess.isHot).toBe(true) - - // Complete the execution - const completedPromise = new Promise((resolve) => { - terminalProcess.once("completed", resolve) - }) - await runPromise - await completedPromise expect(lines).toEqual(["compiling...", "still compiling...", "done"]) - }) - }) - - describe("buffer processing", () => { - it("correctly processes and emits lines", () => { - const lines: string[] = [] - terminalProcess.on("line", (line) => lines.push(line)) - - // Simulate incoming chunks - terminalProcess["emitIfEol"]("first line\n") - terminalProcess["emitIfEol"]("second") - terminalProcess["emitIfEol"](" line\n") - terminalProcess["emitIfEol"]("third line") - - expect(lines).toEqual(["first line", "second line"]) - - // Process remaining buffer - terminalProcess["emitRemainingBufferIfListening"]() - expect(lines).toEqual(["first line", "second line", "third line"]) - }) - it("handles Windows-style line endings", () => { - const lines: string[] = [] - terminalProcess.on("line", (line) => lines.push(line)) - - terminalProcess["emitIfEol"]("line1\r\nline2\r\n") - - expect(lines).toEqual(["line1", "line2"]) - }) - }) - - describe("removeLastLineArtifacts", () => { - it("removes terminal artifacts from output", () => { - const cases = [ - ["output%", "output"], - ["output$ ", "output"], - ["output#", "output"], - ["output> ", "output"], - ["multi\nline%", "multi\nline"], - ["no artifacts", "no artifacts"], - ] - - for (const [input, expected] of cases) { - expect(terminalProcess["removeLastLineArtifacts"](input)).toBe(expected) - } + await completePromise + expect(terminalProcess.isHot).toBe(false) }) }) @@ -204,14 +175,14 @@ describe("TerminalProcess", () => { }) describe("getUnretrievedOutput", () => { - it("returns and clears unretrieved output", () => { - terminalProcess["fullOutput"] = "previous\nnew output" - terminalProcess["lastRetrievedIndex"] = 9 // After "previous\n" + it.only("returns and clears unretrieved output", () => { + terminalProcess["fullOutput"] = `\x1b]633;C\x07previous\nnew output\x1b]633;D\x07` + terminalProcess["lastRetrievedIndex"] = 17 // After "previous\n" const unretrieved = terminalProcess.getUnretrievedOutput() - expect(unretrieved).toBe("new output") - expect(terminalProcess["lastRetrievedIndex"]).toBe(terminalProcess["fullOutput"].length) + + expect(terminalProcess["lastRetrievedIndex"]).toBe(terminalProcess["fullOutput"].length - "previous".length) }) }) diff --git a/src/integrations/terminal/__tests__/TerminalRegistry.test.ts b/src/integrations/terminal/__tests__/TerminalRegistry.test.ts index cc667a851b9..a2b8fcd3b08 100644 --- a/src/integrations/terminal/__tests__/TerminalRegistry.test.ts +++ b/src/integrations/terminal/__tests__/TerminalRegistry.test.ts @@ -1,4 +1,5 @@ -import * as vscode from "vscode" +// npx jest src/integrations/terminal/__tests__/TerminalRegistry.test.ts + import { TerminalRegistry } from "../TerminalRegistry" // 
Mock vscode.window.createTerminal @@ -30,6 +31,8 @@ describe("TerminalRegistry", () => { iconPath: expect.any(Object), env: { PAGER: "cat", + PROMPT_COMMAND: "sleep 0.050", + VTE_VERSION: "0", }, }) }) diff --git a/src/shared/combineCommandSequences.ts b/src/shared/combineCommandSequences.ts index 31fe219f041..cbd674fc070 100644 --- a/src/shared/combineCommandSequences.ts +++ b/src/shared/combineCommandSequences.ts @@ -44,7 +44,7 @@ export function combineCommandSequences(messages: ClineMessage[]): ClineMessage[ // handle cases where we receive empty command_output (ie when extension is relinquishing control over exit command button) const output = messages[j].text || "" if (output.length > 0) { - combinedText += "\n" + output + combinedText += output } } j++ From e49d886d63965063b4392f8c36f1c052d7ac134c Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Fri, 28 Feb 2025 10:21:52 -0800 Subject: [PATCH 116/145] Re-enable tests --- src/integrations/terminal/__tests__/TerminalProcess.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/integrations/terminal/__tests__/TerminalProcess.test.ts b/src/integrations/terminal/__tests__/TerminalProcess.test.ts index 11c0339f27e..44cae92580f 100644 --- a/src/integrations/terminal/__tests__/TerminalProcess.test.ts +++ b/src/integrations/terminal/__tests__/TerminalProcess.test.ts @@ -175,7 +175,7 @@ describe("TerminalProcess", () => { }) describe("getUnretrievedOutput", () => { - it.only("returns and clears unretrieved output", () => { + it("returns and clears unretrieved output", () => { terminalProcess["fullOutput"] = `\x1b]633;C\x07previous\nnew output\x1b]633;D\x07` terminalProcess["lastRetrievedIndex"] = 17 // After "previous\n" From 4570ead71fb0ffead4af6ae3467831f0ed01f2de Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Fri, 28 Feb 2025 17:17:19 -0500 Subject: [PATCH 117/145] Update ChatView.tsx --- webview-ui/src/components/chat/ChatView.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webview-ui/src/components/chat/ChatView.tsx b/webview-ui/src/components/chat/ChatView.tsx index fcd1ba9a3b4..786f1ea957c 100644 --- a/webview-ui/src/components/chat/ChatView.tsx +++ b/webview-ui/src/components/chat/ChatView.tsx @@ -880,7 +880,7 @@ const ChatView = ({ isHidden, showAnnouncement, hideAnnouncement, showHistoryVie const placeholderText = useMemo(() => { const baseText = task ? "Type a message..." : "Type your task here..." const contextText = "(@ to add context, / to switch modes" - const imageText = shouldDisableImages ? "hold shift to drag in files" : ", hold shift to drag in files/images" + const imageText = shouldDisableImages ? 
", hold shift to drag in files" : ", hold shift to drag in files/images" return baseText + `\n${contextText}${imageText})` }, [task, shouldDisableImages]) From 4caa49228fcfdac74872cb6d0486e380d9c984e6 Mon Sep 17 00:00:00 2001 From: ashktn Date: Fri, 28 Feb 2025 18:54:34 -0500 Subject: [PATCH 118/145] feat: Add support for Gemini models on Vertex AI --- .changeset/dry-suits-shake.md | 5 + package-lock.json | 13 + package.json | 1 + src/api/providers/__tests__/vertex.test.ts | 334 +++++++++++++++-- src/api/providers/vertex.ts | 128 ++++++- .../__tests__/vertex-gemini-format.test.ts | 338 ++++++++++++++++++ src/api/transform/vertex-gemini-format.ts | 83 +++++ src/shared/api.ts | 40 +++ 8 files changed, 897 insertions(+), 45 deletions(-) create mode 100644 .changeset/dry-suits-shake.md create mode 100644 src/api/transform/__tests__/vertex-gemini-format.test.ts create mode 100644 src/api/transform/vertex-gemini-format.ts diff --git a/.changeset/dry-suits-shake.md b/.changeset/dry-suits-shake.md new file mode 100644 index 00000000000..95bb8d3db4e --- /dev/null +++ b/.changeset/dry-suits-shake.md @@ -0,0 +1,5 @@ +--- +"roo-cline": minor +--- + +Add Gemini models on Vertex AI diff --git a/package-lock.json b/package-lock.json index 950769b39b4..17cad607a1b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -12,6 +12,7 @@ "@anthropic-ai/sdk": "^0.37.0", "@anthropic-ai/vertex-sdk": "^0.7.0", "@aws-sdk/client-bedrock-runtime": "^3.706.0", + "@google-cloud/vertexai": "^1.9.3", "@google/generative-ai": "^0.18.0", "@mistralai/mistralai": "^1.3.6", "@modelcontextprotocol/sdk": "^1.0.1", @@ -3241,6 +3242,18 @@ "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, + "node_modules/@google-cloud/vertexai": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/@google-cloud/vertexai/-/vertexai-1.9.3.tgz", + "integrity": "sha512-35o5tIEMLW3JeFJOaaMNR2e5sq+6rpnhrF97PuAxeOm0GlqVTESKhkGj7a5B5mmJSSSU3hUfIhcQCRRsw4Ipzg==", + "license": "Apache-2.0", + "dependencies": { + "google-auth-library": "^9.1.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, "node_modules/@google/generative-ai": { "version": "0.18.0", "resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.18.0.tgz", diff --git a/package.json b/package.json index fe64af16d65..8e8bb0712a9 100644 --- a/package.json +++ b/package.json @@ -308,6 +308,7 @@ "@anthropic-ai/vertex-sdk": "^0.7.0", "@aws-sdk/client-bedrock-runtime": "^3.706.0", "@google/generative-ai": "^0.18.0", + "@google-cloud/vertexai": "^1.9.3", "@mistralai/mistralai": "^1.3.6", "@modelcontextprotocol/sdk": "^1.0.1", "@types/clone-deep": "^4.0.4", diff --git a/src/api/providers/__tests__/vertex.test.ts b/src/api/providers/__tests__/vertex.test.ts index 9cf92f0a16b..d3f34fcfa16 100644 --- a/src/api/providers/__tests__/vertex.test.ts +++ b/src/api/providers/__tests__/vertex.test.ts @@ -6,6 +6,7 @@ import { BetaThinkingConfigParam } from "@anthropic-ai/sdk/resources/beta" import { VertexHandler } from "../vertex" import { ApiStreamChunk } from "../../transform/stream" +import { VertexAI } from "@google-cloud/vertexai" // Mock Vertex SDK jest.mock("@anthropic-ai/vertex-sdk", () => ({ @@ -49,24 +50,100 @@ jest.mock("@anthropic-ai/vertex-sdk", () => ({ })), })) -describe("VertexHandler", () => { - let handler: VertexHandler +// Mock Vertex Gemini SDK +jest.mock("@google-cloud/vertexai", () => { + const mockGenerateContentStream = jest.fn().mockImplementation(() => { + return { + stream: { + async *[Symbol.asyncIterator]() { + yield { + candidates: [ + { + 
content: { + parts: [{ text: "Test Gemini response" }], + }, + }, + ], + } + }, + }, + response: { + usageMetadata: { + promptTokenCount: 5, + candidatesTokenCount: 10, + }, + }, + } + }) - beforeEach(() => { - handler = new VertexHandler({ - apiModelId: "claude-3-5-sonnet-v2@20241022", - vertexProjectId: "test-project", - vertexRegion: "us-central1", - }) + const mockGenerateContent = jest.fn().mockResolvedValue({ + response: { + candidates: [ + { + content: { + parts: [{ text: "Test Gemini response" }], + }, + }, + ], + }, + }) + + const mockGenerativeModel = jest.fn().mockImplementation(() => { + return { + generateContentStream: mockGenerateContentStream, + generateContent: mockGenerateContent, + } }) + return { + VertexAI: jest.fn().mockImplementation(() => { + return { + getGenerativeModel: mockGenerativeModel, + } + }), + GenerativeModel: mockGenerativeModel, + } +}) + +describe("VertexHandler", () => { + let handler: VertexHandler + describe("constructor", () => { - it("should initialize with provided config", () => { + it("should initialize with provided config for Claude", () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + expect(AnthropicVertex).toHaveBeenCalledWith({ projectId: "test-project", region: "us-central1", }) }) + + it("should initialize with provided config for Gemini", () => { + handler = new VertexHandler({ + apiModelId: "gemini-1.5-pro-001", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + + expect(VertexAI).toHaveBeenCalledWith({ + project: "test-project", + location: "us-central1", + }) + }) + + it("should throw error for invalid model", () => { + expect(() => { + new VertexHandler({ + apiModelId: "invalid-model", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + }).toThrow("Unknown model ID: invalid-model") + }) }) describe("createMessage", () => { @@ -83,7 +160,13 @@ describe("VertexHandler", () => { const systemPrompt = "You are a helpful assistant" - it("should handle streaming responses correctly", async () => { + it("should handle streaming responses correctly for Claude", async () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + const mockStream = [ { type: "message_start", @@ -127,7 +210,7 @@ describe("VertexHandler", () => { } const mockCreate = jest.fn().mockResolvedValue(asyncIterator) - ;(handler["client"].messages as any).create = mockCreate + ;(handler["anthropicClient"].messages as any).create = mockCreate const stream = handler.createMessage(systemPrompt, mockMessages) const chunks: ApiStreamChunk[] = [] @@ -187,7 +270,58 @@ describe("VertexHandler", () => { }) }) - it("should handle multiple content blocks with line breaks", async () => { + it("should handle streaming responses correctly for Gemini", async () => { + const mockGemini = require("@google-cloud/vertexai") + const mockGenerateContentStream = mockGemini.VertexAI().getGenerativeModel().generateContentStream + handler = new VertexHandler({ + apiModelId: "gemini-1.5-pro-001", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + + const stream = handler.createMessage(systemPrompt, mockMessages) + const chunks: ApiStreamChunk[] = [] + + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks.length).toBe(2) + expect(chunks[0]).toEqual({ + type: "text", + text: "Test Gemini response", + }) 
+ expect(chunks[1]).toEqual({ + type: "usage", + inputTokens: 5, + outputTokens: 10, + }) + + expect(mockGenerateContentStream).toHaveBeenCalledWith({ + contents: [ + { + role: "user", + parts: [{ text: "Hello" }], + }, + { + role: "model", + parts: [{ text: "Hi there!" }], + }, + ], + generationConfig: { + maxOutputTokens: 16384, + temperature: 0, + }, + }) + }) + + it("should handle multiple content blocks with line breaks for Claude", async () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + const mockStream = [ { type: "content_block_start", @@ -216,7 +350,7 @@ describe("VertexHandler", () => { } const mockCreate = jest.fn().mockResolvedValue(asyncIterator) - ;(handler["client"].messages as any).create = mockCreate + ;(handler["anthropicClient"].messages as any).create = mockCreate const stream = handler.createMessage(systemPrompt, mockMessages) const chunks: ApiStreamChunk[] = [] @@ -240,10 +374,16 @@ describe("VertexHandler", () => { }) }) - it("should handle API errors", async () => { + it("should handle API errors for Claude", async () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + const mockError = new Error("Vertex API error") const mockCreate = jest.fn().mockRejectedValue(mockError) - ;(handler["client"].messages as any).create = mockCreate + ;(handler["anthropicClient"].messages as any).create = mockCreate const stream = handler.createMessage(systemPrompt, mockMessages) @@ -254,7 +394,13 @@ describe("VertexHandler", () => { }).rejects.toThrow("Vertex API error") }) - it("should handle prompt caching for supported models", async () => { + it("should handle prompt caching for supported models for Claude", async () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + const mockStream = [ { type: "message_start", @@ -299,7 +445,7 @@ describe("VertexHandler", () => { } const mockCreate = jest.fn().mockResolvedValue(asyncIterator) - ;(handler["client"].messages as any).create = mockCreate + ;(handler["anthropicClient"].messages as any).create = mockCreate const stream = handler.createMessage(systemPrompt, [ { @@ -383,7 +529,13 @@ describe("VertexHandler", () => { ) }) - it("should handle cache-related usage metrics", async () => { + it("should handle cache-related usage metrics for Claude", async () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + const mockStream = [ { type: "message_start", @@ -415,7 +567,7 @@ describe("VertexHandler", () => { } const mockCreate = jest.fn().mockResolvedValue(asyncIterator) - ;(handler["client"].messages as any).create = mockCreate + ;(handler["anthropicClient"].messages as any).create = mockCreate const stream = handler.createMessage(systemPrompt, mockMessages) const chunks: ApiStreamChunk[] = [] @@ -442,7 +594,13 @@ describe("VertexHandler", () => { const systemPrompt = "You are a helpful assistant" - it("should handle thinking content blocks and deltas", async () => { + it("should handle thinking content blocks and deltas for Claude", async () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + const mockStream = [ { 
type: "message_start", @@ -488,7 +646,7 @@ describe("VertexHandler", () => { } const mockCreate = jest.fn().mockResolvedValue(asyncIterator) - ;(handler["client"].messages as any).create = mockCreate + ;(handler["anthropicClient"].messages as any).create = mockCreate const stream = handler.createMessage(systemPrompt, mockMessages) const chunks: ApiStreamChunk[] = [] @@ -510,7 +668,13 @@ describe("VertexHandler", () => { expect(textChunks[1].text).toBe("Here's my answer:") }) - it("should handle multiple thinking blocks with line breaks", async () => { + it("should handle multiple thinking blocks with line breaks for Claude", async () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + const mockStream = [ { type: "content_block_start", @@ -539,7 +703,7 @@ describe("VertexHandler", () => { } const mockCreate = jest.fn().mockResolvedValue(asyncIterator) - ;(handler["client"].messages as any).create = mockCreate + ;(handler["anthropicClient"].messages as any).create = mockCreate const stream = handler.createMessage(systemPrompt, mockMessages) const chunks: ApiStreamChunk[] = [] @@ -565,10 +729,16 @@ describe("VertexHandler", () => { }) describe("completePrompt", () => { - it("should complete prompt successfully", async () => { + it("should complete prompt successfully for Claude", async () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + const result = await handler.completePrompt("Test prompt") expect(result).toBe("Test response") - expect(handler["client"].messages.create).toHaveBeenCalledWith({ + expect(handler["anthropicClient"].messages.create).toHaveBeenCalledWith({ model: "claude-3-5-sonnet-v2@20241022", max_tokens: 8192, temperature: 0, @@ -583,31 +753,109 @@ describe("VertexHandler", () => { }) }) - it("should handle API errors", async () => { + it("should complete prompt successfully for Gemini", async () => { + const mockGemini = require("@google-cloud/vertexai") + const mockGenerateContent = mockGemini.VertexAI().getGenerativeModel().generateContent + + handler = new VertexHandler({ + apiModelId: "gemini-1.5-pro-001", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + + const result = await handler.completePrompt("Test prompt") + expect(result).toBe("Test Gemini response") + expect(mockGenerateContent).toHaveBeenCalled() + expect(mockGenerateContent).toHaveBeenCalledWith({ + contents: [{ role: "user", parts: [{ text: "Test prompt" }] }], + generationConfig: { + temperature: 0, + }, + }) + }) + + it("should handle API errors for Claude", async () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + const mockError = new Error("Vertex API error") const mockCreate = jest.fn().mockRejectedValue(mockError) - ;(handler["client"].messages as any).create = mockCreate + ;(handler["anthropicClient"].messages as any).create = mockCreate + + await expect(handler.completePrompt("Test prompt")).rejects.toThrow( + "Vertex completion error: Vertex API error", + ) + }) + + it("should handle API errors for Gemini", async () => { + const mockGemini = require("@google-cloud/vertexai") + const mockGenerateContent = mockGemini.VertexAI().getGenerativeModel().generateContent + mockGenerateContent.mockRejectedValue(new Error("Vertex API error")) + handler = new 
VertexHandler({ + apiModelId: "gemini-1.5-pro-001", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) await expect(handler.completePrompt("Test prompt")).rejects.toThrow( "Vertex completion error: Vertex API error", ) }) - it("should handle non-text content", async () => { + it("should handle non-text content for Claude", async () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + const mockCreate = jest.fn().mockResolvedValue({ content: [{ type: "image" }], }) - ;(handler["client"].messages as any).create = mockCreate + ;(handler["anthropicClient"].messages as any).create = mockCreate const result = await handler.completePrompt("Test prompt") expect(result).toBe("") }) - it("should handle empty response", async () => { + it("should handle empty response for Claude", async () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + const mockCreate = jest.fn().mockResolvedValue({ content: [{ type: "text", text: "" }], }) - ;(handler["client"].messages as any).create = mockCreate + ;(handler["anthropicClient"].messages as any).create = mockCreate + + const result = await handler.completePrompt("Test prompt") + expect(result).toBe("") + }) + + it("should handle empty response for Gemini", async () => { + const mockGemini = require("@google-cloud/vertexai") + const mockGenerateContent = mockGemini.VertexAI().getGenerativeModel().generateContent + mockGenerateContent.mockResolvedValue({ + response: { + candidates: [ + { + content: { + parts: [{ text: "" }], + }, + }, + ], + }, + }) + handler = new VertexHandler({ + apiModelId: "gemini-1.5-pro-001", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) const result = await handler.completePrompt("Test prompt") expect(result).toBe("") @@ -615,7 +863,13 @@ describe("VertexHandler", () => { }) describe("getModel", () => { - it("should return correct model info", () => { + it("should return correct model info for Claude", () => { + handler = new VertexHandler({ + apiModelId: "claude-3-5-sonnet-v2@20241022", + vertexProjectId: "test-project", + vertexRegion: "us-central1", + }) + const modelInfo = handler.getModel() expect(modelInfo.id).toBe("claude-3-5-sonnet-v2@20241022") expect(modelInfo.info).toBeDefined() @@ -623,14 +877,18 @@ describe("VertexHandler", () => { expect(modelInfo.info.contextWindow).toBe(200_000) }) - it("should return default model if invalid model specified", () => { - const invalidHandler = new VertexHandler({ - apiModelId: "invalid-model", + it("should return correct model info for Gemini", () => { + handler = new VertexHandler({ + apiModelId: "gemini-2.0-flash-001", vertexProjectId: "test-project", vertexRegion: "us-central1", }) - const modelInfo = invalidHandler.getModel() - expect(modelInfo.id).toBe("claude-3-7-sonnet@20250219") // Default model + + const modelInfo = handler.getModel() + expect(modelInfo.id).toBe("gemini-2.0-flash-001") + expect(modelInfo.info).toBeDefined() + expect(modelInfo.info.maxTokens).toBe(8192) + expect(modelInfo.info.contextWindow).toBe(1048576) }) }) @@ -724,7 +982,7 @@ describe("VertexHandler", () => { }, } }) - ;(thinkingHandler["client"].messages as any).create = mockCreate + ;(thinkingHandler["anthropicClient"].messages as any).create = mockCreate await thinkingHandler .createMessage("You are a helpful assistant", [{ role: "user", content: "Hello" }]) 
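For reference, the streaming behaviour exercised by these tests is consumed the same way regardless of which backend the handler routes to. Below is a minimal caller-side sketch — not taken from the patch — assuming ApiStream is an async iterable of ApiStreamChunk and that the option names match ApiHandlerOptions as the tests above suggest; the project id is a placeholder.

import { VertexHandler } from "../vertex"

async function collectResponse(): Promise<{ text: string; inputTokens: number; outputTokens: number }> {
	// "gemini-*" model ids route to the Vertex Gemini client, "claude-*" ids to AnthropicVertex.
	const handler = new VertexHandler({
		apiModelId: "gemini-1.5-pro-001",
		vertexProjectId: "test-project", // hypothetical project id
		vertexRegion: "us-central1",
	})

	let text = ""
	let inputTokens = 0
	let outputTokens = 0

	// createMessage returns an async generator of chunks; "text" chunks carry content,
	// "usage" chunks carry token counts (see the assertions in the tests above).
	for await (const chunk of handler.createMessage("You are a helpful assistant", [
		{ role: "user", content: "Hello" },
	])) {
		if (chunk.type === "text") {
			text += chunk.text
		} else if (chunk.type === "usage") {
			inputTokens += chunk.inputTokens
			outputTokens += chunk.outputTokens
		}
	}

	return { text, inputTokens, outputTokens }
}
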
diff --git a/src/api/providers/vertex.ts b/src/api/providers/vertex.ts index a25fad07ee8..5b8fc1ad789 100644 --- a/src/api/providers/vertex.ts +++ b/src/api/providers/vertex.ts @@ -5,6 +5,8 @@ import { ApiHandler, SingleCompletionHandler } from "../" import { BetaThinkingConfigParam } from "@anthropic-ai/sdk/resources/beta" import { ApiHandlerOptions, ModelInfo, vertexDefaultModelId, VertexModelId, vertexModels } from "../../shared/api" import { ApiStream } from "../transform/stream" +import { VertexAI } from "@google-cloud/vertexai" +import { convertAnthropicMessageToVertexGemini } from "../transform/vertex-gemini-format" // Types for Vertex SDK @@ -91,19 +93,37 @@ interface VertexMessageStreamEvent { thinking: string } } - // https://docs.anthropic.com/en/api/claude-on-vertex-ai export class VertexHandler implements ApiHandler, SingleCompletionHandler { + MODEL_CLAUDE = "claude" + MODEL_GEMINI = "gemini" + private options: ApiHandlerOptions - private client: AnthropicVertex + private anthropicClient: AnthropicVertex + private geminiClient: VertexAI + private modelType: string constructor(options: ApiHandlerOptions) { this.options = options - this.client = new AnthropicVertex({ + + if (this.options.apiModelId?.startsWith(this.MODEL_CLAUDE)) { + this.modelType = this.MODEL_CLAUDE + } else if (this.options.apiModelId?.startsWith(this.MODEL_GEMINI)) { + this.modelType = this.MODEL_GEMINI + } else { + throw new Error(`Unknown model ID: ${this.options.apiModelId}`) + } + + this.anthropicClient = new AnthropicVertex({ projectId: this.options.vertexProjectId ?? "not-provided", // https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude#regions region: this.options.vertexRegion ?? "us-east5", }) + + this.geminiClient = new VertexAI({ + project: this.options.vertexProjectId ?? "not-provided", + location: this.options.vertexRegion ?? "us-east5", + }) } private formatMessageForCache(message: Anthropic.Messages.MessageParam, shouldCache: boolean): VertexMessage { @@ -154,7 +174,42 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { } } - async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { + private async *createGeminiMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { + const model = this.geminiClient.getGenerativeModel({ + model: this.getModel().id, + systemInstruction: systemPrompt, + }) + + const result = await model.generateContentStream({ + contents: messages.map(convertAnthropicMessageToVertexGemini), + generationConfig: { + maxOutputTokens: this.getModel().info.maxTokens, + temperature: this.options.modelTemperature ?? 0, + }, + }) + + for await (const chunk of result.stream) { + if (chunk.candidates?.[0]?.content?.parts) { + for (const part of chunk.candidates[0].content.parts) { + if (part.text) { + yield { + type: "text", + text: part.text, + } + } + } + } + } + + const response = await result.response + yield { + type: "usage", + inputTokens: response.usageMetadata?.promptTokenCount ?? 0, + outputTokens: response.usageMetadata?.candidatesTokenCount ?? 
0, + } + } + + private async *createClaudeMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { const model = this.getModel() let { id, info, temperature, maxTokens, thinking } = model const useCache = model.info.supportsPromptCache @@ -192,7 +247,7 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { stream: true, } - const stream = (await this.client.messages.create( + const stream = (await this.anthropicClient.messages.create( params as Anthropic.Messages.MessageCreateParamsStreaming, )) as unknown as AnthropicStream @@ -272,6 +327,22 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { } } + async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { + switch (this.modelType) { + case this.MODEL_CLAUDE: { + yield* this.createClaudeMessage(systemPrompt, messages) + break + } + case this.MODEL_GEMINI: { + yield* this.createGeminiMessage(systemPrompt, messages) + break + } + default: { + throw new Error(`Invalid model type: ${this.modelType}`) + } + } + } + getModel(): { id: VertexModelId info: ModelInfo @@ -316,7 +387,36 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { return { id, info, temperature, maxTokens, thinking } } - async completePrompt(prompt: string): Promise { + private async completePromptGemini(prompt: string): Promise { + try { + const model = this.geminiClient.getGenerativeModel({ + model: this.getModel().id, + }) + + const result = await model.generateContent({ + contents: [{ role: "user", parts: [{ text: prompt }] }], + generationConfig: { + temperature: this.options.modelTemperature ?? 0, + }, + }) + + let text = "" + result.response.candidates?.forEach((candidate) => { + candidate.content.parts.forEach((part) => { + text += part.text + }) + }) + + return text + } catch (error) { + if (error instanceof Error) { + throw new Error(`Vertex completion error: ${error.message}`) + } + throw error + } + } + + private async completePromptClaude(prompt: string): Promise { try { let { id, info, temperature, maxTokens, thinking } = this.getModel() const useCache = info.supportsPromptCache @@ -344,7 +444,7 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { stream: false, } - const response = (await this.client.messages.create( + const response = (await this.anthropicClient.messages.create( params as Anthropic.Messages.MessageCreateParamsNonStreaming, )) as unknown as VertexMessageResponse @@ -360,4 +460,18 @@ export class VertexHandler implements ApiHandler, SingleCompletionHandler { throw error } } + + async completePrompt(prompt: string): Promise { + switch (this.modelType) { + case this.MODEL_CLAUDE: { + return this.completePromptClaude(prompt) + } + case this.MODEL_GEMINI: { + return this.completePromptGemini(prompt) + } + default: { + throw new Error(`Invalid model type: ${this.modelType}`) + } + } + } } diff --git a/src/api/transform/__tests__/vertex-gemini-format.test.ts b/src/api/transform/__tests__/vertex-gemini-format.test.ts new file mode 100644 index 00000000000..bcb26df0992 --- /dev/null +++ b/src/api/transform/__tests__/vertex-gemini-format.test.ts @@ -0,0 +1,338 @@ +// npx jest src/api/transform/__tests__/vertex-gemini-format.test.ts + +import { Anthropic } from "@anthropic-ai/sdk" + +import { convertAnthropicMessageToVertexGemini } from "../vertex-gemini-format" + +describe("convertAnthropicMessageToVertexGemini", () => { + it("should convert a simple text message", () => { + const 
anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: "Hello, world!", + } + + const result = convertAnthropicMessageToVertexGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [{ text: "Hello, world!" }], + }) + }) + + it("should convert assistant role to model role", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "assistant", + content: "I'm an assistant", + } + + const result = convertAnthropicMessageToVertexGemini(anthropicMessage) + + expect(result).toEqual({ + role: "model", + parts: [{ text: "I'm an assistant" }], + }) + }) + + it("should convert a message with text blocks", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { type: "text", text: "First paragraph" }, + { type: "text", text: "Second paragraph" }, + ], + } + + const result = convertAnthropicMessageToVertexGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [{ text: "First paragraph" }, { text: "Second paragraph" }], + }) + }) + + it("should convert a message with an image", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { type: "text", text: "Check out this image:" }, + { + type: "image", + source: { + type: "base64", + media_type: "image/jpeg", + data: "base64encodeddata", + }, + }, + ], + } + + const result = convertAnthropicMessageToVertexGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [ + { text: "Check out this image:" }, + { + inlineData: { + data: "base64encodeddata", + mimeType: "image/jpeg", + }, + }, + ], + }) + }) + + it("should throw an error for unsupported image source type", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { + type: "image", + source: { + type: "url", // Not supported + url: "https://example.com/image.jpg", + } as any, + }, + ], + } + + expect(() => convertAnthropicMessageToVertexGemini(anthropicMessage)).toThrow("Unsupported image source type") + }) + + it("should convert a message with tool use", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "assistant", + content: [ + { type: "text", text: "Let me calculate that for you." }, + { + type: "tool_use", + id: "calc-123", + name: "calculator", + input: { operation: "add", numbers: [2, 3] }, + }, + ], + } + + const result = convertAnthropicMessageToVertexGemini(anthropicMessage) + + expect(result).toEqual({ + role: "model", + parts: [ + { text: "Let me calculate that for you." 
}, + { + functionCall: { + name: "calculator", + args: { operation: "add", numbers: [2, 3] }, + }, + }, + ], + }) + }) + + it("should convert a message with tool result as string", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { type: "text", text: "Here's the result:" }, + { + type: "tool_result", + tool_use_id: "calculator-123", + content: "The result is 5", + }, + ], + } + + const result = convertAnthropicMessageToVertexGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [ + { text: "Here's the result:" }, + { + functionResponse: { + name: "calculator", + response: { + name: "calculator", + content: "The result is 5", + }, + }, + }, + ], + }) + }) + + it("should handle empty tool result content", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "calculator-123", + content: null as any, // Empty content + }, + ], + } + + const result = convertAnthropicMessageToVertexGemini(anthropicMessage) + + // Should skip the empty tool result + expect(result).toEqual({ + role: "user", + parts: [], + }) + }) + + it("should convert a message with tool result as array with text only", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "search-123", + content: [ + { type: "text", text: "First result" }, + { type: "text", text: "Second result" }, + ], + }, + ], + } + + const result = convertAnthropicMessageToVertexGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [ + { + functionResponse: { + name: "search", + response: { + name: "search", + content: "First result\n\nSecond result", + }, + }, + }, + ], + }) + }) + + it("should convert a message with tool result as array with text and images", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "search-123", + content: [ + { type: "text", text: "Search results:" }, + { + type: "image", + source: { + type: "base64", + media_type: "image/png", + data: "image1data", + }, + }, + { + type: "image", + source: { + type: "base64", + media_type: "image/jpeg", + data: "image2data", + }, + }, + ], + }, + ], + } + + const result = convertAnthropicMessageToVertexGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [ + { + functionResponse: { + name: "search", + response: { + name: "search", + content: "Search results:\n\n(See next part for image)", + }, + }, + }, + { + inlineData: { + data: "image1data", + mimeType: "image/png", + }, + }, + { + inlineData: { + data: "image2data", + mimeType: "image/jpeg", + }, + }, + ], + }) + }) + + it("should convert a message with tool result containing only images", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "imagesearch-123", + content: [ + { + type: "image", + source: { + type: "base64", + media_type: "image/png", + data: "onlyimagedata", + }, + }, + ], + }, + ], + } + + const result = convertAnthropicMessageToVertexGemini(anthropicMessage) + + expect(result).toEqual({ + role: "user", + parts: [ + { + functionResponse: { + name: "imagesearch", + response: { + name: "imagesearch", + content: "\n\n(See next part for image)", + }, + }, + }, + { + inlineData: { + data: "onlyimagedata", + mimeType: "image/png", + }, + }, + ], + }) + }) + 
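	// Note on the cases above: the converter recovers the Gemini function name by taking
	// everything before the first hyphen in the Anthropic tool_use_id (see
	// vertex-gemini-format.ts below). A small sketch of that convention, using
	// hypothetical ids — hyphenated tool names would be truncated by this rule.
	//
	//   const toolNameFromId = (toolUseId: string) => toolUseId.split("-")[0]
	//
	//   toolNameFromId("calculator-123") // => "calculator"
	//   toolNameFromId("imagesearch-123") // => "imagesearch"
	//   toolNameFromId("weather-api-7") // => "weather", not "weather-api"
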
+ it("should throw an error for unsupported content block type", () => { + const anthropicMessage: Anthropic.Messages.MessageParam = { + role: "user", + content: [ + { + type: "unknown_type", // Unsupported type + data: "some data", + } as any, + ], + } + + expect(() => convertAnthropicMessageToVertexGemini(anthropicMessage)).toThrow( + "Unsupported content block type: unknown_type", + ) + }) +}) diff --git a/src/api/transform/vertex-gemini-format.ts b/src/api/transform/vertex-gemini-format.ts new file mode 100644 index 00000000000..75abb7d3bed --- /dev/null +++ b/src/api/transform/vertex-gemini-format.ts @@ -0,0 +1,83 @@ +import { Anthropic } from "@anthropic-ai/sdk" +import { Content, FunctionCallPart, FunctionResponsePart, InlineDataPart, Part, TextPart } from "@google-cloud/vertexai" + +function convertAnthropicContentToVertexGemini(content: Anthropic.Messages.MessageParam["content"]): Part[] { + if (typeof content === "string") { + return [{ text: content } as TextPart] + } + + return content.flatMap((block) => { + switch (block.type) { + case "text": + return { text: block.text } as TextPart + case "image": + if (block.source.type !== "base64") { + throw new Error("Unsupported image source type") + } + return { + inlineData: { + data: block.source.data, + mimeType: block.source.media_type, + }, + } as InlineDataPart + case "tool_use": + return { + functionCall: { + name: block.name, + args: block.input, + }, + } as FunctionCallPart + case "tool_result": + const name = block.tool_use_id.split("-")[0] + if (!block.content) { + return [] + } + if (typeof block.content === "string") { + return { + functionResponse: { + name, + response: { + name, + content: block.content, + }, + }, + } as FunctionResponsePart + } else { + // The only case when tool_result could be array is when the tool failed and we're providing ie user feedback potentially with images + const textParts = block.content.filter((part) => part.type === "text") + const imageParts = block.content.filter((part) => part.type === "image") + const text = textParts.length > 0 ? textParts.map((part) => part.text).join("\n\n") : "" + const imageText = imageParts.length > 0 ? "\n\n(See next part for image)" : "" + return [ + { + functionResponse: { + name, + response: { + name, + content: text + imageText, + }, + }, + } as FunctionResponsePart, + ...imageParts.map( + (part) => + ({ + inlineData: { + data: part.source.data, + mimeType: part.source.media_type, + }, + }) as InlineDataPart, + ), + ] + } + default: + throw new Error(`Unsupported content block type: ${(block as any).type}`) + } + }) +} + +export function convertAnthropicMessageToVertexGemini(message: Anthropic.Messages.MessageParam): Content { + return { + role: message.role === "assistant" ? 
"model" : "user", + parts: convertAnthropicContentToVertexGemini(message.content), + } +} diff --git a/src/shared/api.ts b/src/shared/api.ts index 99e2986e882..b16e5142a0e 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -436,6 +436,46 @@ export const openRouterDefaultModelInfo: ModelInfo = { export type VertexModelId = keyof typeof vertexModels export const vertexDefaultModelId: VertexModelId = "claude-3-7-sonnet@20250219" export const vertexModels = { + "gemini-2.0-flash-001": { + maxTokens: 8192, + contextWindow: 1_048_576, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0.15, + outputPrice: 0.6, + }, + "gemini-2.0-flash-lite-001": { + maxTokens: 8192, + contextWindow: 1_048_576, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0.075, + outputPrice: 0.3, + }, + "gemini-2.0-flash-thinking-exp-01-21": { + maxTokens: 8192, + contextWindow: 32_768, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0, + outputPrice: 0, + }, + "gemini-1.5-flash-002": { + maxTokens: 8192, + contextWindow: 1_048_576, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0.075, + outputPrice: 0.3, + }, + "gemini-1.5-pro-002": { + maxTokens: 8192, + contextWindow: 2_097_152, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 1.25, + outputPrice: 5, + }, "claude-3-7-sonnet@20250219:thinking": { maxTokens: 64_000, contextWindow: 200_000, From 1d3f5380a80d09ac747d93c701473215bdc03ba6 Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Fri, 28 Feb 2025 21:23:11 -0800 Subject: [PATCH 119/145] Move integration tests into its own module --- .github/workflows/changeset-release.yml | 2 +- .github/workflows/code-qa.yml | 16 +- .vscodeignore | 3 +- .../.env.integration.example | 0 .vscode-test.mjs => e2e/.vscode-test.mjs | 2 +- {src/test => e2e}/VSCODE_INTEGRATION_TESTS.md | 4 +- e2e/package-lock.json | 2387 +++++++++++++++++ e2e/package.json | 21 + {src/test => e2e/src}/runTest.ts | 2 +- {src/test => e2e/src}/suite/extension.test.ts | 0 {src/test => e2e/src}/suite/index.ts | 3 +- {src/test => e2e/src}/suite/modes.test.ts | 0 {src/test => e2e/src}/suite/task.test.ts | 0 .../tsconfig.json | 7 +- knip.json | 4 +- package-lock.json | 930 +------ package.json | 33 +- src/exports/cline.d.ts | 93 + src/shared/modes.ts | 6 +- 19 files changed, 2551 insertions(+), 962 deletions(-) rename .env.integration.example => e2e/.env.integration.example (100%) rename .vscode-test.mjs => e2e/.vscode-test.mjs (89%) rename {src/test => e2e}/VSCODE_INTEGRATION_TESTS.md (98%) create mode 100644 e2e/package-lock.json create mode 100644 e2e/package.json rename {src/test => e2e/src}/runTest.ts (98%) rename {src/test => e2e/src}/suite/extension.test.ts (100%) rename {src/test => e2e/src}/suite/index.ts (95%) rename {src/test => e2e/src}/suite/modes.test.ts (100%) rename {src/test => e2e/src}/suite/task.test.ts (100%) rename tsconfig.integration.json => e2e/tsconfig.json (60%) diff --git a/.github/workflows/changeset-release.yml b/.github/workflows/changeset-release.yml index a2bcd3f0393..462516365b1 100644 --- a/.github/workflows/changeset-release.yml +++ b/.github/workflows/changeset-release.yml @@ -37,7 +37,7 @@ jobs: cache: 'npm' - name: Install Dependencies - run: npm run install:all + run: npm run install:ci # Check if there are any new changesets to process - name: Check for changesets diff --git a/.github/workflows/code-qa.yml b/.github/workflows/code-qa.yml index fde891f8041..b7292dd9ee4 100644 --- a/.github/workflows/code-qa.yml +++ 
b/.github/workflows/code-qa.yml @@ -20,7 +20,7 @@ jobs: node-version: '18' cache: 'npm' - name: Install dependencies - run: npm run install:all + run: npm run install:ci - name: Compile run: npm run compile - name: Check types @@ -39,7 +39,7 @@ jobs: node-version: '18' cache: 'npm' - name: Install dependencies - run: npm run install:all + run: npm run install:ci - name: Run knip checks run: npm run knip @@ -54,7 +54,7 @@ jobs: node-version: '18' cache: 'npm' - name: Install dependencies - run: npm run install:all + run: npm run install:ci - name: Run unit tests run: npx jest --silent @@ -69,7 +69,7 @@ jobs: node-version: '18' cache: 'npm' - name: Install dependencies - run: npm run install:all + run: npm run install:ci - name: Run unit tests working-directory: webview-ui run: npx jest --silent @@ -108,9 +108,11 @@ jobs: with: node-version: '18' cache: 'npm' + - name: Install dependencies + run: npm run install:ci - name: Create env.integration file + working-directory: e2e run: echo "OPENROUTER_API_KEY=${{ secrets.OPENROUTER_API_KEY }}" > .env.integration - - name: Install dependencies - run: npm run install:all - name: Run integration tests - run: xvfb-run -a npm run test:integration + working-directory: e2e + run: xvfb-run -a npm run ci diff --git a/.vscodeignore b/.vscodeignore index 638ac22db76..1fc5a728b04 100644 --- a/.vscodeignore +++ b/.vscodeignore @@ -4,6 +4,8 @@ .vscode/** .vscode-test/** out/** +out-integration/** +e2e/** node_modules/** src/** .gitignore @@ -25,7 +27,6 @@ demo.gif .roomodes cline_docs/** coverage/** -out-integration/** # Ignore all webview-ui files except the build directory (https://github.com/microsoft/vscode-webview-ui-toolkit-samples/blob/main/frameworks/hello-world-react-cra/.vscodeignore) webview-ui/src/** diff --git a/.env.integration.example b/e2e/.env.integration.example similarity index 100% rename from .env.integration.example rename to e2e/.env.integration.example diff --git a/.vscode-test.mjs b/e2e/.vscode-test.mjs similarity index 89% rename from .vscode-test.mjs rename to e2e/.vscode-test.mjs index dd7760789b3..16ea4271bae 100644 --- a/.vscode-test.mjs +++ b/e2e/.vscode-test.mjs @@ -6,7 +6,7 @@ import { defineConfig } from '@vscode/test-cli'; export default defineConfig({ label: 'integrationTest', - files: 'out-integration/test/**/*.test.js', + files: 'out/e2e/src/suite/**/*.test.js', workspaceFolder: '.', mocha: { ui: 'tdd', diff --git a/src/test/VSCODE_INTEGRATION_TESTS.md b/e2e/VSCODE_INTEGRATION_TESTS.md similarity index 98% rename from src/test/VSCODE_INTEGRATION_TESTS.md rename to e2e/VSCODE_INTEGRATION_TESTS.md index f5882fea1ea..25f54492de0 100644 --- a/src/test/VSCODE_INTEGRATION_TESTS.md +++ b/e2e/VSCODE_INTEGRATION_TESTS.md @@ -11,8 +11,8 @@ The integration tests use the `@vscode/test-electron` package to run tests in a ### Directory Structure ``` -src/test/ -├── runTest.ts # Main test runner +e2e/src/ +├── runTest.ts # Main test runner ├── suite/ │ ├── index.ts # Test suite configuration │ ├── modes.test.ts # Mode switching tests diff --git a/e2e/package-lock.json b/e2e/package-lock.json new file mode 100644 index 00000000000..278df120c28 --- /dev/null +++ b/e2e/package-lock.json @@ -0,0 +1,2387 @@ +{ + "name": "e2e", + "version": "0.1.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "e2e", + "version": "0.1.0", + "devDependencies": { + "@types/mocha": "^10.0.10", + "@vscode/test-cli": "^0.0.9", + "@vscode/test-electron": "^2.4.0", + "mocha": "^11.1.0", + "typescript": "^5.4.5" + } + }, + 
"node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/mocha": { + "version": "10.0.10", + "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-10.0.10.tgz", + "integrity": "sha512-xPyYSz1cMPnJQhl0CLMH68j3gprKZaTjG3s5Vi+fDgx+uhG9NOXwbVt52eFS8ECyXhyKcjDLCBEqBExKuiZb7Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@vscode/test-cli": { + "version": "0.0.9", + "resolved": "https://registry.npmjs.org/@vscode/test-cli/-/test-cli-0.0.9.tgz", + "integrity": "sha512-vsl5/ueE3Jf0f6XzB0ECHHMsd5A0Yu6StElb8a+XsubZW7kHNAOw4Y3TSSuDzKEpLnJ92nbMy1Zl+KLGCE6NaA==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "@types/mocha": "^10.0.2", + "c8": "^9.1.0", + "chokidar": "^3.5.3", + "enhanced-resolve": "^5.15.0", + "glob": "^10.3.10", + "minimatch": "^9.0.3", + "mocha": "^10.2.0", + "supports-color": "^9.4.0", + "yargs": "^17.7.2" + }, + "bin": { + "vscode-test": "out/bin.mjs" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@vscode/test-cli/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@vscode/test-cli/node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", + "integrity": "sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/@vscode/test-cli/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/@vscode/test-cli/node_modules/mocha": { + "version": "10.8.2", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-10.8.2.tgz", + "integrity": "sha512-VZlYo/WE8t1tstuRmqgeyBgCbJc/lEdopaa+axcKzTBJ+UIdlAB9XnmvTCAH4pwR4ElNInaedhEBmZD8iCSVEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-colors": "^4.1.3", + "browser-stdout": "^1.3.1", + "chokidar": "^3.5.3", + "debug": "^4.3.5", + "diff": "^5.2.0", + "escape-string-regexp": "^4.0.0", + "find-up": "^5.0.0", + "glob": "^8.1.0", + "he": "^1.2.0", + "js-yaml": "^4.1.0", + "log-symbols": "^4.1.0", + "minimatch": "^5.1.6", + "ms": "^2.1.3", + "serialize-javascript": "^6.0.2", + "strip-json-comments": "^3.1.1", + "supports-color": "^8.1.1", + "workerpool": "^6.5.1", + "yargs": "^16.2.0", + "yargs-parser": "^20.2.9", + "yargs-unparser": "^2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha.js" + }, + "engines": { + "node": ">= 14.0.0" + } + }, + "node_modules/@vscode/test-cli/node_modules/mocha/node_modules/glob": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", + "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@vscode/test-cli/node_modules/mocha/node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@vscode/test-cli/node_modules/mocha/node_modules/supports-color": { + "version": "8.1.1", + "resolved": 
"https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/@vscode/test-cli/node_modules/mocha/node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@vscode/test-cli/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@vscode/test-cli/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@vscode/test-cli/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/@vscode/test-cli/node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/@vscode/test-electron": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/@vscode/test-electron/-/test-electron-2.4.1.tgz", + "integrity": "sha512-Gc6EdaLANdktQ1t+zozoBVRynfIsMKMc94Svu1QreOBC8y76x4tvaK32TljrLi1LI2+PK58sDVbL7ALdqf3VRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "http-proxy-agent": "^7.0.2", + "https-proxy-agent": "^7.0.5", + "jszip": "^3.10.1", + "ora": "^7.0.1", + "semver": "^7.6.2" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/agent-base": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", + "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + 
"node_modules/ansi-colors": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz", + "integrity": "sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/bl": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-5.1.0.tgz", + "integrity": "sha512-tv1ZJHLfTDnXE6tMHv73YgSJaWR2AFuPwMntBe7XL/GBFHnT0CLnsHMogfk5+GzCDC5ZWarSCYaIGATZt9dNsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer": "^6.0.3", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/bl/node_modules/readable-stream": { + "version": "3.6.2", + "resolved": 
"https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browser-stdout": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", + "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", + "dev": true, + "license": "ISC" + }, + "node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/c8": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/c8/-/c8-9.1.0.tgz", + "integrity": "sha512-mBWcT5iqNir1zIkzSPyI3NCR9EZCVI3WUD+AVO17MVWTSFNyUueXE82qTeampNtTr+ilN/5Ua3j24LgbCKjDVg==", + "dev": true, + "license": "ISC", + "dependencies": { + "@bcoe/v8-coverage": "^0.2.3", + "@istanbuljs/schema": "^0.1.3", + "find-up": "^5.0.0", + "foreground-child": "^3.1.1", + "istanbul-lib-coverage": "^3.2.0", + "istanbul-lib-report": "^3.0.1", + "istanbul-reports": "^3.1.6", + "test-exclude": "^6.0.0", + "v8-to-istanbul": "^9.0.0", + "yargs": "^17.7.2", + "yargs-parser": "^21.1.1" + }, + "bin": { + "c8": "bin/c8.js" + }, + "engines": { + "node": ">=14.14.0" + } + }, + "node_modules/camelcase": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", + "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + 
"node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/cli-cursor": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-4.0.0.tgz", + "integrity": "sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==", + "dev": true, + "license": "MIT", + "dependencies": { + "restore-cursor": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/cliui/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/cliui/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": 
"https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/debug": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decamelize": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", + "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + 
"url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/diff": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz", + "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true, + "license": "MIT" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + }, + "node_modules/enhanced-resolve": { + "version": "5.18.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.18.1.tgz", + "integrity": "sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true, + "license": "BSD-3-Clause", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/foreground-child": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "dev": true, + "license": 
"ISC", + "dependencies": { + "cross-spawn": "^7.0.6", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true, + "license": "MIT" + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": 
"sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/immediate": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.0.6.tgz", + "integrity": "sha512-XXOFtyqDjNDAQxVfYxuF7g9Il/IbWmmlQg2MYKOH8ExIT1qg6xc4zyS3HaEEATgs1btfzxq15ciUiY7gjSXRGQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": 
"sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-interactive": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-2.0.0.tgz", + "integrity": "sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-report/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-reports": { + "version": "3.1.7", 
+ "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.7.tgz", + "integrity": "sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jszip": { + "version": "3.10.1", + "resolved": "https://registry.npmjs.org/jszip/-/jszip-3.10.1.tgz", + "integrity": "sha512-xXDvecyTpGLrqFrvkrUSoxxfJI5AH7U8zxxtVclpsUtMCq4JQ290LY8AW5c7Ggnr/Y/oK+bQMbqK2qmtk3pN4g==", + "dev": true, + "license": "(MIT OR GPL-3.0-or-later)", + "dependencies": { + "lie": "~3.3.0", + "pako": "~1.0.2", + "readable-stream": "~2.3.6", + "setimmediate": "^1.0.5" + } + }, + "node_modules/lie": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/lie/-/lie-3.3.0.tgz", + "integrity": "sha512-UaiMJzeWRlEujzAuw5LokY1L5ecNQYZKfmyZ9L7wDHb/p5etKaxXhohBcrw0EYby+G/NA52vRSN4N39dxHAIwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "immediate": "~3.0.5" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/mocha": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/mocha/-/mocha-11.1.0.tgz", + "integrity": "sha512-8uJR5RTC2NgpY3GrYcgpZrsEd9zKbPDpob1RezyR2upGHRQtHWofmzTMzTMSV6dru3tj5Ukt0+Vnq1qhFEEwAg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-colors": "^4.1.3", + "browser-stdout": "^1.3.1", + "chokidar": "^3.5.3", + "debug": "^4.3.5", + "diff": "^5.2.0", + "escape-string-regexp": "^4.0.0", + "find-up": "^5.0.0", + "glob": "^10.4.5", + "he": "^1.2.0", + "js-yaml": "^4.1.0", + "log-symbols": "^4.1.0", + "minimatch": "^5.1.6", + "ms": "^2.1.3", + "serialize-javascript": "^6.0.2", + "strip-json-comments": "^3.1.1", + "supports-color": "^8.1.1", + "workerpool": "^6.5.1", + "yargs": "^17.7.2", + "yargs-parser": "^21.1.1", + "yargs-unparser": "^2.0.0" + }, + "bin": { + "_mocha": "bin/_mocha", + "mocha": "bin/mocha.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/mocha/node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/mocha/node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": 
">=0.10.0" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/ora/-/ora-7.0.1.tgz", + "integrity": "sha512-0TUxTiFJWv+JnjWm4o9yvuskpEJLXTcng8MJuKd+SzAzp2o+OP3HWqNhB4OdJRt1Vsd9/mR0oyaEYlOnL7XIRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^5.3.0", + "cli-cursor": "^4.0.0", + "cli-spinners": "^2.9.0", + "is-interactive": "^2.0.0", + "is-unicode-supported": "^1.3.0", + "log-symbols": "^5.1.0", + "stdin-discarder": "^0.1.0", + "string-width": "^6.1.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/chalk": { + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.4.1.tgz", + "integrity": "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/ora/node_modules/emoji-regex": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.4.0.tgz", + "integrity": "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==", + "dev": true, + "license": "MIT" + }, + "node_modules/ora/node_modules/is-unicode-supported": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-1.3.0.tgz", + "integrity": "sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/log-symbols": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-5.1.0.tgz", + "integrity": "sha512-l0x2DvrW294C9uDCoQe1VSU4gf529FkSZ6leBl4TiqZH/e+0R7hSfHQBNut2mNygDgHwvYHfFLn6Oxb3VWj2rA==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^5.0.0", + "is-unicode-supported": "^1.1.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/string-width": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-6.1.0.tgz", + "integrity": "sha512-k01swCJAgQmuADB0YIc+7TuatfNvTBVOoaUWJjTB9R4VJzR5vNWzf5t42ESVZFPS8xTySF7CAdV4t/aaIm3UnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^10.2.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true, + "license": "BlueOak-1.0.0" + }, + "node_modules/pako": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==", + "dev": true, + "license": "(MIT AND Zlib)" + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true, + "license": "MIT" + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dev": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/restore-cursor": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-4.0.0.tgz", + "integrity": "sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==", + "dev": true, + "license": "MIT", + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/restore-cursor/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + 
"dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/setimmediate": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/setimmediate/-/setimmediate-1.0.5.tgz", + "integrity": "sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==", + "dev": true, + "license": "MIT" + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/stdin-discarder": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/stdin-discarder/-/stdin-discarder-0.1.0.tgz", + "integrity": "sha512-xhV7w8S+bUwlPTb4bAOUQhv8/cSS5offJuX8GQGq32ONF0ZtDWKfkdomM3HMRA+LhX6um/FZ0COqlwsjD53LeQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "bl": "^5.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": 
"sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/string-width-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "9.4.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-9.4.0.tgz", + "integrity": "sha512-VL+lNrEoIXww1coLPOmiEmK/0sGigko5COxI09KzHc2VJXJsQ37UaQ+8quuxjDeA7+KnLGTWRyOXSLLR2Wb4jw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/test-exclude": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", + "integrity": "sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==", + "dev": true, + "license": "ISC", + "dependencies": { + 
"@istanbuljs/schema": "^0.1.2", + "glob": "^7.1.4", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/test-exclude/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/test-exclude/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/test-exclude/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/typescript": { + "version": "5.8.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.2.tgz", + "integrity": "sha512-aJn6wq13/afZp/jT9QZmwEjDqqvSGp1VT5GVg+f/t6/oVyrgXM6BY1h9BRh/O5p3PlUPAe+WuiEZOmb/49RqoQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/v8-to-istanbul": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", + "dev": true, + "license": "ISC", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/workerpool": { + 
"version": "6.5.1", + "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.5.1.tgz", + "integrity": "sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/y18n": { + "version": "5.0.8", 
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-unparser": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", + "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", + "dev": true, + "license": "MIT", + "dependencies": { + "camelcase": "^6.0.0", + "decamelize": "^4.0.0", + "flat": "^5.0.2", + "is-plain-obj": "^2.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/yargs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/e2e/package.json b/e2e/package.json new file mode 100644 index 
00000000000..7e0daac064d
--- /dev/null
+++ b/e2e/package.json
@@ -0,0 +1,21 @@
+{
+ "name": "e2e",
+ "version": "0.1.0",
+ "private": true,
+ "scripts": {
+ "build": "cd .. && npm run build",
+ "compile": "tsc -p tsconfig.json",
+ "lint": "eslint src --ext ts",
+ "check-types": "tsc --noEmit",
+ "test": "npm run compile && npx dotenvx run -f .env.integration -- node ./out/e2e/src/runTest.js",
+ "ci": "npm run build && npm run test"
+ },
+ "dependencies": {},
+ "devDependencies": {
+ "@types/mocha": "^10.0.10",
+ "@vscode/test-cli": "^0.0.9",
+ "@vscode/test-electron": "^2.4.0",
+ "mocha": "^11.1.0",
+ "typescript": "^5.4.5"
+ }
+}
diff --git a/src/test/runTest.ts b/e2e/src/runTest.ts
similarity index 98%
rename from src/test/runTest.ts
rename to e2e/src/runTest.ts
index 2576c6072f1..28545efd46b 100644
--- a/src/test/runTest.ts
+++ b/e2e/src/runTest.ts
@@ -6,7 +6,7 @@ async function main() {
 try {
 // The folder containing the Extension Manifest package.json
 // Passed to `--extensionDevelopmentPath`
- const extensionDevelopmentPath = path.resolve(__dirname, "../../")
+ const extensionDevelopmentPath = path.resolve(__dirname, "../../../../")

 // The path to the extension test script
 // Passed to --extensionTestsPath
diff --git a/src/test/suite/extension.test.ts b/e2e/src/suite/extension.test.ts
similarity index 100%
rename from src/test/suite/extension.test.ts
rename to e2e/src/suite/extension.test.ts
diff --git a/src/test/suite/index.ts b/e2e/src/suite/index.ts
similarity index 95%
rename from src/test/suite/index.ts
rename to e2e/src/suite/index.ts
index cc487b0bf78..a9540d96004 100644
--- a/src/test/suite/index.ts
+++ b/e2e/src/suite/index.ts
@@ -1,8 +1,7 @@
 import * as path from "path"
 import Mocha from "mocha"
 import { glob } from "glob"
-import { ClineAPI } from "../../exports/cline"
-import { ClineProvider } from "../../core/webview/ClineProvider"
+import { ClineAPI, ClineProvider } from "../../../src/exports/cline"
 import * as vscode from "vscode"

 declare global {
diff --git a/src/test/suite/modes.test.ts b/e2e/src/suite/modes.test.ts
similarity index 100%
rename from src/test/suite/modes.test.ts
rename to e2e/src/suite/modes.test.ts
diff --git a/src/test/suite/task.test.ts b/e2e/src/suite/task.test.ts
similarity index 100%
rename from src/test/suite/task.test.ts
rename to e2e/src/suite/task.test.ts
diff --git a/tsconfig.integration.json b/e2e/tsconfig.json
similarity index 60%
rename from tsconfig.integration.json
rename to e2e/tsconfig.json
index 0de0ea736a9..99581107673 100644
--- a/tsconfig.integration.json
+++ b/e2e/tsconfig.json
@@ -9,9 +9,8 @@
 "strict": true,
 "skipLibCheck": true,
 "useUnknownInCatchVariables": false,
- "rootDir": "src",
- "outDir": "out-integration"
+ "outDir": "out"
 },
- "include": ["**/*.ts"],
- "exclude": [".vscode-test", "benchmark", "dist", "**/node_modules/**", "out", "out-integration", "webview-ui"]
+ "include": ["src", "../src/exports"],
+ "exclude": [".vscode-test", "**/node_modules/**", "out"]
 }
diff --git a/knip.json b/knip.json
index b0e0839da77..a9f0b93e0d2 100644
--- a/knip.json
+++ b/knip.json
@@ -16,7 +16,9 @@
 "src/activate/**",
 "src/exports/**",
 "src/extension.ts",
- ".vscode-test.mjs"
+ "e2e/.vscode-test.mjs",
+ "e2e/src/runTest.ts",
+ "e2e/src/suite/index.ts"
 ],
 "workspaces": {
 "webview-ui": {
diff --git a/package-lock.json b/package-lock.json
index 950769b39b4..a8fa18b5f1b 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -63,13 +63,10 @@
 "@types/diff-match-patch": "^1.0.36",
 "@types/glob": "^8.1.0",
 "@types/jest": "^29.5.14",
-
"@types/mocha": "^10.0.10", "@types/node": "20.x", "@types/string-similarity": "^4.0.2", "@typescript-eslint/eslint-plugin": "^7.14.1", "@typescript-eslint/parser": "^7.11.0", - "@vscode/test-cli": "^0.0.9", - "@vscode/test-electron": "^2.4.0", "esbuild": "^0.24.0", "eslint": "^8.57.0", "glob": "^11.0.1", @@ -79,7 +76,6 @@ "knip": "^5.44.4", "lint-staged": "^15.2.11", "mkdirp": "^3.0.1", - "mocha": "^11.1.0", "npm-run-all": "^4.1.5", "prettier": "^3.4.2", "rimraf": "^6.0.1", @@ -4153,17 +4149,6 @@ "node": ">= 8" } }, - "node_modules/@pkgjs/parseargs": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", - "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", - "dev": true, - "license": "MIT", - "optional": true, - "engines": { - "node": ">=14" - } - }, "node_modules/@puppeteer/browsers": { "version": "2.5.0", "resolved": "https://registry.npmjs.org/@puppeteer/browsers/-/browsers-2.5.0.tgz", @@ -6032,13 +6017,6 @@ "dev": true, "license": "MIT" }, - "node_modules/@types/mocha": { - "version": "10.0.10", - "resolved": "https://registry.npmjs.org/@types/mocha/-/mocha-10.0.10.tgz", - "integrity": "sha512-xPyYSz1cMPnJQhl0CLMH68j3gprKZaTjG3s5Vi+fDgx+uhG9NOXwbVt52eFS8ECyXhyKcjDLCBEqBExKuiZb7Q==", - "dev": true, - "license": "MIT" - }, "node_modules/@types/ms": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", @@ -6351,332 +6329,6 @@ "resolved": "https://registry.npmjs.org/@vscode/codicons/-/codicons-0.0.36.tgz", "integrity": "sha512-wsNOvNMMJ2BY8rC2N2MNBG7yOowV3ov8KlvUE/AiVUlHKTfWsw3OgAOQduX7h0Un6GssKD3aoTVH+TF3DSQwKQ==" }, - "node_modules/@vscode/test-cli": { - "version": "0.0.9", - "resolved": "https://registry.npmjs.org/@vscode/test-cli/-/test-cli-0.0.9.tgz", - "integrity": "sha512-vsl5/ueE3Jf0f6XzB0ECHHMsd5A0Yu6StElb8a+XsubZW7kHNAOw4Y3TSSuDzKEpLnJ92nbMy1Zl+KLGCE6NaA==", - "dev": true, - "dependencies": { - "@types/mocha": "^10.0.2", - "c8": "^9.1.0", - "chokidar": "^3.5.3", - "enhanced-resolve": "^5.15.0", - "glob": "^10.3.10", - "minimatch": "^9.0.3", - "mocha": "^10.2.0", - "supports-color": "^9.4.0", - "yargs": "^17.7.2" - }, - "bin": { - "vscode-test": "out/bin.mjs" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@vscode/test-cli/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/@vscode/test-cli/node_modules/chokidar": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", - "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", - "dev": true, - "dependencies": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - }, - "engines": { - "node": ">= 8.10.0" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - }, - "optionalDependencies": { - "fsevents": "~2.3.2" - } - }, - "node_modules/@vscode/test-cli/node_modules/cliui": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-7.0.4.tgz", - "integrity": 
"sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.0", - "wrap-ansi": "^7.0.0" - } - }, - "node_modules/@vscode/test-cli/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/@vscode/test-cli/node_modules/glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", - "dev": true, - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@vscode/test-cli/node_modules/jackspeak": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", - "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/cliui": "^8.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - }, - "optionalDependencies": { - "@pkgjs/parseargs": "^0.11.0" - } - }, - "node_modules/@vscode/test-cli/node_modules/lru-cache": { - "version": "10.4.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", - "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/@vscode/test-cli/node_modules/mocha": { - "version": "10.8.2", - "resolved": "https://registry.npmjs.org/mocha/-/mocha-10.8.2.tgz", - "integrity": "sha512-VZlYo/WE8t1tstuRmqgeyBgCbJc/lEdopaa+axcKzTBJ+UIdlAB9XnmvTCAH4pwR4ElNInaedhEBmZD8iCSVEg==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-colors": "^4.1.3", - "browser-stdout": "^1.3.1", - "chokidar": "^3.5.3", - "debug": "^4.3.5", - "diff": "^5.2.0", - "escape-string-regexp": "^4.0.0", - "find-up": "^5.0.0", - "glob": "^8.1.0", - "he": "^1.2.0", - "js-yaml": "^4.1.0", - "log-symbols": "^4.1.0", - "minimatch": "^5.1.6", - "ms": "^2.1.3", - "serialize-javascript": "^6.0.2", - "strip-json-comments": "^3.1.1", - "supports-color": "^8.1.1", - "workerpool": "^6.5.1", - "yargs": "^16.2.0", - "yargs-parser": "^20.2.9", - "yargs-unparser": "^2.0.0" - }, - "bin": { - "_mocha": "bin/_mocha", - "mocha": "bin/mocha.js" - }, - "engines": { - "node": ">= 14.0.0" - } - }, - "node_modules/@vscode/test-cli/node_modules/mocha/node_modules/glob": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", - "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", - "deprecated": "Glob versions prior to v9 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^5.0.1", - "once": "^1.3.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - 
} - }, - "node_modules/@vscode/test-cli/node_modules/mocha/node_modules/minimatch": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@vscode/test-cli/node_modules/mocha/node_modules/supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, - "node_modules/@vscode/test-cli/node_modules/mocha/node_modules/yargs": { - "version": "16.2.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-16.2.0.tgz", - "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", - "dev": true, - "license": "MIT", - "dependencies": { - "cliui": "^7.0.2", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.0", - "y18n": "^5.0.5", - "yargs-parser": "^20.2.2" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@vscode/test-cli/node_modules/path-scurry": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", - "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "lru-cache": "^10.2.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - }, - "engines": { - "node": ">=16 || 14 >=14.18" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@vscode/test-cli/node_modules/readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "dev": true, - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/@vscode/test-cli/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/@vscode/test-cli/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/@vscode/test-cli/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": 
"sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/@vscode/test-cli/node_modules/yargs-parser": { - "version": "20.2.9", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - "node_modules/@vscode/test-electron": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@vscode/test-electron/-/test-electron-2.4.1.tgz", - "integrity": "sha512-Gc6EdaLANdktQ1t+zozoBVRynfIsMKMc94Svu1QreOBC8y76x4tvaK32TljrLi1LI2+PK58sDVbL7ALdqf3VRQ==", - "dev": true, - "dependencies": { - "http-proxy-agent": "^7.0.2", - "https-proxy-agent": "^7.0.5", - "jszip": "^3.10.1", - "ora": "^7.0.1", - "semver": "^7.6.2" - }, - "engines": { - "node": ">=16" - } - }, "node_modules/@xmldom/xmldom": { "version": "0.8.10", "resolved": "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.8.10.tgz", @@ -7179,43 +6831,6 @@ "node": "*" } }, - "node_modules/binary-extensions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", - "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", - "dev": true, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/bl": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/bl/-/bl-5.1.0.tgz", - "integrity": "sha512-tv1ZJHLfTDnXE6tMHv73YgSJaWR2AFuPwMntBe7XL/GBFHnT0CLnsHMogfk5+GzCDC5ZWarSCYaIGATZt9dNsQ==", - "dev": true, - "dependencies": { - "buffer": "^6.0.3", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - } - }, - "node_modules/bl/node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "dev": true, - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, "node_modules/bluebird": { "version": "3.4.7", "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.4.7.tgz", @@ -7251,12 +6866,6 @@ "node": ">=8" } }, - "node_modules/browser-stdout": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", - "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", - "dev": true - }, "node_modules/browserslist": { "version": "4.24.2", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.2.tgz", @@ -7310,30 +6919,6 @@ "node-int64": "^0.4.0" } }, - "node_modules/buffer": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz", - "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": 
"https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, "node_modules/buffer-crc32": { "version": "0.2.13", "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", @@ -7361,31 +6946,6 @@ "node": ">= 0.8" } }, - "node_modules/c8": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/c8/-/c8-9.1.0.tgz", - "integrity": "sha512-mBWcT5iqNir1zIkzSPyI3NCR9EZCVI3WUD+AVO17MVWTSFNyUueXE82qTeampNtTr+ilN/5Ua3j24LgbCKjDVg==", - "dev": true, - "dependencies": { - "@bcoe/v8-coverage": "^0.2.3", - "@istanbuljs/schema": "^0.1.3", - "find-up": "^5.0.0", - "foreground-child": "^3.1.1", - "istanbul-lib-coverage": "^3.2.0", - "istanbul-lib-report": "^3.0.1", - "istanbul-reports": "^3.1.6", - "test-exclude": "^6.0.0", - "v8-to-istanbul": "^9.0.0", - "yargs": "^17.7.2", - "yargs-parser": "^21.1.1" - }, - "bin": { - "c8": "bin/c8.js" - }, - "engines": { - "node": ">=14.14.0" - } - }, "node_modules/call-bind": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", @@ -7595,33 +7155,6 @@ "node": ">=6" } }, - "node_modules/cli-cursor": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-4.0.0.tgz", - "integrity": "sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==", - "dev": true, - "dependencies": { - "restore-cursor": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cli-spinners": { - "version": "2.9.2", - "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", - "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", - "dev": true, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/cli-truncate": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-4.0.0.tgz", @@ -7980,18 +7513,6 @@ } } }, - "node_modules/decamelize": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-4.0.0.tgz", - "integrity": "sha512-9iE1PgSik9HeIIw2JO94IidnE3eBoQrFJ3w7sFuzSX4DpmZ3v5sZpUiV5Swcf6mQEF+Y0ru8Neo+p+nyh2J+hQ==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/dedent": { "version": "1.5.3", "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.5.3.tgz", @@ -9197,15 +8718,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/flat": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz", - "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", - "dev": true, - "bin": { - "flat": "cli.js" - } - }, "node_modules/flat-cache": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", @@ -9863,15 +9375,6 @@ "node": ">= 0.4" } }, - "node_modules/he": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", - "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", - "dev": true, - "bin": { - "he": "bin/he" - } - }, "node_modules/hosted-git-info": { "version": "2.8.9", 
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", @@ -10169,18 +9672,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "dev": true, - "dependencies": { - "binary-extensions": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/is-boolean-object": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.2.0.tgz", @@ -10320,18 +9811,6 @@ "node": ">=0.10.0" } }, - "node_modules/is-interactive": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-2.0.0.tgz", - "integrity": "sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/is-map": { "version": "2.0.3", "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", @@ -10389,15 +9868,6 @@ "node": ">=8" } }, - "node_modules/is-plain-obj": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz", - "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", - "dev": true, - "engines": { - "node": ">=8" - } - }, "node_modules/is-plain-object": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", @@ -10516,25 +9986,13 @@ "integrity": "sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==", "dev": true, "dependencies": { - "which-typed-array": "^1.1.14" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-unicode-supported": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", - "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", - "dev": true, + "which-typed-array": "^1.1.14" + }, "engines": { - "node": ">=10" + "node": ">= 0.4" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-weakmap": { @@ -12077,22 +11535,6 @@ "integrity": "sha512-+WKqsK294HMSc2jEbNgpHpd0JfIBhp7rEV4aqXWqFr6AlXov+SlcgB1Fv01y2kGe3Gc8nMW7VA0SrGuSkRfIEg==", "dev": true }, - "node_modules/log-symbols": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", - "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", - "dev": true, - "dependencies": { - "chalk": "^4.1.0", - "is-unicode-supported": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/log-update": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/log-update/-/log-update-6.1.0.tgz", @@ -12480,183 +11922,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/mocha": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/mocha/-/mocha-11.1.0.tgz", - "integrity": 
"sha512-8uJR5RTC2NgpY3GrYcgpZrsEd9zKbPDpob1RezyR2upGHRQtHWofmzTMzTMSV6dru3tj5Ukt0+Vnq1qhFEEwAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-colors": "^4.1.3", - "browser-stdout": "^1.3.1", - "chokidar": "^3.5.3", - "debug": "^4.3.5", - "diff": "^5.2.0", - "escape-string-regexp": "^4.0.0", - "find-up": "^5.0.0", - "glob": "^10.4.5", - "he": "^1.2.0", - "js-yaml": "^4.1.0", - "log-symbols": "^4.1.0", - "minimatch": "^5.1.6", - "ms": "^2.1.3", - "serialize-javascript": "^6.0.2", - "strip-json-comments": "^3.1.1", - "supports-color": "^8.1.1", - "workerpool": "^6.5.1", - "yargs": "^17.7.2", - "yargs-parser": "^21.1.1", - "yargs-unparser": "^2.0.0" - }, - "bin": { - "_mocha": "bin/_mocha", - "mocha": "bin/mocha.js" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, - "node_modules/mocha/node_modules/chokidar": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", - "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", - "dev": true, - "dependencies": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - }, - "engines": { - "node": ">= 8.10.0" - }, - "funding": { - "url": "https://paulmillr.com/funding/" - }, - "optionalDependencies": { - "fsevents": "~2.3.2" - } - }, - "node_modules/mocha/node_modules/glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", - "dev": true, - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": "dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/mocha/node_modules/glob/node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/mocha/node_modules/jackspeak": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", - "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/cliui": "^8.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - }, - "optionalDependencies": { - "@pkgjs/parseargs": "^0.11.0" - } - }, - "node_modules/mocha/node_modules/lru-cache": { - "version": "10.4.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", - "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/mocha/node_modules/minimatch": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": 
"sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/mocha/node_modules/path-scurry": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", - "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", - "dev": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "lru-cache": "^10.2.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - }, - "engines": { - "node": ">=16 || 14 >=14.18" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/mocha/node_modules/readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "dev": true, - "dependencies": { - "picomatch": "^2.2.1" - }, - "engines": { - "node": ">=8.10.0" - } - }, - "node_modules/mocha/node_modules/supports-color": { - "version": "8.1.1", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz", - "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, "node_modules/monaco-vscode-textmate-theme-converter": { "version": "0.1.7", "resolved": "https://registry.npmjs.org/monaco-vscode-textmate-theme-converter/-/monaco-vscode-textmate-theme-converter-0.1.7.tgz", @@ -12801,6 +12066,7 @@ "resolved": "https://registry.npmjs.org/npm-run-all/-/npm-run-all-4.1.5.tgz", "integrity": "sha512-Oo82gJDAVcaMdi3nuoKFavkIHBRVqQ1qvMb+9LHk/cF4P6B2m8aP04hGf7oL6wZ9BuGwX1onlLhpuoofSyoQDQ==", "dev": true, + "license": "MIT", "dependencies": { "ansi-styles": "^3.2.1", "chalk": "^2.4.1", @@ -13136,92 +12402,6 @@ "node": ">= 0.8.0" } }, - "node_modules/ora": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/ora/-/ora-7.0.1.tgz", - "integrity": "sha512-0TUxTiFJWv+JnjWm4o9yvuskpEJLXTcng8MJuKd+SzAzp2o+OP3HWqNhB4OdJRt1Vsd9/mR0oyaEYlOnL7XIRw==", - "dev": true, - "dependencies": { - "chalk": "^5.3.0", - "cli-cursor": "^4.0.0", - "cli-spinners": "^2.9.0", - "is-interactive": "^2.0.0", - "is-unicode-supported": "^1.3.0", - "log-symbols": "^5.1.0", - "stdin-discarder": "^0.1.0", - "string-width": "^6.1.0", - "strip-ansi": "^7.1.0" - }, - "engines": { - "node": ">=16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ora/node_modules/chalk": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", - "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", - "dev": true, - "engines": { - "node": "^12.17.0 || ^14.13 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/ora/node_modules/emoji-regex": { - "version": "10.4.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.4.0.tgz", - "integrity": "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==", - "dev": true - }, - "node_modules/ora/node_modules/is-unicode-supported": { - "version": "1.3.0", - 
"resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-1.3.0.tgz", - "integrity": "sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ora/node_modules/log-symbols": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-5.1.0.tgz", - "integrity": "sha512-l0x2DvrW294C9uDCoQe1VSU4gf529FkSZ6leBl4TiqZH/e+0R7hSfHQBNut2mNygDgHwvYHfFLn6Oxb3VWj2rA==", - "dev": true, - "dependencies": { - "chalk": "^5.0.0", - "is-unicode-supported": "^1.1.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ora/node_modules/string-width": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-6.1.0.tgz", - "integrity": "sha512-k01swCJAgQmuADB0YIc+7TuatfNvTBVOoaUWJjTB9R4VJzR5vNWzf5t42ESVZFPS8xTySF7CAdV4t/aaIm3UnQ==", - "dev": true, - "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^10.2.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/os-name": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/os-name/-/os-name-6.0.0.tgz", @@ -13895,15 +13075,6 @@ "resolved": "https://registry.npmjs.org/queue-tick/-/queue-tick-1.0.1.tgz", "integrity": "sha512-kJt5qhMxoszgU/62PLP1CJytzd2NKetjSRnyuj31fDd3Rlcz3fzlFdFLD1SItunPwyqEOkca6GbV612BWfaBag==" }, - "node_modules/randombytes": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", - "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", - "dev": true, - "dependencies": { - "safe-buffer": "^5.1.0" - } - }, "node_modules/raw-body": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.0.tgz", @@ -14152,28 +13323,6 @@ "node": ">=10" } }, - "node_modules/restore-cursor": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-4.0.0.tgz", - "integrity": "sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==", - "dev": true, - "dependencies": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/restore-cursor/node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true - }, "node_modules/reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", @@ -14409,15 +13558,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/serialize-javascript": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.2.tgz", - "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", - "dev": true, - "dependencies": { - "randombytes": "^2.1.0" - } - }, "node_modules/set-function-length": { "version": "1.2.2", "resolved": 
"https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", @@ -14750,21 +13890,6 @@ "node": ">= 0.8" } }, - "node_modules/stdin-discarder": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/stdin-discarder/-/stdin-discarder-0.1.0.tgz", - "integrity": "sha512-xhV7w8S+bUwlPTb4bAOUQhv8/cSS5offJuX8GQGq32ONF0ZtDWKfkdomM3HMRA+LhX6um/FZ0COqlwsjD53LeQ==", - "dev": true, - "dependencies": { - "bl": "^5.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/streamx": { "version": "2.21.0", "resolved": "https://registry.npmjs.org/streamx/-/streamx-2.21.0.tgz", @@ -15051,18 +14176,6 @@ "integrity": "sha512-nMIjMrd5Z2nuB2RZCKJfFMjgS3fygbeyGk9PxPPaJR1RIcyN9yn4A63Isovzm3ZtQuEkLBVgMdPup8UeLH7aQw==", "dev": true }, - "node_modules/supports-color": { - "version": "9.4.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-9.4.0.tgz", - "integrity": "sha512-VL+lNrEoIXww1coLPOmiEmK/0sGigko5COxI09KzHc2VJXJsQ37UaQ+8quuxjDeA7+KnLGTWRyOXSLLR2Wb4jw==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/supports-color?sponsor=1" - } - }, "node_modules/supports-preserve-symlinks-flag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", @@ -15966,12 +15079,6 @@ "node": ">=0.10.0" } }, - "node_modules/workerpool": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/workerpool/-/workerpool-6.5.1.tgz", - "integrity": "sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA==", - "dev": true - }, "node_modules/wrap-ansi": { "version": "8.1.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", @@ -16170,33 +15277,6 @@ "node": ">=12" } }, - "node_modules/yargs-unparser": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/yargs-unparser/-/yargs-unparser-2.0.0.tgz", - "integrity": "sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA==", - "dev": true, - "dependencies": { - "camelcase": "^6.0.0", - "decamelize": "^4.0.0", - "flat": "^5.0.2", - "is-plain-obj": "^2.1.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/yargs-unparser/node_modules/camelcase": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/yargs/node_modules/ansi-regex": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", diff --git a/package.json b/package.json index fe64af16d65..b9b62bd5e39 100644 --- a/package.json +++ b/package.json @@ -276,21 +276,24 @@ "scripts": { "build": "npm run build:webview && npm run vsix", "build:webview": "cd webview-ui && npm run build", - "changeset": "changeset", - "check-types": "tsc --noEmit && cd webview-ui && npm run check-types", "compile": "tsc -p . 
--outDir out && node esbuild.js", - "compile:integration": "tsc -p tsconfig.integration.json", - "install:all": "npm install && cd webview-ui && npm install", - "knip": "knip --include files", - "lint": "eslint src --ext ts && npm run lint --prefix webview-ui", - "lint-local": "eslint -c .eslintrc.local.json src --ext ts && npm run lint --prefix webview-ui", - "lint-fix": "eslint src --ext ts --fix && npm run lint-fix --prefix webview-ui", - "lint-fix-local": "eslint -c .eslintrc.local.json src --ext ts --fix && npm run lint-fix --prefix webview-ui", + "install:all": "npm-run-all -p install-*", + "install:ci": "npm install npm-run-all && npm run install:all", + "install-extension": "npm install", + "install-webview-ui": "cd webview-ui && npm install", + "install-e2e": "cd e2e && npm install", + "lint": "npm-run-all -p lint:*", + "lint:extension": "eslint src --ext ts", + "lint:webview-ui": "cd webview-ui && npm run lint", + "lint:e2e": "cd e2e && npm run lint", + "check-types": "npm-run-all -p check-types:*", + "check-types:extension": "tsc --noEmit", + "check-types:webview-ui": "cd webview-ui && npm run check-types", + "check-types:e2e": "cd e2e && npm run check-types", "package": "npm run build:webview && npm run check-types && npm run lint && node esbuild.js --production", - "pretest": "npm run compile && npm run compile:integration", + "pretest": "npm run compile", "dev": "cd webview-ui && npm run dev", "test": "jest && cd webview-ui && npm run test", - "test:integration": "npm run build && npm run compile:integration && npx dotenvx run -f .env.integration -- node ./out-integration/test/runTest.js", "prepare": "husky", "publish:marketplace": "vsce publish && ovsx publish", "publish": "npm run build && changeset publish && npm install --package-lock-only", @@ -300,7 +303,9 @@ "watch": "npm-run-all -p watch:*", "watch:esbuild": "node esbuild.js --watch", "watch:tsc": "tsc --noEmit --watch --project tsconfig.json", - "watch-tests": "tsc -p . -w --outDir out" + "watch-tests": "tsc -p . 
-w --outDir out", + "changeset": "changeset", + "knip": "knip --include files" }, "dependencies": { "@anthropic-ai/bedrock-sdk": "^0.10.2", @@ -358,13 +363,10 @@ "@types/diff-match-patch": "^1.0.36", "@types/glob": "^8.1.0", "@types/jest": "^29.5.14", - "@types/mocha": "^10.0.10", "@types/node": "20.x", "@types/string-similarity": "^4.0.2", "@typescript-eslint/eslint-plugin": "^7.14.1", "@typescript-eslint/parser": "^7.11.0", - "@vscode/test-cli": "^0.0.9", - "@vscode/test-electron": "^2.4.0", "esbuild": "^0.24.0", "eslint": "^8.57.0", "glob": "^11.0.1", @@ -374,7 +376,6 @@ "knip": "^5.44.4", "lint-staged": "^15.2.11", "mkdirp": "^3.0.1", - "mocha": "^11.1.0", "npm-run-all": "^4.1.5", "prettier": "^3.4.2", "rimraf": "^6.0.1", diff --git a/src/exports/cline.d.ts b/src/exports/cline.d.ts index fcf93fc10d0..e529947b6b4 100644 --- a/src/exports/cline.d.ts +++ b/src/exports/cline.d.ts @@ -40,3 +40,96 @@ export interface ClineAPI { */ sidebarProvider: ClineSidebarProvider } + +export interface ClineProvider { + readonly context: vscode.ExtensionContext + readonly viewLaunched: boolean + readonly messages: ClineMessage[] + + /** + * Resolves the webview view for the provider + * @param webviewView The webview view or panel to resolve + */ + resolveWebviewView(webviewView: vscode.WebviewView | vscode.WebviewPanel): Promise + + /** + * Initializes Cline with a task + */ + initClineWithTask(task?: string, images?: string[]): Promise + + /** + * Initializes Cline with a history item + */ + initClineWithHistoryItem(historyItem: HistoryItem): Promise + + /** + * Posts a message to the webview + */ + postMessageToWebview(message: ExtensionMessage): Promise + + /** + * Handles mode switching + */ + handleModeSwitch(newMode: Mode): Promise + + /** + * Updates custom instructions + */ + updateCustomInstructions(instructions?: string): Promise + + /** + * Cancels the current task + */ + cancelTask(): Promise + + /** + * Clears the current task + */ + clearTask(): Promise + + /** + * Gets the current state + */ + getState(): Promise + + /** + * Updates a value in the global state + * @param key The key to update + * @param value The value to set + */ + updateGlobalState(key: GlobalStateKey, value: any): Promise + + /** + * Gets a value from the global state + * @param key The key to get + */ + getGlobalState(key: GlobalStateKey): Promise + + /** + * Stores a secret value in secure storage + * @param key The key to store the secret under + * @param value The secret value to store, or undefined to remove the secret + */ + storeSecret(key: SecretKey, value?: string): Promise + + /** + * Retrieves a secret value from secure storage + * @param key The key of the secret to retrieve + */ + getSecret(key: SecretKey): Promise + + /** + * Resets the state + */ + resetState(): Promise + + /** + * Logs a message + */ + log(message: string): void + + /** + * Disposes of the provider + */ + dispose(): Promise +} diff --git a/src/shared/modes.ts b/src/shared/modes.ts index ceb1e3e11fe..2eda966519c 100644 --- a/src/shared/modes.ts +++ b/src/shared/modes.ts @@ -36,7 +36,11 @@ export type CustomModePrompts = { // Helper to extract group name regardless of format export function getGroupName(group: GroupEntry): ToolGroup { - return Array.isArray(group) ? 
group[0] : group + if (typeof group === "string") { + return group + } + + return group[0] } // Helper to get group options if they exist From 481d613bc8701d880af5af598f5c0d9d75e00935 Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Fri, 28 Feb 2025 21:34:53 -0800 Subject: [PATCH 120/145] Estimate the number of tokens in the last message for sliding window math --- e2e/tsconfig.json | 2 +- package-lock.json | 9 + package.json | 1 + .../__tests__/sliding-window.test.ts | 224 ++++++++++++++++-- src/core/sliding-window/index.ts | 70 +++++- 5 files changed, 276 insertions(+), 30 deletions(-) diff --git a/e2e/tsconfig.json b/e2e/tsconfig.json index 99581107673..792acb14a0d 100644 --- a/e2e/tsconfig.json +++ b/e2e/tsconfig.json @@ -11,6 +11,6 @@ "useUnknownInCatchVariables": false, "outDir": "out" }, - "include": ["src", "../src/exports"], + "include": ["src", "../src/exports/cline.d.ts"], "exclude": [".vscode-test", "**/node_modules/**", "out"] } diff --git a/package-lock.json b/package-lock.json index a8fa18b5f1b..0b24ce6664c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -34,6 +34,7 @@ "get-folder-size": "^5.0.0", "globby": "^14.0.2", "isbinaryfile": "^5.0.2", + "js-tiktoken": "^1.0.19", "mammoth": "^1.8.0", "monaco-vscode-textmate-theme-converter": "^0.1.7", "openai": "^4.78.1", @@ -10909,6 +10910,14 @@ "jiti": "lib/jiti-cli.mjs" } }, + "node_modules/js-tiktoken": { + "version": "1.0.19", + "resolved": "https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.19.tgz", + "integrity": "sha512-XC63YQeEcS47Y53gg950xiZ4IWmkfMe4p2V9OSaBt26q+p47WHn18izuXzSclCI73B7yGqtfRsT6jcZQI0y08g==", + "dependencies": { + "base64-js": "^1.5.1" + } + }, "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", diff --git a/package.json b/package.json index b9b62bd5e39..6f5699a7c7f 100644 --- a/package.json +++ b/package.json @@ -348,6 +348,7 @@ "sound-play": "^1.1.0", "string-similarity": "^4.0.4", "strip-ansi": "^7.1.0", + "js-tiktoken": "^1.0.19", "tmp": "^0.2.3", "tree-sitter-wasms": "^0.1.11", "turndown": "^7.2.0", diff --git a/src/core/sliding-window/__tests__/sliding-window.test.ts b/src/core/sliding-window/__tests__/sliding-window.test.ts index cb897aa8cb0..74b734d7381 100644 --- a/src/core/sliding-window/__tests__/sliding-window.test.ts +++ b/src/core/sliding-window/__tests__/sliding-window.test.ts @@ -3,7 +3,7 @@ import { Anthropic } from "@anthropic-ai/sdk" import { ModelInfo } from "../../../shared/api" -import { truncateConversation, truncateConversationIfNeeded } from "../index" +import { estimateTokenCount, truncateConversation, truncateConversationIfNeeded } from "../index" /** * Tests for the truncateConversation function @@ -118,23 +118,26 @@ describe("getMaxTokens", () => { const modelInfo = createModelInfo(100000, 50000) // Max tokens = 100000 - 50000 = 50000 + // Create messages with very small content in the last one to avoid token overflow + const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] + // Below max tokens - no truncation const result1 = truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens: 49999, contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) - expect(result1).toEqual(messages) + expect(result1).toEqual(messagesWithSmallContent) // Above max tokens - truncate const result2 = truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens: 50001, 
contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) - expect(result2).not.toEqual(messages) + expect(result2).not.toEqual(messagesWithSmallContent) expect(result2.length).toBe(3) // Truncated with 0.5 fraction }) @@ -142,23 +145,26 @@ describe("getMaxTokens", () => { const modelInfo = createModelInfo(100000, undefined) // Max tokens = 100000 - (100000 * 0.2) = 80000 + // Create messages with very small content in the last one to avoid token overflow + const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] + // Below max tokens - no truncation const result1 = truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens: 79999, contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) - expect(result1).toEqual(messages) + expect(result1).toEqual(messagesWithSmallContent) // Above max tokens - truncate const result2 = truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens: 80001, contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) - expect(result2).not.toEqual(messages) + expect(result2).not.toEqual(messagesWithSmallContent) expect(result2.length).toBe(3) // Truncated with 0.5 fraction }) @@ -166,23 +172,26 @@ describe("getMaxTokens", () => { const modelInfo = createModelInfo(50000, 10000) // Max tokens = 50000 - 10000 = 40000 + // Create messages with very small content in the last one to avoid token overflow + const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] + // Below max tokens - no truncation const result1 = truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens: 39999, contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) - expect(result1).toEqual(messages) + expect(result1).toEqual(messagesWithSmallContent) // Above max tokens - truncate const result2 = truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens: 40001, contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) - expect(result2).not.toEqual(messages) + expect(result2).not.toEqual(messagesWithSmallContent) expect(result2.length).toBe(3) // Truncated with 0.5 fraction }) @@ -190,23 +199,26 @@ describe("getMaxTokens", () => { const modelInfo = createModelInfo(200000, 30000) // Max tokens = 200000 - 30000 = 170000 + // Create messages with very small content in the last one to avoid token overflow + const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] + // Below max tokens - no truncation const result1 = truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens: 169999, contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) - expect(result1).toEqual(messages) + expect(result1).toEqual(messagesWithSmallContent) // Above max tokens - truncate const result2 = truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens: 170001, contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) - expect(result2).not.toEqual(messages) + expect(result2).not.toEqual(messagesWithSmallContent) expect(result2.length).toBe(3) // Truncated with 0.5 fraction }) }) @@ -234,13 +246,16 @@ describe("truncateConversationIfNeeded", () => { const maxTokens = 100000 - 30000 // 70000 const totalTokens = 69999 // Below threshold + // Create 
messages with very small content in the last one to avoid token overflow + const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] + const result = truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens, contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) - expect(result).toEqual(messages) // No truncation occurs + expect(result).toEqual(messagesWithSmallContent) // No truncation occurs }) it("should truncate if tokens are above max tokens threshold", () => { @@ -248,12 +263,15 @@ describe("truncateConversationIfNeeded", () => { const maxTokens = 100000 - 30000 // 70000 const totalTokens = 70001 // Above threshold + // Create messages with very small content in the last one to avoid token overflow + const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] + // When truncating, always uses 0.5 fraction // With 4 messages after the first, 0.5 fraction means remove 2 messages - const expectedResult = [messages[0], messages[3], messages[4]] + const expectedResult = [messagesWithSmallContent[0], messagesWithSmallContent[3], messagesWithSmallContent[4]] const result = truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens, contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, @@ -266,18 +284,21 @@ describe("truncateConversationIfNeeded", () => { const modelInfo1 = createModelInfo(100000, true, 30000) const modelInfo2 = createModelInfo(100000, false, 30000) + // Create messages with very small content in the last one to avoid token overflow + const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] + // Test below threshold const belowThreshold = 69999 expect( truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens: belowThreshold, contextWindow: modelInfo1.contextWindow, maxTokens: modelInfo1.maxTokens, }), ).toEqual( truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens: belowThreshold, contextWindow: modelInfo2.contextWindow, maxTokens: modelInfo2.maxTokens, @@ -288,18 +309,171 @@ describe("truncateConversationIfNeeded", () => { const aboveThreshold = 70001 expect( truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens: aboveThreshold, contextWindow: modelInfo1.contextWindow, maxTokens: modelInfo1.maxTokens, }), ).toEqual( truncateConversationIfNeeded({ - messages, + messages: messagesWithSmallContent, totalTokens: aboveThreshold, contextWindow: modelInfo2.contextWindow, maxTokens: modelInfo2.maxTokens, }), ) }) + + it("should consider incoming content when deciding to truncate", () => { + const modelInfo = createModelInfo(100000, true, 30000) + const maxTokens = 30000 + const availableTokens = modelInfo.contextWindow - maxTokens + + // Test case 1: Small content that won't push us over the threshold + const smallContent = [{ type: "text" as const, text: "Small content" }] + const smallContentTokens = estimateTokenCount(smallContent) + const messagesWithSmallContent: Anthropic.Messages.MessageParam[] = [ + ...messages.slice(0, -1), + { role: messages[messages.length - 1].role, content: smallContent }, + ] + + // Set base tokens so total is below threshold even with small content added + const baseTokensForSmall = availableTokens - smallContentTokens - 10 + const resultWithSmall = 
truncateConversationIfNeeded({ + messages: messagesWithSmallContent, + totalTokens: baseTokensForSmall, + contextWindow: modelInfo.contextWindow, + maxTokens, + }) + expect(resultWithSmall).toEqual(messagesWithSmallContent) // No truncation + + // Test case 2: Large content that will push us over the threshold + const largeContent = [ + { + type: "text" as const, + text: "A very large incoming message that would consume a significant number of tokens and push us over the threshold", + }, + ] + const largeContentTokens = estimateTokenCount(largeContent) + const messagesWithLargeContent: Anthropic.Messages.MessageParam[] = [ + ...messages.slice(0, -1), + { role: messages[messages.length - 1].role, content: largeContent }, + ] + + // Set base tokens so we're just below threshold without content, but over with content + const baseTokensForLarge = availableTokens - Math.floor(largeContentTokens / 2) + const resultWithLarge = truncateConversationIfNeeded({ + messages: messagesWithLargeContent, + totalTokens: baseTokensForLarge, + contextWindow: modelInfo.contextWindow, + maxTokens, + }) + expect(resultWithLarge).not.toEqual(messagesWithLargeContent) // Should truncate + + // Test case 3: Very large content that will definitely exceed threshold + const veryLargeContent = [{ type: "text" as const, text: "X".repeat(1000) }] + const veryLargeContentTokens = estimateTokenCount(veryLargeContent) + const messagesWithVeryLargeContent: Anthropic.Messages.MessageParam[] = [ + ...messages.slice(0, -1), + { role: messages[messages.length - 1].role, content: veryLargeContent }, + ] + + // Set base tokens so we're just below threshold without content + const baseTokensForVeryLarge = availableTokens - Math.floor(veryLargeContentTokens / 2) + const resultWithVeryLarge = truncateConversationIfNeeded({ + messages: messagesWithVeryLargeContent, + totalTokens: baseTokensForVeryLarge, + contextWindow: modelInfo.contextWindow, + maxTokens, + }) + expect(resultWithVeryLarge).not.toEqual(messagesWithVeryLargeContent) // Should truncate + }) +}) +/** + * Tests for the estimateTokenCount function + */ +describe("estimateTokenCount", () => { + it("should return 0 for empty or undefined content", () => { + expect(estimateTokenCount([])).toBe(0) + // @ts-ignore - Testing with undefined + expect(estimateTokenCount(undefined)).toBe(0) + }) + + it("should estimate tokens for text blocks", () => { + const content: Array = [ + { type: "text", text: "This is a text block with 36 characters" }, + ] + + // With tiktoken, the exact token count may differ from character-based estimation + // Instead of expecting an exact number, we verify it's a reasonable positive number + const result = estimateTokenCount(content) + expect(result).toBeGreaterThan(0) + + // We can also verify that longer text results in more tokens + const longerContent: Array = [ + { + type: "text", + text: "This is a longer text block with significantly more characters to encode into tokens", + }, + ] + const longerResult = estimateTokenCount(longerContent) + expect(longerResult).toBeGreaterThan(result) + }) + + it("should estimate tokens for image blocks based on data size", () => { + // Small image + const smallImage: Array = [ + { type: "image", source: { type: "base64", media_type: "image/jpeg", data: "small_dummy_data" } }, + ] + // Larger image with more data + const largerImage: Array = [ + { type: "image", source: { type: "base64", media_type: "image/png", data: "X".repeat(1000) } }, + ] + + // Verify the token count scales with the size of the image data 
+ const smallImageTokens = estimateTokenCount(smallImage) + const largerImageTokens = estimateTokenCount(largerImage) + + // Small image should have some tokens + expect(smallImageTokens).toBeGreaterThan(0) + + // Larger image should have proportionally more tokens + expect(largerImageTokens).toBeGreaterThan(smallImageTokens) + + // Verify the larger image calculation matches our formula including the 50% fudge factor + expect(largerImageTokens).toBe(48) + }) + + it("should estimate tokens for mixed content blocks", () => { + const content: Array = [ + { type: "text", text: "A text block with 30 characters" }, + { type: "image", source: { type: "base64", media_type: "image/jpeg", data: "dummy_data" } }, + { type: "text", text: "Another text with 24 chars" }, + ] + + // We know image tokens calculation should be consistent + const imageTokens = Math.ceil(Math.sqrt("dummy_data".length)) * 1.5 + + // With tiktoken, we can't predict exact text token counts, + // but we can verify the total is greater than just the image tokens + const result = estimateTokenCount(content) + expect(result).toBeGreaterThan(imageTokens) + + // Also test against a version with only the image to verify text adds tokens + const imageOnlyContent: Array = [ + { type: "image", source: { type: "base64", media_type: "image/jpeg", data: "dummy_data" } }, + ] + const imageOnlyResult = estimateTokenCount(imageOnlyContent) + expect(result).toBeGreaterThan(imageOnlyResult) + }) + + it("should handle empty text blocks", () => { + const content: Array = [{ type: "text", text: "" }] + expect(estimateTokenCount(content)).toBe(0) + }) + + it("should handle plain string messages", () => { + const content = "This is a plain text message" + expect(estimateTokenCount([{ type: "text", text: content }])).toBeGreaterThan(0) + }) }) diff --git a/src/core/sliding-window/index.ts b/src/core/sliding-window/index.ts index 8b646f933b9..0fb26ac38f2 100644 --- a/src/core/sliding-window/index.ts +++ b/src/core/sliding-window/index.ts @@ -1,5 +1,51 @@ import { Anthropic } from "@anthropic-ai/sdk" +import { Tiktoken } from "js-tiktoken/lite" +import o200kBase from "js-tiktoken/ranks/o200k_base" + +const TOKEN_FUDGE_FACTOR = 1.5 + +/** + * Counts tokens for user content using tiktoken for text + * and a size-based calculation for images. + * + * @param {Array} content - The content to count tokens for + * @returns {number} The token count + */ +export function estimateTokenCount(content: Array): number { + if (!content || content.length === 0) return 0 + + let totalTokens = 0 + let encoder = null + + // Create encoder + encoder = new Tiktoken(o200kBase) + + // Process each content block + for (const block of content) { + if (block.type === "text") { + // Use tiktoken for text token counting + const text = block.text || "" + if (text.length > 0) { + const tokens = encoder.encode(text) + totalTokens += tokens.length + } + } else if (block.type === "image") { + // For images, calculate based on data size + const imageSource = block.source + if (imageSource && typeof imageSource === "object" && "data" in imageSource) { + const base64Data = imageSource.data as string + totalTokens += Math.ceil(Math.sqrt(base64Data.length)) + } else { + totalTokens += 300 // Conservative estimate for unknown images + } + } + } + + // Add a fudge factor to account for the fact that tiktoken is not always accurate + return Math.ceil(totalTokens * TOKEN_FUDGE_FACTOR) +} + /** * Truncates a conversation by removing a fraction of the messages. 
* @@ -25,10 +71,10 @@ export function truncateConversation( /** * Conditionally truncates the conversation messages if the total token count - * exceeds the model's limit. + * exceeds the model's limit, considering the size of incoming content. * * @param {Anthropic.Messages.MessageParam[]} messages - The conversation messages. - * @param {number} totalTokens - The total number of tokens in the conversation. + * @param {number} totalTokens - The total number of tokens in the conversation (excluding the last user message). * @param {number} contextWindow - The context window size. * @param {number} maxTokens - The maximum number of tokens allowed. * @returns {Anthropic.Messages.MessageParam[]} The original or truncated conversation messages. @@ -47,6 +93,22 @@ export function truncateConversationIfNeeded({ contextWindow, maxTokens, }: TruncateOptions): Anthropic.Messages.MessageParam[] { - const allowedTokens = contextWindow - (maxTokens || contextWindow * 0.2) - return totalTokens < allowedTokens ? messages : truncateConversation(messages, 0.5) + // Calculate the maximum tokens reserved for response + const reservedTokens = maxTokens || contextWindow * 0.2 + + // Estimate tokens for the last message (which is always a user message) + const lastMessage = messages[messages.length - 1] + const lastMessageContent = lastMessage.content + const lastMessageTokens = Array.isArray(lastMessageContent) + ? estimateTokenCount(lastMessageContent) + : estimateTokenCount([{ type: "text", text: lastMessageContent as string }]) + + // Calculate total effective tokens (totalTokens never includes the last message) + const effectiveTokens = totalTokens + lastMessageTokens + + // Calculate available tokens for conversation history + const allowedTokens = contextWindow - reservedTokens + + // Determine if truncation is needed and apply if necessary + return effectiveTokens < allowedTokens ? 
messages : truncateConversation(messages, 0.5) } From e009d5f14c81bd8b89da288f1d43adfe1e1af544 Mon Sep 17 00:00:00 2001 From: Chris Estreich Date: Fri, 28 Feb 2025 21:40:47 -0800 Subject: [PATCH 121/145] Fix e2e test paths --- e2e/.vscode-test.mjs | 2 +- e2e/package.json | 2 +- e2e/src/runTest.ts | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/e2e/.vscode-test.mjs b/e2e/.vscode-test.mjs index 16ea4271bae..ccc8b495ea9 100644 --- a/e2e/.vscode-test.mjs +++ b/e2e/.vscode-test.mjs @@ -6,7 +6,7 @@ import { defineConfig } from '@vscode/test-cli'; export default defineConfig({ label: 'integrationTest', - files: 'out/e2e/src/suite/**/*.test.js', + files: 'out/suite/**/*.test.js', workspaceFolder: '.', mocha: { ui: 'tdd', diff --git a/e2e/package.json b/e2e/package.json index 7e0daac064d..630a13f0e7e 100644 --- a/e2e/package.json +++ b/e2e/package.json @@ -7,7 +7,7 @@ "compile": "tsc -p tsconfig.json", "lint": "eslint src --ext ts", "check-types": "tsc --noEmit", - "test": "npm run compile && npx dotenvx run -f .env.integration -- node ./out/e2e/src/runTest.js", + "test": "npm run compile && npx dotenvx run -f .env.integration -- node ./out/runTest.js", "ci": "npm run build && npm run test" }, "dependencies": {}, diff --git a/e2e/src/runTest.ts b/e2e/src/runTest.ts index 28545efd46b..2576c6072f1 100644 --- a/e2e/src/runTest.ts +++ b/e2e/src/runTest.ts @@ -6,7 +6,7 @@ async function main() { try { // The folder containing the Extension Manifest package.json // Passed to `--extensionDevelopmentPath` - const extensionDevelopmentPath = path.resolve(__dirname, "../../../../") + const extensionDevelopmentPath = path.resolve(__dirname, "../../") // The path to the extension test script // Passed to --extensionTestsPath From 764d963c104be512187ee9cb600029952c58d0a0 Mon Sep 17 00:00:00 2001 From: Matt Rubens Date: Sat, 1 Mar 2025 08:49:10 -0500 Subject: [PATCH 122/145] Add a 5k token buffer before the end of the context window --- .../__tests__/sliding-window.test.ts | 54 +++++++++++++------ src/core/sliding-window/index.ts | 6 ++- 2 files changed, 42 insertions(+), 18 deletions(-) diff --git a/src/core/sliding-window/__tests__/sliding-window.test.ts b/src/core/sliding-window/__tests__/sliding-window.test.ts index 74b734d7381..dbc0c678c2e 100644 --- a/src/core/sliding-window/__tests__/sliding-window.test.ts +++ b/src/core/sliding-window/__tests__/sliding-window.test.ts @@ -3,7 +3,7 @@ import { Anthropic } from "@anthropic-ai/sdk" import { ModelInfo } from "../../../shared/api" -import { estimateTokenCount, truncateConversation, truncateConversationIfNeeded } from "../index" +import { TOKEN_BUFFER, estimateTokenCount, truncateConversation, truncateConversationIfNeeded } from "../index" /** * Tests for the truncateConversation function @@ -121,10 +121,10 @@ describe("getMaxTokens", () => { // Create messages with very small content in the last one to avoid token overflow const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] - // Below max tokens - no truncation + // Below max tokens and buffer - no truncation const result1 = truncateConversationIfNeeded({ messages: messagesWithSmallContent, - totalTokens: 49999, + totalTokens: 44999, // Well below threshold + buffer contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) @@ -133,7 +133,7 @@ describe("getMaxTokens", () => { // Above max tokens - truncate const result2 = truncateConversationIfNeeded({ messages: messagesWithSmallContent, - totalTokens: 50001, + 
totalTokens: 50001, // Above threshold contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) @@ -148,10 +148,10 @@ describe("getMaxTokens", () => { // Create messages with very small content in the last one to avoid token overflow const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] - // Below max tokens - no truncation + // Below max tokens and buffer - no truncation const result1 = truncateConversationIfNeeded({ messages: messagesWithSmallContent, - totalTokens: 79999, + totalTokens: 74999, // Well below threshold + buffer contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) @@ -160,7 +160,7 @@ describe("getMaxTokens", () => { // Above max tokens - truncate const result2 = truncateConversationIfNeeded({ messages: messagesWithSmallContent, - totalTokens: 80001, + totalTokens: 80001, // Above threshold contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) @@ -175,10 +175,10 @@ describe("getMaxTokens", () => { // Create messages with very small content in the last one to avoid token overflow const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] - // Below max tokens - no truncation + // Below max tokens and buffer - no truncation const result1 = truncateConversationIfNeeded({ messages: messagesWithSmallContent, - totalTokens: 39999, + totalTokens: 34999, // Well below threshold + buffer contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) @@ -187,7 +187,7 @@ describe("getMaxTokens", () => { // Above max tokens - truncate const result2 = truncateConversationIfNeeded({ messages: messagesWithSmallContent, - totalTokens: 40001, + totalTokens: 40001, // Above threshold contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) @@ -202,10 +202,10 @@ describe("getMaxTokens", () => { // Create messages with very small content in the last one to avoid token overflow const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] - // Below max tokens - no truncation + // Below max tokens and buffer - no truncation const result1 = truncateConversationIfNeeded({ messages: messagesWithSmallContent, - totalTokens: 169999, + totalTokens: 164999, // Well below threshold + buffer contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) @@ -214,7 +214,7 @@ describe("getMaxTokens", () => { // Above max tokens - truncate const result2 = truncateConversationIfNeeded({ messages: messagesWithSmallContent, - totalTokens: 170001, + totalTokens: 170001, // Above threshold contextWindow: modelInfo.contextWindow, maxTokens: modelInfo.maxTokens, }) @@ -244,7 +244,7 @@ describe("truncateConversationIfNeeded", () => { it("should not truncate if tokens are below max tokens threshold", () => { const modelInfo = createModelInfo(100000, true, 30000) const maxTokens = 100000 - 30000 // 70000 - const totalTokens = 69999 // Below threshold + const totalTokens = 64999 // Well below threshold + buffer // Create messages with very small content in the last one to avoid token overflow const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] @@ -337,8 +337,8 @@ describe("truncateConversationIfNeeded", () => { { role: messages[messages.length - 1].role, content: smallContent }, ] - // Set base tokens so total is below threshold even with small content added - const baseTokensForSmall = availableTokens - 
smallContentTokens - 10 + // Set base tokens so total is well below threshold + buffer even with small content added + const baseTokensForSmall = availableTokens - smallContentTokens - TOKEN_BUFFER - 10 const resultWithSmall = truncateConversationIfNeeded({ messages: messagesWithSmallContent, totalTokens: baseTokensForSmall, @@ -388,7 +388,29 @@ describe("truncateConversationIfNeeded", () => { }) expect(resultWithVeryLarge).not.toEqual(messagesWithVeryLargeContent) // Should truncate }) + + it("should truncate if tokens are within TOKEN_BUFFER of the threshold", () => { + const modelInfo = createModelInfo(100000, true, 30000) + const maxTokens = 100000 - 30000 // 70000 + const totalTokens = 66000 // Within 5000 of threshold (70000) + + // Create messages with very small content in the last one to avoid token overflow + const messagesWithSmallContent = [...messages.slice(0, -1), { ...messages[messages.length - 1], content: "" }] + + // When truncating, always uses 0.5 fraction + // With 4 messages after the first, 0.5 fraction means remove 2 messages + const expectedResult = [messagesWithSmallContent[0], messagesWithSmallContent[3], messagesWithSmallContent[4]] + + const result = truncateConversationIfNeeded({ + messages: messagesWithSmallContent, + totalTokens, + contextWindow: modelInfo.contextWindow, + maxTokens: modelInfo.maxTokens, + }) + expect(result).toEqual(expectedResult) + }) }) + /** * Tests for the estimateTokenCount function */ diff --git a/src/core/sliding-window/index.ts b/src/core/sliding-window/index.ts index 0fb26ac38f2..d12e7f337ec 100644 --- a/src/core/sliding-window/index.ts +++ b/src/core/sliding-window/index.ts @@ -3,7 +3,8 @@ import { Anthropic } from "@anthropic-ai/sdk" import { Tiktoken } from "js-tiktoken/lite" import o200kBase from "js-tiktoken/ranks/o200k_base" -const TOKEN_FUDGE_FACTOR = 1.5 +export const TOKEN_FUDGE_FACTOR = 1.5 +export const TOKEN_BUFFER = 5000 /** * Counts tokens for user content using tiktoken for text @@ -110,5 +111,6 @@ export function truncateConversationIfNeeded({ const allowedTokens = contextWindow - reservedTokens // Determine if truncation is needed and apply if necessary - return effectiveTokens < allowedTokens ? messages : truncateConversation(messages, 0.5) + // Truncate if we're within TOKEN_BUFFER of the limit + return effectiveTokens > allowedTokens - TOKEN_BUFFER ? truncateConversation(messages, 0.5) : messages } From 251b83629aba03727ce12a598e1bbad84eafa678 Mon Sep 17 00:00:00 2001 From: aheizi Date: Fri, 28 Feb 2025 21:52:48 +0800 Subject: [PATCH 123/145] feat: add shortcut to switch modes --- .../src/components/chat/ChatTextArea.tsx | 7 ++++ webview-ui/src/components/chat/ChatView.tsx | 32 +++++++++++++++++++ .../chat/__tests__/ChatTextArea.test.tsx | 1 + 3 files changed, 40 insertions(+) diff --git a/webview-ui/src/components/chat/ChatTextArea.tsx b/webview-ui/src/components/chat/ChatTextArea.tsx index dcbe0851477..a2af6a3a71e 100644 --- a/webview-ui/src/components/chat/ChatTextArea.tsx +++ b/webview-ui/src/components/chat/ChatTextArea.tsx @@ -31,6 +31,7 @@ interface ChatTextAreaProps { onHeightChange?: (height: number) => void mode: Mode setMode: (value: Mode) => void + modeShortcutText: string } const ChatTextArea = forwardRef( @@ -48,6 +49,7 @@ const ChatTextArea = forwardRef( onHeightChange, mode, setMode, + modeShortcutText, }, ref, ) => { @@ -816,6 +818,11 @@ const ChatTextArea = forwardRef( minWidth: "70px", flex: "0 0 auto", }}> + {getAllModes(customModes).map((mode) => (