diff --git a/.changeset/legal-frogs-agree.md b/.changeset/legal-frogs-agree.md new file mode 100644 index 00000000000..645437627b0 --- /dev/null +++ b/.changeset/legal-frogs-agree.md @@ -0,0 +1,54 @@ +--- +"@kilocode/cli": minor +"kilo-code": minor +--- + +Include changes from Roo Code v3.29.0-v3.30.0 + +- Add token-budget based file reading with intelligent preview to avoid context overruns (thanks @daniel-lxs!) +- Fix: Respect nested .gitignore files in search_files (#7921 by @hannesrudolph, PR by @daniel-lxs) +- Fix: Preserve trailing newlines in stripLineNumbers for apply_diff (#8020 by @liyi3c, PR by @app/roomote) +- Fix: Exclude max tokens field for models that don't support it in export (#7944 by @hannesrudolph, PR by @elianiva) +- Retry API requests on stream failures instead of aborting task (thanks @daniel-lxs!) +- Improve auto-approve button responsiveness (thanks @daniel-lxs!) +- Add checkpoint initialization timeout settings and fix checkpoint timeout warnings (#7843 by @NaccOll, PR by @NaccOll) +- Always show checkpoint restore options regardless of change detection (thanks @daniel-lxs!) +- Improve checkpoint menu translations (thanks @daniel-lxs!) +- Update Mistral Medium model name (#8362 by @ThomsenDrake, PR by @ThomsenDrake) +- Remove GPT-5 instructions/reasoning_summary from UI message metadata to prevent ui_messages.json bloat (thanks @hannesrudolph!) +- Normalize docs-extractor audience tags; remove admin/stakeholder; strip tool invocations (thanks @hannesrudolph!) +- Try 5s status mutation timeout (thanks @cte!) +- Fix: Clean up max output token calculations to prevent context window overruns (#8821 by @enerage, PR by @roomote) +- Fix: Change Add to Context keybinding to avoid Redo conflict (#8652 by @swythan, PR by @roomote) +- Fix provider model loading race conditions (thanks @mrubens!) 
+- Fix: Remove specific Claude model version from settings descriptions to avoid outdated references (#8435 by @rwydaegh, PR by @roomote) +- Fix: Ensure free models don't display pricing information in the UI (thanks @mrubens!) +- Add reasoning support for Z.ai GLM binary thinking mode (#8465 by @BeWater799, PR by @daniel-lxs) +- Add settings to configure time and cost display in system prompt (#8450 by @jaxnb, PR by @roomote) +- Fix: Use max_output_tokens when available in LiteLLM fetcher (#8454 by @fabb, PR by @roomote) +- Fix: Process queued messages after context condensing completes (#8477 by @JosXa, PR by @roomote) +- Fix: Resolve checkpoint menu popover overflow (thanks @daniel-lxs!) +- Fix: LiteLLM test failures after merge (thanks @daniel-lxs!) +- Improve UX: Focus textbox and add newlines after adding to context (thanks @mrubens!) +- Fix: prevent infinite loop when canceling during auto-retry (#8901 by @mini2s, PR by @app/roomote) +- Fix: Enhanced codebase index recovery and reuse ('Start Indexing' button now reuses existing Qdrant index) (#8129 by @jaroslaw-weber, PR by @heyseth) +- Fix: make code index initialization non-blocking at activation (#8777 by @cjlawson02, PR by @daniel-lxs) +- Fix: remove search_and_replace tool from codebase (#8891 by @hannesrudolph, PR by @app/roomote) +- Fix: custom modes under custom path not showing (#8122 by @hannesrudolph, PR by @elianiva) +- Fix: prevent MCP server restart when toggling tool permissions (#8231 by @hannesrudolph, PR by @heyseth) +- Fix: truncate type definition to match max read line (#8149 by @chenxluo, PR by @elianiva) +- Fix: auto-sync enableReasoningEffort with reasoning dropdown selection (thanks @daniel-lxs!) +- Prevent a noisy cloud agent exception (thanks @cte!) 
+- Feat: improve @ file search for large projects (#5721 by @Naituw, PR by @daniel-lxs) +- Feat: rename MCP Errors tab to Logs for mixed-level messages (#8893 by @hannesrudolph, PR by @app/roomote) +- docs(vscode-lm): clarify VS Code LM API integration warning (thanks @hannesrudolph!) +- Fix: Resolve Qdrant codebase_search error by adding keyword index for type field (#8963 by @rossdonald, PR by @app/roomote) +- Fix cost and token tracking between provider styles to ensure accurate usage metrics (thanks @mrubens!) +- Feat: Add OpenRouter embedding provider support (#8972 by @dmarkey, PR by @dmarkey) +- Feat: Add GLM-4.6 model to Fireworks provider (#8752 by @mmealman, PR by @app/roomote) +- Feat: Add MiniMax M2 model to Fireworks provider (#8961 by @dmarkey, PR by @app/roomote) +- Feat: Add preserveReasoning flag to include reasoning in API history (thanks @daniel-lxs!) +- Fix: Prevent message loss during queue drain race condition (#8536 by @hannesrudolph, PR by @daniel-lxs) +- Fix: Capture the reasoning content in base-openai-compatible for GLM 4.6 (thanks @mrubens!) +- Fix: Create new Requesty profile during OAuth (thanks @Thibault00!) +- Fix: Cleanup terminal settings tab and change default terminal to inline (thanks @hannesrudolph!) 
diff --git a/apps/vscode-e2e/src/suite/tools/apply-diff.test.ts b/apps/vscode-e2e/src/suite/tools/apply-diff.test.ts index 729d6839b19..c4f279f5f6d 100644 --- a/apps/vscode-e2e/src/suite/tools/apply-diff.test.ts +++ b/apps/vscode-e2e/src/suite/tools/apply-diff.test.ts @@ -8,7 +8,7 @@ import { RooCodeEventName, type ClineMessage } from "@roo-code/types" import { waitFor, sleep } from "../utils" import { setDefaultSuiteTimeout } from "../test-utils" -suite("Roo Code apply_diff Tool", function () { +suite.skip("Roo Code apply_diff Tool", function () { setDefaultSuiteTimeout(this) let workspaceDir: string diff --git a/apps/vscode-e2e/src/suite/tools/execute-command.test.ts b/apps/vscode-e2e/src/suite/tools/execute-command.test.ts index f207dae685c..3dbfb709348 100644 --- a/apps/vscode-e2e/src/suite/tools/execute-command.test.ts +++ b/apps/vscode-e2e/src/suite/tools/execute-command.test.ts @@ -8,7 +8,7 @@ import { RooCodeEventName, type ClineMessage } from "@roo-code/types" import { waitFor, sleep, waitUntilCompleted } from "../utils" import { setDefaultSuiteTimeout } from "../test-utils" -suite("Roo Code execute_command Tool", function () { +suite.skip("Roo Code execute_command Tool", function () { setDefaultSuiteTimeout(this) let workspaceDir: string diff --git a/apps/vscode-e2e/src/suite/tools/insert-content.test.ts b/apps/vscode-e2e/src/suite/tools/insert-content.test.ts index 4dd0c209280..a3a3abb1866 100644 --- a/apps/vscode-e2e/src/suite/tools/insert-content.test.ts +++ b/apps/vscode-e2e/src/suite/tools/insert-content.test.ts @@ -8,7 +8,7 @@ import { RooCodeEventName, type ClineMessage } from "@roo-code/types" import { waitFor, sleep } from "../utils" import { setDefaultSuiteTimeout } from "../test-utils" -suite("Roo Code insert_content Tool", function () { +suite.skip("Roo Code insert_content Tool", function () { setDefaultSuiteTimeout(this) let workspaceDir: string diff --git a/apps/vscode-e2e/src/suite/tools/list-files.test.ts 
b/apps/vscode-e2e/src/suite/tools/list-files.test.ts index 5a1fd6cc3be..386433e7b8a 100644 --- a/apps/vscode-e2e/src/suite/tools/list-files.test.ts +++ b/apps/vscode-e2e/src/suite/tools/list-files.test.ts @@ -8,7 +8,7 @@ import { RooCodeEventName, type ClineMessage } from "@roo-code/types" import { waitFor, sleep } from "../utils" import { setDefaultSuiteTimeout } from "../test-utils" -suite("Roo Code list_files Tool", function () { +suite.skip("Roo Code list_files Tool", function () { setDefaultSuiteTimeout(this) let workspaceDir: string diff --git a/apps/vscode-e2e/src/suite/tools/read-file.test.ts b/apps/vscode-e2e/src/suite/tools/read-file.test.ts index 99e3f184577..00aca7f58ab 100644 --- a/apps/vscode-e2e/src/suite/tools/read-file.test.ts +++ b/apps/vscode-e2e/src/suite/tools/read-file.test.ts @@ -9,7 +9,7 @@ import { RooCodeEventName, type ClineMessage } from "@roo-code/types" import { waitFor, sleep } from "../utils" import { setDefaultSuiteTimeout } from "../test-utils" -suite("Roo Code read_file Tool", function () { +suite.skip("Roo Code read_file Tool", function () { setDefaultSuiteTimeout(this) let tempDir: string diff --git a/apps/vscode-e2e/src/suite/tools/search-and-replace.test.ts b/apps/vscode-e2e/src/suite/tools/search-and-replace.test.ts deleted file mode 100644 index 801a829a74b..00000000000 --- a/apps/vscode-e2e/src/suite/tools/search-and-replace.test.ts +++ /dev/null @@ -1,631 +0,0 @@ -import * as assert from "assert" -import * as fs from "fs/promises" -import * as path from "path" -import * as vscode from "vscode" - -import { RooCodeEventName, type ClineMessage } from "@roo-code/types" - -import { waitFor, sleep } from "../utils" -import { setDefaultSuiteTimeout } from "../test-utils" - -suite("Roo Code search_and_replace Tool", function () { - setDefaultSuiteTimeout(this) - - let workspaceDir: string - - // Pre-created test files that will be used across tests - const testFiles = { - simpleReplace: { - name: 
`test-simple-replace-${Date.now()}.txt`, - content: "Hello World\nThis is a test file\nWith multiple lines\nHello again", - path: "", - }, - regexReplace: { - name: `test-regex-replace-${Date.now()}.js`, - content: `function oldFunction() { - console.log("old implementation") - return "old result" -} - -function anotherOldFunction() { - console.log("another old implementation") - return "another old result" -}`, - path: "", - }, - caseInsensitive: { - name: `test-case-insensitive-${Date.now()}.txt`, - content: `Hello World -HELLO UNIVERSE -hello everyone -HeLLo ThErE`, - path: "", - }, - multipleMatches: { - name: `test-multiple-matches-${Date.now()}.txt`, - content: `TODO: Fix this bug -This is some content -TODO: Add more tests -Some more content -TODO: Update documentation -Final content`, - path: "", - }, - noMatches: { - name: `test-no-matches-${Date.now()}.txt`, - content: "This file has no matching patterns\nJust regular content\nNothing special here", - path: "", - }, - } - - // Get the actual workspace directory that VSCode is using and create all test files - suiteSetup(async function () { - // Get the workspace folder from VSCode - const workspaceFolders = vscode.workspace.workspaceFolders - if (!workspaceFolders || workspaceFolders.length === 0) { - throw new Error("No workspace folder found") - } - workspaceDir = workspaceFolders[0]!.uri.fsPath - console.log("Using workspace directory:", workspaceDir) - - // Create all test files before any tests run - console.log("Creating test files in workspace...") - for (const [key, file] of Object.entries(testFiles)) { - file.path = path.join(workspaceDir, file.name) - await fs.writeFile(file.path, file.content) - console.log(`Created ${key} test file at:`, file.path) - } - - // Verify all files exist - for (const [key, file] of Object.entries(testFiles)) { - const exists = await fs - .access(file.path) - .then(() => true) - .catch(() => false) - if (!exists) { - throw new Error(`Failed to create ${key} test file 
at ${file.path}`) - } - } - }) - - // Clean up after all tests - suiteTeardown(async () => { - // Cancel any running tasks before cleanup - try { - await globalThis.api.cancelCurrentTask() - } catch { - // Task might not be running - } - - // Clean up all test files - console.log("Cleaning up test files...") - for (const [key, file] of Object.entries(testFiles)) { - try { - await fs.unlink(file.path) - console.log(`Cleaned up ${key} test file`) - } catch (error) { - console.log(`Failed to clean up ${key} test file:`, error) - } - } - }) - - // Clean up before each test - setup(async () => { - // Cancel any previous task - try { - await globalThis.api.cancelCurrentTask() - } catch { - // Task might not be running - } - - // Small delay to ensure clean state - await sleep(100) - }) - - // Clean up after each test - teardown(async () => { - // Cancel the current task - try { - await globalThis.api.cancelCurrentTask() - } catch { - // Task might not be running - } - - // Small delay to ensure clean state - await sleep(100) - }) - - test("Should perform simple text replacement", async function () { - const api = globalThis.api - const messages: ClineMessage[] = [] - const testFile = testFiles.simpleReplace - const expectedContent = "Hello Universe\nThis is a test file\nWith multiple lines\nHello again" - let taskStarted = false - let taskCompleted = false - let errorOccurred: string | null = null - let searchReplaceExecuted = false - - // Listen for messages - const messageHandler = ({ message }: { message: ClineMessage }) => { - messages.push(message) - - // Log important messages for debugging - if (message.type === "say" && message.say === "error") { - errorOccurred = message.text || "Unknown error" - console.error("Error:", message.text) - } - if (message.type === "ask" && message.ask === "tool") { - console.log("Tool request:", message.text?.substring(0, 200)) - } - if (message.type === "say" && (message.say === "completion_result" || message.say === "text")) { - 
console.log("AI response:", message.text?.substring(0, 200)) - } - - // Check for tool execution - if (message.type === "say" && message.say === "api_req_started" && message.text) { - console.log("API request started:", message.text.substring(0, 200)) - try { - const requestData = JSON.parse(message.text) - if (requestData.request && requestData.request.includes("search_and_replace")) { - searchReplaceExecuted = true - console.log("search_and_replace tool executed!") - } - } catch (e) { - console.log("Failed to parse api_req_started message:", e) - } - } - } - api.on(RooCodeEventName.Message, messageHandler) - - // Listen for task events - const taskStartedHandler = (id: string) => { - if (id === taskId) { - taskStarted = true - console.log("Task started:", id) - } - } - api.on(RooCodeEventName.TaskStarted, taskStartedHandler) - - const taskCompletedHandler = (id: string) => { - if (id === taskId) { - taskCompleted = true - console.log("Task completed:", id) - } - } - api.on(RooCodeEventName.TaskCompleted, taskCompletedHandler) - - let taskId: string - try { - // Start task with search_and_replace instruction - taskId = await api.startNewTask({ - configuration: { - mode: "code", - autoApprovalEnabled: true, - alwaysAllowWrite: true, - alwaysAllowReadOnly: true, - alwaysAllowReadOnlyOutsideWorkspace: true, - }, - text: `Use search_and_replace on the file ${testFile.name} to replace "Hello World" with "Hello Universe". 
- -The file is located at: ${testFile.path} - -The file already exists with this content: -${testFile.content} - -Assume the file exists and you can modify it directly.`, - }) - - console.log("Task ID:", taskId) - console.log("Test filename:", testFile.name) - - // Wait for task to start - await waitFor(() => taskStarted, { timeout: 45_000 }) - - // Check for early errors - if (errorOccurred) { - console.error("Early error detected:", errorOccurred) - } - - // Wait for task completion - await waitFor(() => taskCompleted, { timeout: 45_000 }) - - // Give extra time for file system operations - await sleep(2000) - - // Check if the file was modified correctly - const actualContent = await fs.readFile(testFile.path, "utf-8") - console.log("File content after modification:", actualContent) - - // Verify tool was executed - assert.strictEqual(searchReplaceExecuted, true, "search_and_replace tool should have been executed") - - // Verify file content - assert.strictEqual( - actualContent.trim(), - expectedContent.trim(), - "File content should be modified correctly", - ) - - console.log("Test passed! 
search_and_replace tool executed and file modified successfully") - } finally { - // Clean up - api.off(RooCodeEventName.Message, messageHandler) - api.off(RooCodeEventName.TaskStarted, taskStartedHandler) - api.off(RooCodeEventName.TaskCompleted, taskCompletedHandler) - } - }) - - test("Should perform regex pattern replacement", async function () { - const api = globalThis.api - const messages: ClineMessage[] = [] - const testFile = testFiles.regexReplace - const expectedContent = `function newFunction() { - console.log("new implementation") - return "new result" -} - -function anotherNewFunction() { - console.log("another new implementation") - return "another new result" -}` - let taskStarted = false - let taskCompleted = false - let errorOccurred: string | null = null - let searchReplaceExecuted = false - - // Listen for messages - const messageHandler = ({ message }: { message: ClineMessage }) => { - messages.push(message) - - // Log important messages for debugging - if (message.type === "say" && message.say === "error") { - errorOccurred = message.text || "Unknown error" - console.error("Error:", message.text) - } - if (message.type === "ask" && message.ask === "tool") { - console.log("Tool request:", message.text?.substring(0, 200)) - } - if (message.type === "say" && (message.say === "completion_result" || message.say === "text")) { - console.log("AI response:", message.text?.substring(0, 200)) - } - - // Check for tool execution - if (message.type === "say" && message.say === "api_req_started" && message.text) { - console.log("API request started:", message.text.substring(0, 200)) - try { - const requestData = JSON.parse(message.text) - if (requestData.request && requestData.request.includes("search_and_replace")) { - searchReplaceExecuted = true - console.log("search_and_replace tool executed!") - } - } catch (e) { - console.log("Failed to parse api_req_started message:", e) - } - } - } - api.on(RooCodeEventName.Message, messageHandler) - - // Listen for 
task events - const taskStartedHandler = (id: string) => { - if (id === taskId) { - taskStarted = true - console.log("Task started:", id) - } - } - api.on(RooCodeEventName.TaskStarted, taskStartedHandler) - - const taskCompletedHandler = (id: string) => { - if (id === taskId) { - taskCompleted = true - console.log("Task completed:", id) - } - } - api.on(RooCodeEventName.TaskCompleted, taskCompletedHandler) - - let taskId: string - try { - // Start task with search_and_replace instruction - simpler and more direct - taskId = await api.startNewTask({ - configuration: { - mode: "code", - autoApprovalEnabled: true, - alwaysAllowWrite: true, - alwaysAllowReadOnly: true, - alwaysAllowReadOnlyOutsideWorkspace: true, - }, - text: `Use search_and_replace on the file ${testFile.name} to: -1. First, replace "old" with "new" (use_regex: false) -2. Then, replace "Old" with "New" (use_regex: false) - -The file is located at: ${testFile.path} - -Assume the file exists and you can modify it directly. - -Use the search_and_replace tool twice - once for each replacement.`, - }) - - console.log("Task ID:", taskId) - console.log("Test filename:", testFile.name) - - // Wait for task to start - await waitFor(() => taskStarted, { timeout: 90_000 }) - - // Check for early errors - if (errorOccurred) { - console.error("Early error detected:", errorOccurred) - } - - // Wait for task completion - await waitFor(() => taskCompleted, { timeout: 90_000 }) - - // Give extra time for file system operations - await sleep(2000) - - // Check if the file was modified correctly - const actualContent = await fs.readFile(testFile.path, "utf-8") - console.log("File content after modification:", actualContent) - - // Verify tool was executed - assert.strictEqual(searchReplaceExecuted, true, "search_and_replace tool should have been executed") - - // Verify file content - assert.strictEqual( - actualContent.trim(), - expectedContent.trim(), - "File content should be modified with regex replacement", - ) - - 
console.log("Test passed! search_and_replace tool executed with regex successfully") - } finally { - // Clean up - api.off(RooCodeEventName.Message, messageHandler) - api.off(RooCodeEventName.TaskStarted, taskStartedHandler) - api.off(RooCodeEventName.TaskCompleted, taskCompletedHandler) - } - }) - - test("Should replace multiple matches in file", async function () { - const api = globalThis.api - const messages: ClineMessage[] = [] - const testFile = testFiles.multipleMatches - const expectedContent = `DONE: Fix this bug -This is some content -DONE: Add more tests -Some more content -DONE: Update documentation -Final content` - let taskStarted = false - let taskCompleted = false - let errorOccurred: string | null = null - let searchReplaceExecuted = false - - // Listen for messages - const messageHandler = ({ message }: { message: ClineMessage }) => { - messages.push(message) - - // Log important messages for debugging - if (message.type === "say" && message.say === "error") { - errorOccurred = message.text || "Unknown error" - console.error("Error:", message.text) - } - if (message.type === "ask" && message.ask === "tool") { - console.log("Tool request:", message.text?.substring(0, 200)) - } - if (message.type === "say" && (message.say === "completion_result" || message.say === "text")) { - console.log("AI response:", message.text?.substring(0, 200)) - } - - // Check for tool execution - if (message.type === "say" && message.say === "api_req_started" && message.text) { - console.log("API request started:", message.text.substring(0, 200)) - try { - const requestData = JSON.parse(message.text) - if (requestData.request && requestData.request.includes("search_and_replace")) { - searchReplaceExecuted = true - console.log("search_and_replace tool executed!") - } - } catch (e) { - console.log("Failed to parse api_req_started message:", e) - } - } - } - api.on(RooCodeEventName.Message, messageHandler) - - // Listen for task events - const taskStartedHandler = (id: 
string) => { - if (id === taskId) { - taskStarted = true - console.log("Task started:", id) - } - } - api.on(RooCodeEventName.TaskStarted, taskStartedHandler) - - const taskCompletedHandler = (id: string) => { - if (id === taskId) { - taskCompleted = true - console.log("Task completed:", id) - } - } - api.on(RooCodeEventName.TaskCompleted, taskCompletedHandler) - - let taskId: string - try { - // Start task with search_and_replace instruction for multiple matches - taskId = await api.startNewTask({ - configuration: { - mode: "code", - autoApprovalEnabled: true, - alwaysAllowWrite: true, - alwaysAllowReadOnly: true, - alwaysAllowReadOnlyOutsideWorkspace: true, - }, - text: `Use search_and_replace on the file ${testFile.name} to replace all occurrences of "TODO" with "DONE". - -The file is located at: ${testFile.path} - -The file already exists with this content: -${testFile.content} - -Assume the file exists and you can modify it directly.`, - }) - - console.log("Task ID:", taskId) - console.log("Test filename:", testFile.name) - - // Wait for task to start - await waitFor(() => taskStarted, { timeout: 45_000 }) - - // Check for early errors - if (errorOccurred) { - console.error("Early error detected:", errorOccurred) - } - - // Wait for task completion - await waitFor(() => taskCompleted, { timeout: 45_000 }) - - // Give extra time for file system operations - await sleep(2000) - - // Check if the file was modified correctly - const actualContent = await fs.readFile(testFile.path, "utf-8") - console.log("File content after modification:", actualContent) - - // Verify tool was executed - assert.strictEqual(searchReplaceExecuted, true, "search_and_replace tool should have been executed") - - // Verify file content - assert.strictEqual( - actualContent.trim(), - expectedContent.trim(), - "All TODO occurrences should be replaced with DONE", - ) - - console.log("Test passed! 
search_and_replace tool executed and replaced multiple matches successfully") - } finally { - // Clean up - api.off(RooCodeEventName.Message, messageHandler) - api.off(RooCodeEventName.TaskStarted, taskStartedHandler) - api.off(RooCodeEventName.TaskCompleted, taskCompletedHandler) - } - }) - - test("Should handle case when no matches are found", async function () { - const api = globalThis.api - const messages: ClineMessage[] = [] - const testFile = testFiles.noMatches - const expectedContent = testFile.content // Should remain unchanged - let taskStarted = false - let taskCompleted = false - let errorOccurred: string | null = null - let searchReplaceExecuted = false - - // Listen for messages - const messageHandler = ({ message }: { message: ClineMessage }) => { - messages.push(message) - - // Log important messages for debugging - if (message.type === "say" && message.say === "error") { - errorOccurred = message.text || "Unknown error" - console.error("Error:", message.text) - } - if (message.type === "ask" && message.ask === "tool") { - console.log("Tool request:", message.text?.substring(0, 200)) - } - if (message.type === "say" && (message.say === "completion_result" || message.say === "text")) { - console.log("AI response:", message.text?.substring(0, 200)) - } - - // Check for tool execution - if (message.type === "say" && message.say === "api_req_started" && message.text) { - console.log("API request started:", message.text.substring(0, 200)) - try { - const requestData = JSON.parse(message.text) - if (requestData.request && requestData.request.includes("search_and_replace")) { - searchReplaceExecuted = true - console.log("search_and_replace tool executed!") - } - } catch (e) { - console.log("Failed to parse api_req_started message:", e) - } - } - } - api.on(RooCodeEventName.Message, messageHandler) - - // Listen for task events - const taskStartedHandler = (id: string) => { - if (id === taskId) { - taskStarted = true - console.log("Task started:", id) - } 
- } - api.on(RooCodeEventName.TaskStarted, taskStartedHandler) - - const taskCompletedHandler = (id: string) => { - if (id === taskId) { - taskCompleted = true - console.log("Task completed:", id) - } - } - api.on(RooCodeEventName.TaskCompleted, taskCompletedHandler) - - let taskId: string - try { - // Start task with search_and_replace instruction for pattern that won't match - taskId = await api.startNewTask({ - configuration: { - mode: "code", - autoApprovalEnabled: true, - alwaysAllowWrite: true, - alwaysAllowReadOnly: true, - alwaysAllowReadOnlyOutsideWorkspace: true, - }, - text: `Use search_and_replace on the file ${testFile.name} to replace "NONEXISTENT_PATTERN" with "REPLACEMENT". This pattern should not be found in the file. - -The file is located at: ${testFile.path} - -The file already exists with this content: -${testFile.content} - -Assume the file exists and you can modify it directly.`, - }) - - console.log("Task ID:", taskId) - console.log("Test filename:", testFile.name) - - // Wait for task to start - await waitFor(() => taskStarted, { timeout: 45_000 }) - - // Check for early errors - if (errorOccurred) { - console.error("Early error detected:", errorOccurred) - } - - // Wait for task completion - await waitFor(() => taskCompleted, { timeout: 45_000 }) - - // Give extra time for file system operations - await sleep(2000) - - // Check if the file remains unchanged - const actualContent = await fs.readFile(testFile.path, "utf-8") - console.log("File content after search (should be unchanged):", actualContent) - - // Verify tool was executed - assert.strictEqual(searchReplaceExecuted, true, "search_and_replace tool should have been executed") - - // Verify file content remains unchanged - assert.strictEqual( - actualContent.trim(), - expectedContent.trim(), - "File content should remain unchanged when no matches are found", - ) - - console.log("Test passed! 
search_and_replace tool executed and handled no matches correctly") - } finally { - // Clean up - api.off(RooCodeEventName.Message, messageHandler) - api.off(RooCodeEventName.TaskStarted, taskStartedHandler) - api.off(RooCodeEventName.TaskCompleted, taskCompletedHandler) - } - }) -}) diff --git a/apps/vscode-e2e/src/suite/tools/search-files.test.ts b/apps/vscode-e2e/src/suite/tools/search-files.test.ts index 98cfd1b3eed..2b54df3f048 100644 --- a/apps/vscode-e2e/src/suite/tools/search-files.test.ts +++ b/apps/vscode-e2e/src/suite/tools/search-files.test.ts @@ -8,7 +8,7 @@ import { RooCodeEventName, type ClineMessage } from "@roo-code/types" import { waitFor, sleep } from "../utils" import { setDefaultSuiteTimeout } from "../test-utils" -suite("Roo Code search_files Tool", function () { +suite.skip("Roo Code search_files Tool", function () { setDefaultSuiteTimeout(this) let workspaceDir: string diff --git a/apps/vscode-e2e/src/suite/tools/write-to-file.test.ts b/apps/vscode-e2e/src/suite/tools/write-to-file.test.ts index dea51386cf9..fee15add17b 100644 --- a/apps/vscode-e2e/src/suite/tools/write-to-file.test.ts +++ b/apps/vscode-e2e/src/suite/tools/write-to-file.test.ts @@ -8,7 +8,7 @@ import { RooCodeEventName, type ClineMessage } from "@roo-code/types" import { waitFor, sleep } from "../utils" import { setDefaultSuiteTimeout } from "../test-utils" -suite("Roo Code write_to_file Tool", function () { +suite.skip("Roo Code write_to_file Tool", function () { setDefaultSuiteTimeout(this) let tempDir: string diff --git a/apps/web-roo-code/package.json b/apps/web-roo-code/package.json index c387a632709..1cf8d4dd178 100644 --- a/apps/web-roo-code/package.json +++ b/apps/web-roo-code/package.json @@ -17,6 +17,7 @@ "@roo-code/evals": "workspace:^", "@roo-code/types": "workspace:^", "@tanstack/react-query": "^5.79.0", + "@vercel/og": "^0.6.2", "class-variance-authority": "^0.7.1", "clsx": "^2.1.1", "embla-carousel-auto-scroll": "^8.6.0", diff --git 
a/apps/web-roo-code/public/heroes/agent-pr-fixer.png b/apps/web-roo-code/public/heroes/agent-pr-fixer.png new file mode 100644 index 00000000000..f33c926e6c6 Binary files /dev/null and b/apps/web-roo-code/public/heroes/agent-pr-fixer.png differ diff --git a/apps/web-roo-code/public/heroes/agent-reviewer.png b/apps/web-roo-code/public/heroes/agent-reviewer.png new file mode 100644 index 00000000000..24489802979 Binary files /dev/null and b/apps/web-roo-code/public/heroes/agent-reviewer.png differ diff --git a/apps/web-roo-code/public/og/base_a.png b/apps/web-roo-code/public/og/base_a.png new file mode 100644 index 00000000000..134911054a2 Binary files /dev/null and b/apps/web-roo-code/public/og/base_a.png differ diff --git a/apps/web-roo-code/public/og/base_b.png b/apps/web-roo-code/public/og/base_b.png new file mode 100644 index 00000000000..4ca1375af9f Binary files /dev/null and b/apps/web-roo-code/public/og/base_b.png differ diff --git a/apps/web-roo-code/src/app/api/og/route.tsx b/apps/web-roo-code/src/app/api/og/route.tsx new file mode 100644 index 00000000000..53390a351a2 --- /dev/null +++ b/apps/web-roo-code/src/app/api/og/route.tsx @@ -0,0 +1,165 @@ +import { ImageResponse } from "next/og" +import { NextRequest } from "next/server" + +export const runtime = "edge" + +async function fetchWithTimeout(url: string, init?: RequestInit, timeoutMs = 3000) { + const controller = new AbortController() + const id = setTimeout(() => controller.abort(), timeoutMs) + try { + return await fetch(url, { ...init, signal: controller.signal }) + } finally { + clearTimeout(id) + } +} + +async function loadGoogleFont(font: string, text: string): Promise { + try { + const url = `https://fonts.googleapis.com/css2?family=${font}&text=${encodeURIComponent(text)}` + const cssRes = await fetchWithTimeout(url) + if (!cssRes.ok) return null + const css = await cssRes.text() + + const match = + css.match(/src:\s*url\(([^)]+)\)\s*format\('(?:woff2|woff|opentype|truetype)'\)/i) || + 
css.match(/url\(([^)]+)\)/i) + + const fontUrl = match && match[1] ? match[1].replace(/^['"]|['"]$/g, "") : null + if (!fontUrl) return null + + const res = await fetchWithTimeout(fontUrl, undefined, 5000) + if (!res.ok) return null + return await res.arrayBuffer() + } catch { + return null + } +} + +export async function GET(request: NextRequest) { + const requestUrl = new URL(request.url) + const { searchParams } = requestUrl + + // Get title and description from query params + const title = searchParams.get("title") || "Roo Code" + const description = searchParams.get("description") || "" + + // Combine all text that will be displayed for font loading + const displayText = title + description + + // Check if we should try to use the background image + const useBackgroundImage = searchParams.get("bg") !== "false" + + // Dynamically get the base URL from the current request + // This ensures it works correctly in development, preview, and production environments + const baseUrl = `${requestUrl.protocol}//${requestUrl.host}` + const variant = title.length % 2 === 0 ? "a" : "b" + const backgroundUrl = `${baseUrl}/og/base_${variant}.png` + + // Preload fonts with graceful fallbacks + const regularFont = await loadGoogleFont("Inter", displayText) + const boldFont = await loadGoogleFont("Inter:wght@700", displayText) + const fonts: { name: string; data: ArrayBuffer; style?: "normal" | "italic"; weight?: 400 | 700 }[] = [] + if (regularFont) { + fonts.push({ name: "Inter", data: regularFont, style: "normal", weight: 400 }) + } + if (boldFont) { + fonts.push({ name: "Inter", data: boldFont, style: "normal", weight: 700 }) + } + + return new ImageResponse( + ( +
+ {/* Optional Background Image - only render if explicitly requested */} + {useBackgroundImage && ( +
+ {/* eslint-disable-next-line @next/next/no-img-element */} + +
+ )} + + {/* Text Content */} +
+ {/* Main Title */} +

+ {title} +

+ + {/* Secondary Description */} + {description && ( +

+ {description} +

+ )} +
+
+ ), + { + width: 1200, + height: 630, + fonts: fonts.length ? fonts : undefined, + // Cache for 7 days in production, 3 seconds in development + headers: { + "Cache-Control": + process.env.NODE_ENV === "production" + ? "public, max-age=604800, s-maxage=604800, stale-while-revalidate=86400" + : "public, max-age=3, s-maxage=3", + }, + }, + ) +} diff --git a/apps/web-roo-code/src/app/cloud/page.tsx b/apps/web-roo-code/src/app/cloud/page.tsx index 96d90039f43..7f70f95ffd5 100644 --- a/apps/web-roo-code/src/app/cloud/page.tsx +++ b/apps/web-roo-code/src/app/cloud/page.tsx @@ -16,14 +16,15 @@ import type { Metadata } from "next" import { Button } from "@/components/ui" import { AnimatedBackground } from "@/components/homepage" import { SEO } from "@/lib/seo" +import { ogImageUrl } from "@/lib/og" import { EXTERNAL_LINKS } from "@/lib/constants" import Image from "next/image" const TITLE = "Roo Code Cloud" const DESCRIPTION = "Roo Code Cloud gives you and your team the tools to take AI-coding to the next level with cloud agents, remote control, and more." 
+const OG_DESCRIPTION = "Go way beyond the IDE" const PATH = "/cloud" -const OG_IMAGE = SEO.ogImage export const metadata: Metadata = { title: TITLE, @@ -38,10 +39,10 @@ export const metadata: Metadata = { siteName: SEO.name, images: [ { - url: OG_IMAGE.url, - width: OG_IMAGE.width, - height: OG_IMAGE.height, - alt: OG_IMAGE.alt, + url: ogImageUrl(TITLE, OG_DESCRIPTION), + width: 1200, + height: 630, + alt: TITLE, }, ], locale: SEO.locale, @@ -51,7 +52,7 @@ export const metadata: Metadata = { card: SEO.twitterCard, title: TITLE, description: DESCRIPTION, - images: [OG_IMAGE.url], + images: [ogImageUrl(TITLE, OG_DESCRIPTION)], }, keywords: [...SEO.keywords, "cloud", "subscription", "cloud agents", "AI cloud development"], } diff --git a/apps/web-roo-code/src/app/enterprise/page.tsx b/apps/web-roo-code/src/app/enterprise/page.tsx index 7a1d36f0688..b4581e4f3f0 100644 --- a/apps/web-roo-code/src/app/enterprise/page.tsx +++ b/apps/web-roo-code/src/app/enterprise/page.tsx @@ -7,12 +7,13 @@ import { ContactForm } from "@/components/enterprise/contact-form" import { EXTERNAL_LINKS } from "@/lib/constants" import type { Metadata } from "next" import { SEO } from "@/lib/seo" +import { ogImageUrl } from "@/lib/og" -const TITLE = "Enterprise Solution" +const TITLE = "Roo Code Cloud Enterprise" const DESCRIPTION = "The control-plane for AI-powered software development. Gain visibility, governance, and control over your AI coding initiatives." 
+const OG_DESCRIPTION = "The control-plane for AI-powered software development" const PATH = "/enterprise" -const OG_IMAGE = SEO.ogImage export const metadata: Metadata = { title: TITLE, @@ -27,10 +28,10 @@ export const metadata: Metadata = { siteName: SEO.name, images: [ { - url: OG_IMAGE.url, - width: OG_IMAGE.width, - height: OG_IMAGE.height, - alt: OG_IMAGE.alt, + url: ogImageUrl(TITLE, OG_DESCRIPTION), + width: 1200, + height: 630, + alt: TITLE, }, ], locale: SEO.locale, @@ -40,7 +41,7 @@ export const metadata: Metadata = { card: SEO.twitterCard, title: TITLE, description: DESCRIPTION, - images: [OG_IMAGE.url], + images: [ogImageUrl(TITLE, OG_DESCRIPTION)], }, keywords: [ ...SEO.keywords, diff --git a/apps/web-roo-code/src/app/evals/page.tsx b/apps/web-roo-code/src/app/evals/page.tsx index a6af30d70ed..1c7fcfd38b7 100644 --- a/apps/web-roo-code/src/app/evals/page.tsx +++ b/apps/web-roo-code/src/app/evals/page.tsx @@ -2,6 +2,7 @@ import type { Metadata } from "next" import { getEvalRuns } from "@/actions/evals" import { SEO } from "@/lib/seo" +import { ogImageUrl } from "@/lib/og" import { Evals } from "./evals" @@ -10,13 +11,8 @@ export const dynamic = "force-dynamic" const TITLE = "Evals" const DESCRIPTION = "Explore quantitative evals of LLM coding skills across tasks and providers." 
+const OG_DESCRIPTION = "Quantitative evals of LLM coding skills" const PATH = "/evals" -const IMAGE = { - url: "https://i.imgur.com/ijP7aZm.png", - width: 1954, - height: 1088, - alt: "Roo Code Evals – LLM coding benchmarks", -} export const metadata: Metadata = { title: TITLE, @@ -29,7 +25,14 @@ export const metadata: Metadata = { description: DESCRIPTION, url: `${SEO.url}${PATH}`, siteName: SEO.name, - images: [IMAGE], + images: [ + { + url: ogImageUrl(TITLE, OG_DESCRIPTION), + width: 1200, + height: 630, + alt: TITLE, + }, + ], locale: SEO.locale, type: "website", }, @@ -37,7 +40,7 @@ export const metadata: Metadata = { card: SEO.twitterCard, title: TITLE, description: DESCRIPTION, - images: [IMAGE.url], + images: [ogImageUrl(TITLE, OG_DESCRIPTION)], }, keywords: [...SEO.keywords, "benchmarks", "LLM evals", "coding evaluations", "model comparison"], } diff --git a/apps/web-roo-code/src/app/layout.tsx b/apps/web-roo-code/src/app/layout.tsx index 08105980496..5e510eb848e 100644 --- a/apps/web-roo-code/src/app/layout.tsx +++ b/apps/web-roo-code/src/app/layout.tsx @@ -2,6 +2,7 @@ import React from "react" import type { Metadata } from "next" import { Inter } from "next/font/google" import { SEO } from "@/lib/seo" +import { ogImageUrl } from "@/lib/og" import { CookieConsentWrapper } from "@/components/CookieConsentWrapper" import { Providers } from "@/components/providers" @@ -12,6 +13,9 @@ import "./globals.css" const inter = Inter({ subsets: ["latin"] }) +const OG_TITLE = "Meet Roo Code" +const OG_DESCRIPTION = "The AI dev team that gets things done." 
+ export const metadata: Metadata = { metadataBase: new URL(SEO.url), title: { @@ -51,10 +55,10 @@ export const metadata: Metadata = { siteName: SEO.name, images: [ { - url: SEO.ogImage.url, - width: SEO.ogImage.width, - height: SEO.ogImage.height, - alt: SEO.ogImage.alt, + url: ogImageUrl(OG_TITLE, OG_DESCRIPTION), + width: 1200, + height: 630, + alt: OG_TITLE, }, ], locale: SEO.locale, @@ -64,7 +68,7 @@ export const metadata: Metadata = { card: SEO.twitterCard, title: SEO.title, description: SEO.description, - images: [SEO.ogImage.url], + images: [ogImageUrl(OG_TITLE, OG_DESCRIPTION)], }, robots: { index: true, diff --git a/apps/web-roo-code/src/app/legal/cookies/page.tsx b/apps/web-roo-code/src/app/legal/cookies/page.tsx index cb67b8672c5..c8058a34e77 100644 --- a/apps/web-roo-code/src/app/legal/cookies/page.tsx +++ b/apps/web-roo-code/src/app/legal/cookies/page.tsx @@ -1,10 +1,11 @@ import type { Metadata } from "next" import { SEO } from "@/lib/seo" +import { ogImageUrl } from "@/lib/og" -const TITLE = "Cookie Policy" +const TITLE = "Our Cookie Policy" const DESCRIPTION = "Learn about how Roo Code uses cookies to enhance your experience and provide our services." 
+const OG_DESCRIPTION = "" const PATH = "/legal/cookies" -const OG_IMAGE = SEO.ogImage export const metadata: Metadata = { title: TITLE, @@ -19,10 +20,10 @@ export const metadata: Metadata = { siteName: SEO.name, images: [ { - url: OG_IMAGE.url, - width: OG_IMAGE.width, - height: OG_IMAGE.height, - alt: OG_IMAGE.alt, + url: ogImageUrl(TITLE, OG_DESCRIPTION), + width: 1200, + height: 630, + alt: TITLE, }, ], locale: SEO.locale, @@ -32,7 +33,7 @@ export const metadata: Metadata = { card: SEO.twitterCard, title: TITLE, description: DESCRIPTION, - images: [OG_IMAGE.url], + images: [ogImageUrl(TITLE, OG_DESCRIPTION)], }, keywords: [...SEO.keywords, "cookies", "privacy", "tracking", "analytics"], } diff --git a/apps/web-roo-code/src/app/legal/subprocessors/page.tsx b/apps/web-roo-code/src/app/legal/subprocessors/page.tsx index c37bb98e851..e78fa407201 100644 --- a/apps/web-roo-code/src/app/legal/subprocessors/page.tsx +++ b/apps/web-roo-code/src/app/legal/subprocessors/page.tsx @@ -1,10 +1,11 @@ import type { Metadata } from "next" import { SEO } from "@/lib/seo" +import { ogImageUrl } from "@/lib/og" const TITLE = "Subprocessors" const DESCRIPTION = "List of third-party subprocessors used by Roo Code to process customer data." 
+const OG_DESCRIPTION = "" const PATH = "/legal/subprocessors" -const OG_IMAGE = SEO.ogImage export const metadata: Metadata = { title: TITLE, @@ -19,10 +20,10 @@ export const metadata: Metadata = { siteName: SEO.name, images: [ { - url: OG_IMAGE.url, - width: OG_IMAGE.width, - height: OG_IMAGE.height, - alt: OG_IMAGE.alt, + url: ogImageUrl(TITLE, OG_DESCRIPTION), + width: 1200, + height: 630, + alt: TITLE, }, ], locale: SEO.locale, @@ -32,7 +33,7 @@ export const metadata: Metadata = { card: SEO.twitterCard, title: TITLE, description: DESCRIPTION, - images: [OG_IMAGE.url], + images: [ogImageUrl(TITLE, OG_DESCRIPTION)], }, keywords: [...SEO.keywords, "subprocessors", "data processing", "GDPR", "privacy", "third-party services"], } diff --git a/apps/web-roo-code/src/app/pr-fixer/PrFixerContent.tsx b/apps/web-roo-code/src/app/pr-fixer/PrFixerContent.tsx new file mode 100644 index 00000000000..3fef5a8dec9 --- /dev/null +++ b/apps/web-roo-code/src/app/pr-fixer/PrFixerContent.tsx @@ -0,0 +1,240 @@ +"use client" + +import { ArrowRight, GitPullRequest, History, Key, MessageSquareCode, Wrench, type LucideIcon } from "lucide-react" +import Image from "next/image" +import Link from "next/link" + +import { Button } from "@/components/ui" +import { AnimatedBackground } from "@/components/homepage" +import { EXTERNAL_LINKS } from "@/lib/constants" +import { trackGoogleAdsConversion } from "@/lib/analytics/google-ads" + +// Workaround for next/image choking on these for some reason +const hero = "hero" // kilocode_change from: import hero from "/public/heroes/agent-pr-fixer.png" + +interface Feature { + icon: LucideIcon + title: string + description: string | React.ReactNode + logos?: string[] +} + +const workflowSteps: Feature[] = [ + { + icon: GitPullRequest, + title: "1. Connect your GitHub repositories", + description: "Pick which repos the PR Fixer can work on by pushing to ongoing branches.", + }, + { + icon: MessageSquareCode, + title: "2. 
Invoke from a comment", + description: + 'Ask the agent to fix issues directly from GitHub PR comments (e.g. "@roomote: fix these review comments"). It’s fully aware of the entire comment history and latest diffs and focuses on fixing them – not random changes to your code.', + }, + { + icon: Wrench, + title: "3. Get clean scoped commits", + description: ( + <> + The agent proposes targeted changes and pushes concise commits or patch suggestions you (or{" "} + PR Reviewer) can review and merge quickly. + + ), + }, +] + +const howItWorks: Feature[] = [ + { + icon: History, + title: "Comment-history aware", + description: + "Understands the entire conversation on the PR – previous reviews, your replies, follow-ups – and uses that context to produce accurate fixes.", + }, + { + icon: Key, + title: "Bring your own key", + description: + "Use your preferred models at full strength. We optimize prompts and execution without capping your model to protect our margins.", + }, + { + icon: GitPullRequest, + title: "Repository- and diff-aware", + description: + "Analyzes the full repo context and the latest diff to ensure fixes align with project conventions and pass checks.", + }, +] + +export function PrFixerContent() { + return ( + <> +
+ +
+
+
+
+

+ + State-of-the-art fixes for the comments on your PRs. +

+ +
+

+ Roo Code{"'"}s PR Fixer applies high-quality changes to your PRs, right from + GitHub. Invoke via a PR comment and it will read the entire comment history to + understand context, agreements, and tradeoffs — then implement the right fix. +

+

+ As always, you bring the model key; we orchestrate smart, efficient workflows. +

+
+ + {/* Cross-agent link */} +
+ Works great with + + + PR Reviewer Agent + + +
+
+ +
+ + + (cancel anytime) + +
+
+ +
+
+
+ Example of a PR Fixer applying changes from review comments +
+
+
+
+
+
+ + {/* How It Works Section */} +
+
+
+
+

How It Works

+
+
+ +
+
    + {workflowSteps.map((step, index) => { + const Icon = step.icon + return ( +
  • + +

    + {step.title} +

    +
    + {step.description} +
    +
  • + ) + })} +
+
+
+
+ +
+
+
+
+

+ Why Roo Code{"'"}s PR Fixer is different +

+
+
+ +
+
    + {howItWorks.map((feature, index) => { + const Icon = feature.icon + return ( +
  • + +

    + {feature.title} +

    +
    + {feature.description} +
    +
  • + ) + })} +
+
+
+
+ + {/* CTA Section */} +
+
+
+

+ Ship fixes, not follow-ups. +

+

+ Let Roo Code{"'"}s PR Fixer turn your review feedback into clean, ready-to-merge commits. +

+
+ +
+
+
+
+ + ) +} diff --git a/apps/web-roo-code/src/app/pr-fixer/page.tsx b/apps/web-roo-code/src/app/pr-fixer/page.tsx new file mode 100644 index 00000000000..3d6e1f865de --- /dev/null +++ b/apps/web-roo-code/src/app/pr-fixer/page.tsx @@ -0,0 +1,60 @@ +import type { Metadata } from "next" + +import { SEO } from "@/lib/seo" +import { ogImageUrl } from "@/lib/og" +import { PrFixerContent } from "./PrFixerContent" + +const TITLE = "PR Fixer" +const DESCRIPTION = + "Automatically apply high-quality fixes to your pull requests with comment-aware, GitHub-native workflows." +const OG_DESCRIPTION = "Transform review feedback into clean commits" +const PATH = "/pr-fixer" + +export const metadata: Metadata = { + title: TITLE, + description: DESCRIPTION, + alternates: { + canonical: `${SEO.url}${PATH}`, + }, + openGraph: { + title: TITLE, + description: DESCRIPTION, + url: `${SEO.url}${PATH}`, + siteName: SEO.name, + images: [ + { + url: ogImageUrl(TITLE, OG_DESCRIPTION), + width: 1200, + height: 630, + alt: TITLE, + }, + ], + locale: SEO.locale, + type: "website", + }, + twitter: { + card: SEO.twitterCard, + title: TITLE, + description: DESCRIPTION, + images: [ogImageUrl(TITLE, OG_DESCRIPTION)], + }, + keywords: [ + ...SEO.keywords, + "PR fixer", + "pull request fixes", + "code fixes", + "GitHub PR", + "automated code fixes", + "comment-aware agent", + "repository-aware fixes", + "bring your own key", + "BYOK AI", + "code quality", + "cloud agents", + "AI development team", + ], +} + +export default function AgentPrFixerPage() { + return +} diff --git a/apps/web-roo-code/src/app/pricing/page.tsx b/apps/web-roo-code/src/app/pricing/page.tsx index b80dba6bc18..9985881e1d1 100644 --- a/apps/web-roo-code/src/app/pricing/page.tsx +++ b/apps/web-roo-code/src/app/pricing/page.tsx @@ -6,13 +6,14 @@ import { Button } from "@/components/ui" import { AnimatedBackground } from "@/components/homepage" import { ContactForm } from "@/components/enterprise/contact-form" import { SEO } from 
"@/lib/seo" +import { ogImageUrl } from "@/lib/og" import { EXTERNAL_LINKS } from "@/lib/constants" -const TITLE = "Pricing - Roo Code Cloud" +const TITLE = "Roo Code Cloud Pricing" const DESCRIPTION = "Simple, transparent pricing for Roo Code Cloud. The VS Code extension is free forever. Choose the cloud plan that fits your needs." +const OG_DESCRIPTION = "" const PATH = "/pricing" -const OG_IMAGE = SEO.ogImage const PRICE_CREDITS = 5 @@ -29,10 +30,10 @@ export const metadata: Metadata = { siteName: SEO.name, images: [ { - url: OG_IMAGE.url, - width: OG_IMAGE.width, - height: OG_IMAGE.height, - alt: OG_IMAGE.alt, + url: ogImageUrl(TITLE, OG_DESCRIPTION), + width: 1200, + height: 630, + alt: TITLE, }, ], locale: SEO.locale, @@ -42,7 +43,7 @@ export const metadata: Metadata = { card: SEO.twitterCard, title: TITLE, description: DESCRIPTION, - images: [OG_IMAGE.url], + images: [ogImageUrl(TITLE, OG_DESCRIPTION)], }, keywords: [ ...SEO.keywords, diff --git a/apps/web-roo-code/src/app/privacy/page.tsx b/apps/web-roo-code/src/app/privacy/page.tsx index 6b17c4ff8df..905e6ff1a0f 100644 --- a/apps/web-roo-code/src/app/privacy/page.tsx +++ b/apps/web-roo-code/src/app/privacy/page.tsx @@ -1,11 +1,12 @@ import type { Metadata } from "next" import { SEO } from "@/lib/seo" +import { ogImageUrl } from "@/lib/og" -const TITLE = "Privacy Policy" +const TITLE = "Our Privacy Policy" const DESCRIPTION = "Privacy policy for Roo Code Cloud and marketing website. Learn how we handle your data and protect your privacy." 
+const OG_DESCRIPTION = "" const PATH = "/privacy" -const OG_IMAGE = SEO.ogImage export const metadata: Metadata = { title: TITLE, @@ -20,10 +21,10 @@ export const metadata: Metadata = { siteName: SEO.name, images: [ { - url: OG_IMAGE.url, - width: OG_IMAGE.width, - height: OG_IMAGE.height, - alt: OG_IMAGE.alt, + url: ogImageUrl(TITLE, OG_DESCRIPTION), + width: 1200, + height: 630, + alt: TITLE, }, ], locale: SEO.locale, @@ -33,7 +34,7 @@ export const metadata: Metadata = { card: SEO.twitterCard, title: TITLE, description: DESCRIPTION, - images: [OG_IMAGE.url], + images: [ogImageUrl(TITLE, OG_DESCRIPTION)], }, keywords: [...SEO.keywords, "privacy", "data protection", "GDPR", "security"], } diff --git a/apps/web-roo-code/src/app/reviewer/ReviewerContent.tsx b/apps/web-roo-code/src/app/reviewer/ReviewerContent.tsx new file mode 100644 index 00000000000..c72c6cd63df --- /dev/null +++ b/apps/web-roo-code/src/app/reviewer/ReviewerContent.tsx @@ -0,0 +1,296 @@ +"use client" + +import { + ArrowRight, + Blocks, + BookMarked, + ListChecks, + LucideIcon, + GitPullRequest, + Key, + MessageSquareCode, + Wrench, +} from "lucide-react" +import Image from "next/image" +import Link from "next/link" + +import { Button } from "@/components/ui" +import { AnimatedBackground } from "@/components/homepage" +import { AgentCarousel } from "@/components/reviewer/agent-carousel" +import { EXTERNAL_LINKS } from "@/lib/constants" +import { trackGoogleAdsConversion } from "@/lib/analytics/google-ads" + +interface Feature { + icon: LucideIcon + title: string + description: string | React.ReactNode + logos?: string[] +} + +const workflowSteps: Feature[] = [ + { + icon: GitPullRequest, + title: "1. Connect Your Repository", + description: "Link your GitHub repository and configure which branches and pull requests should be reviewed.", + }, + { + icon: Key, + title: "2. 
Add Your API Key", + description: + "Provide your AI provider API key and set your review preferences, custom rules, and quality standards.", + }, + { + icon: MessageSquareCode, + title: "3. Get Review Comments", + description: + "Every pull request gets detailed GitHub comments in minutes from a Roo Code agent highlighting issues and suggesting improvements.", + }, +] + +const howItWorks: Feature[] = [ + { + icon: Blocks, + title: "Our agents, your provider keys", + description: ( + <> +

+ We orchestrate the review, optimize the hell out of the prompts, integrate with GitHub, keep you + properly posted. +

+

We're thoughtful about token usage, but not incentivized to skimp to grow our margins.

+ + ), + }, + { + icon: ListChecks, + title: "Advanced reasoning and workflows", + description: + "We optimize for state-of-the-art reasoning models and leverage powerful workflows (Diff analysis → Context Gathering → Impact Mapping → Contract checks) to produce crisp, actionable comments at the right level.", + }, + { + icon: BookMarked, + title: "Fully repository-aware", + description: + "Reviews traverse code ownership, dependency graphs, and historical patterns to surface risk and deviations, not noise.", + }, +] + +// Workaround for next/image choking on these for some reason +const hero = "hero" // kilocode_change from: import hero from "/public/heroes/agent-reviewer.png" + +export function ReviewerContent() { + return ( + <> +
+ +
+
+
+
+

+ + Get comprehensive code reviews that save you time, not tokens. +

+ +
+

+ Regular AI code review tools cap model usage to protect their margins from fixed + monthly prices. That leads to shallow prompts, limited context, and missed + issues. +

+

+ Roo Code's PR Reviewer flips the script: you bring your own key and + leverage it to the max – to find real issues, increase code quality and keep + your pull request queue moving. +

+
+ + {/* Cross-agent link */} +
+ Works great with + + + PR Fixer Agent + + +
+
+
+ + + (cancel anytime) + +
+
+
+
+
+ Example of a code review generated by Roo Code PR Reviewer +
+
+
+
+
+
+ + {/* How It Works Section */} +
+
+
+
+

How It Works

+
+
+ +
+
    + {workflowSteps.map((step, index) => { + const Icon = step.icon + return ( +
  • + +

    + {step.title} +

    +
    + {step.description} +
    +
  • + ) + })} +
+
+
+
+ +
+
+
+
+

+ Why Roo's PR Reviewer is so much better +

+
+
+ +
+
    + {howItWorks.map((feature, index) => { + const Icon = feature.icon + return ( +
  • + +

    + {feature.title} +

    +
    + {feature.description} +
    + {feature.logos && ( +
    + {feature.logos.map((logo) => ( + {`${logo} + ))} +
    + )} +
  • + ) + })} +
+
+
+
+ +
+
+
+
+

+ The first member of a whole new team +

+ +

+ Architecture, coding, reviewing, testing, debugging, documenting, designing –{" "} + almost everything we do today is mostly through our agents. Now we're + bringing them to you. +

+

+ Roo's PR Reviewer isn't yet another single-purpose tool to add to your already + complicated stack. +
+ It's the first member of your AI-powered development team. More agents are shipping + soon. +

+
+
+ +
+ +
+
+
+ + {/* CTA Section */} +
+
+
+

Stop wasting time.

+

+ Give Roo Code's PR Reviewer your model key and turn painful reviews into a tangible + quality advantage. +

+ +
+
+
+ + ) +} diff --git a/apps/web-roo-code/src/app/reviewer/page.tsx b/apps/web-roo-code/src/app/reviewer/page.tsx new file mode 100644 index 00000000000..7f7cce862a6 --- /dev/null +++ b/apps/web-roo-code/src/app/reviewer/page.tsx @@ -0,0 +1,61 @@ +import type { Metadata } from "next" + +import { SEO } from "@/lib/seo" +import { ogImageUrl } from "@/lib/og" +import { ReviewerContent } from "./ReviewerContent" + +const TITLE = "PR Reviewer" +const DESCRIPTION = + "Get comprehensive AI-powered PR reviews that save you time, not tokens. Bring your own API key and leverage advanced reasoning, repository-aware analysis, and actionable feedback to keep your PR queue moving." +const OG_DESCRIPTION = "AI-powered PR reviews that save you time, not tokens" +const PATH = "/reviewer" + +export const metadata: Metadata = { + title: TITLE, + description: DESCRIPTION, + alternates: { + canonical: `${SEO.url}${PATH}`, + }, + openGraph: { + title: TITLE, + description: DESCRIPTION, + url: `${SEO.url}${PATH}`, + siteName: SEO.name, + images: [ + { + url: ogImageUrl(TITLE, OG_DESCRIPTION), + width: 1200, + height: 630, + alt: TITLE, + }, + ], + locale: SEO.locale, + type: "website", + }, + twitter: { + card: SEO.twitterCard, + title: TITLE, + description: DESCRIPTION, + images: [ogImageUrl(TITLE, OG_DESCRIPTION)], + }, + keywords: [ + ...SEO.keywords, + "PR reviewer", + "code review", + "pull request review", + "AI code review", + "GitHub PR review", + "automated code review", + "repository-aware review", + "bring your own key", + "BYOK AI", + "code quality", + "development workflow", + "cloud agents", + "AI development team", + ], +} + +export default function AgentReviewerPage() { + return +} diff --git a/apps/web-roo-code/src/app/terms/page.tsx b/apps/web-roo-code/src/app/terms/page.tsx index 5939d8f7c96..b4f88bfcfee 100644 --- a/apps/web-roo-code/src/app/terms/page.tsx +++ b/apps/web-roo-code/src/app/terms/page.tsx @@ -1,16 +1,17 @@ import type { Metadata } from "next" import { SEO 
} from "@/lib/seo" +import { ogImageUrl } from "@/lib/og" import fs from "fs" import path from "path" import ReactMarkdown from "react-markdown" import remarkGfm from "remark-gfm" import rehypeRaw from "rehype-raw" -const TITLE = "Terms of Service" +const TITLE = "Our Terms of Service" const DESCRIPTION = "Terms of Service for Roo Code Cloud. Learn about our service terms, commercial conditions, and legal framework." +const OG_DESCRIPTION = "" const PATH = "/terms" -const OG_IMAGE = SEO.ogImage export const metadata: Metadata = { title: TITLE, @@ -25,10 +26,10 @@ export const metadata: Metadata = { siteName: SEO.name, images: [ { - url: OG_IMAGE.url, - width: OG_IMAGE.width, - height: OG_IMAGE.height, - alt: OG_IMAGE.alt, + url: ogImageUrl(TITLE, OG_DESCRIPTION), + width: 1200, + height: 630, + alt: TITLE, }, ], locale: SEO.locale, @@ -38,7 +39,7 @@ export const metadata: Metadata = { card: SEO.twitterCard, title: TITLE, description: DESCRIPTION, - images: [OG_IMAGE.url], + images: [ogImageUrl(TITLE, OG_DESCRIPTION)], }, keywords: [...SEO.keywords, "terms of service", "legal", "agreement", "subscription"], } diff --git a/apps/web-roo-code/src/components/CookieConsentWrapper.tsx b/apps/web-roo-code/src/components/CookieConsentWrapper.tsx index 23b8f5a28f5..6d91d97a9c1 100644 --- a/apps/web-roo-code/src/components/CookieConsentWrapper.tsx +++ b/apps/web-roo-code/src/components/CookieConsentWrapper.tsx @@ -5,7 +5,7 @@ import ReactCookieConsent from "react-cookie-consent" import { Cookie } from "lucide-react" import { getDomain } from "tldts" import { CONSENT_COOKIE_NAME } from "@roo-code/types" -import { dispatchConsentEvent } from "@/lib/analytics/consent-manager" +import { handleConsentAccept, handleConsentReject } from "@/lib/analytics/consent-manager" /** * GDPR-compliant cookie consent banner component @@ -23,11 +23,11 @@ export function CookieConsentWrapper() { }, []) const handleAccept = () => { - dispatchConsentEvent(true) + handleConsentAccept() } const 
handleDecline = () => { - dispatchConsentEvent(false) + handleConsentReject() } const extraCookieOptions = cookieDomain diff --git a/apps/web-roo-code/src/components/chromes/footer.tsx b/apps/web-roo-code/src/components/chromes/footer.tsx index 11b883d9ce2..7f0de4082f5 100644 --- a/apps/web-roo-code/src/components/chromes/footer.tsx +++ b/apps/web-roo-code/src/components/chromes/footer.tsx @@ -12,16 +12,22 @@ import { ScrollButton } from "@/components/ui" export function Footer() { const [privacyDropdownOpen, setPrivacyDropdownOpen] = useState(false) + const [cloudDropdownOpen, setCloudDropdownOpen] = useState(false) const dropdownRef = useRef(null) + const cloudDropdownRef = useRef(null) const logoSrc = useLogoSrc() const { resolvedTheme } = useTheme() // Close dropdown when clicking outside useEffect(() => { function handleClickOutside(event: MouseEvent) { - if (dropdownRef.current && !dropdownRef.current.contains(event.target as Node)) { + const target = event.target as Node + if (dropdownRef.current && !dropdownRef.current.contains(target)) { setPrivacyDropdownOpen(false) } + if (cloudDropdownRef.current && !cloudDropdownRef.current.contains(target)) { + setCloudDropdownOpen(false) + } } document.addEventListener("mousedown", handleClickOutside) @@ -69,6 +75,45 @@ export function Footer() { Features +
  • +
    + + + {cloudDropdownOpen && ( +
    +
    + setCloudDropdownOpen(false)} + className="rounded-md px-3 py-2 transition-colors hover:bg-accent/50 hover:text-foreground"> + Cloud + + setCloudDropdownOpen(false)} + className="rounded-md px-3 py-2 transition-colors hover:bg-accent/50 hover:text-foreground"> + PR Reviewer + + setCloudDropdownOpen(false)} + className="rounded-md px-3 py-2 transition-colors hover:bg-accent/50 hover:text-foreground"> + PR Fixer + +
    +
    + )} +
    +
  • { + // Initialize consent defaults BEFORE loading gtag.js (required for Consent Mode v2) + initializeConsentDefaults() + // Check initial consent status if (hasConsent()) { setShouldLoad(true) - initializeGoogleAnalytics() + updateConsentGranted() } // Listen for consent changes const unsubscribe = onConsentChange((consented) => { - if (consented && !shouldLoad) { - setShouldLoad(true) - initializeGoogleAnalytics() + if (consented) { + if (!shouldLoad) { + setShouldLoad(true) + } + updateConsentGranted() + } else { + updateConsentDenied() } }) return unsubscribe - }, [shouldLoad]) + // eslint-disable-next-line react-hooks/exhaustive-deps -- shouldLoad intentionally omitted to prevent re-initialization loop + }, []) - const initializeGoogleAnalytics = () => { - // Initialize the dataLayer and gtag function + const initializeConsentDefaults = () => { + // Set up consent defaults before gtag loads (Consent Mode v2 requirement) if (typeof window !== "undefined") { window.dataLayer = window.dataLayer || [] window.gtag = function (...args: GtagArgs) { window.dataLayer.push(args) } - window.gtag("js", new Date()) - window.gtag("config", GTM_ID) + + // Set default consent state to 'denied' with cookieless pings enabled + window.gtag("consent", "default", { + ad_storage: "denied", + ad_user_data: "denied", + ad_personalization: "denied", + analytics_storage: "denied", + functionality_storage: "denied", + personalization_storage: "denied", + security_storage: "granted", // Always granted for security + wait_for_update: 500, // Wait 500ms for consent before sending data + }) + + // Enable cookieless pings for Google Ads + window.gtag("set", "url_passthrough", true) } } - // Only render Google Analytics scripts if consent is given - if (!shouldLoad) { - return <>{children} + const updateConsentGranted = () => { + // User accepted cookies - update consent to granted + if (typeof window !== "undefined" && window.gtag) { + window.gtag("consent", "update", { + ad_storage: 
"granted", + ad_user_data: "granted", + ad_personalization: "granted", + analytics_storage: "granted", + functionality_storage: "granted", + personalization_storage: "granted", + }) + } } + const updateConsentDenied = () => { + // User declined cookies - keep consent denied (cookieless pings still work) + if (typeof window !== "undefined" && window.gtag) { + window.gtag("consent", "update", { + ad_storage: "denied", + ad_user_data: "denied", + ad_personalization: "denied", + analytics_storage: "denied", + functionality_storage: "denied", + personalization_storage: "denied", + }) + } + } + + // Always render scripts (Consent Mode v2 needs gtag loaded even without consent) + // Cookieless pings will work with denied consent + return ( <> - {/* Google tag (gtag.js) - Only loads after consent */} + {/* Google tag (gtag.js) - Loads immediately for Consent Mode v2 */} {children} ) } -// Type definitions for Google Analytics -type GtagArgs = ["js", Date] | ["config", string, GtagConfig?] | ["event", string, GtagEventParameters?] +// Type definitions for Google Analytics with Consent Mode v2 +type ConsentState = "granted" | "denied" + +interface ConsentParams { + ad_storage?: ConsentState + ad_user_data?: ConsentState + ad_personalization?: ConsentState + analytics_storage?: ConsentState + functionality_storage?: ConsentState + personalization_storage?: ConsentState + security_storage?: ConsentState + wait_for_update?: number +} + +type GtagArgs = + | ["js", Date] + | ["config", string, GtagConfig?] + | ["event", string, GtagEventParameters?] 
+ | ["consent", "default" | "update", ConsentParams] + | ["set", string, unknown] interface GtagConfig { [key: string]: unknown diff --git a/apps/web-roo-code/src/components/providers/posthog-provider.tsx b/apps/web-roo-code/src/components/providers/posthog-provider.tsx index d172fd8f182..0d8932d549b 100644 --- a/apps/web-roo-code/src/components/providers/posthog-provider.tsx +++ b/apps/web-roo-code/src/components/providers/posthog-provider.tsx @@ -3,8 +3,8 @@ import { usePathname, useSearchParams } from "next/navigation" import posthog from "posthog-js" import { PostHogProvider as OriginalPostHogProvider } from "posthog-js/react" -import { useEffect, Suspense, useState } from "react" -import { hasConsent, onConsentChange } from "@/lib/analytics/consent-manager" +import { useEffect, Suspense } from "react" +import { hasConsent } from "@/lib/analytics/consent-manager" function PageViewTracker() { const pathname = usePathname() @@ -28,11 +28,9 @@ function PageViewTracker() { } export function PostHogProvider({ children }: { children: React.ReactNode }) { - const [isInitialized, setIsInitialized] = useState(false) - useEffect(() => { - // Initialize PostHog only on the client side AND when consent is given - if (typeof window !== "undefined") { + // Initialize PostHog immediately on the client side + if (typeof window !== "undefined" && !posthog.__loaded) { const posthogKey = process.env.NEXT_PUBLIC_POSTHOG_KEY const posthogHost = process.env.NEXT_PUBLIC_POSTHOG_HOST @@ -52,48 +50,32 @@ export function PostHogProvider({ children }: { children: React.ReactNode }) { ) } - const initializePosthog = () => { - if (!isInitialized) { - posthog.init(posthogKey, { - api_host: posthogHost || "https://us.i.posthog.com", - capture_pageview: false, - loaded: (posthogInstance) => { - if (process.env.NODE_ENV === "development") { - posthogInstance.debug() - } - }, - respect_dnt: true, // Respect Do Not Track - }) - setIsInitialized(true) - } - } - - // Check initial consent status - 
if (hasConsent()) { - initializePosthog() - } + // Check if user has already consented to cookies + const userHasConsented = hasConsent() - // Listen for consent changes - const unsubscribe = onConsentChange((consented) => { - if (consented && !isInitialized) { - initializePosthog() - } + // Initialize PostHog with appropriate persistence based on consent + posthog.init(posthogKey, { + api_host: posthogHost || "https://us.i.posthog.com", + capture_pageview: false, // We handle pageview tracking manually + loaded: (posthogInstance) => { + if (process.env.NODE_ENV === "development") { + posthogInstance.debug() + } + }, + save_referrer: true, // Save referrer information + save_campaign_params: true, // Save UTM parameters + respect_dnt: true, // Respect Do Not Track + persistence: userHasConsented ? "localStorage+cookie" : "memory", // Use localStorage if consented, otherwise memory-only + opt_out_capturing_by_default: false, // Start tracking immediately }) - - return () => { - unsubscribe() - } } - }, [isInitialized]) + }, []) - // Only provide PostHog context if it's initialized return ( - {isInitialized && ( - - - - )} + + + {children} ) diff --git a/apps/web-roo-code/src/components/reviewer/agent-carousel.tsx b/apps/web-roo-code/src/components/reviewer/agent-carousel.tsx new file mode 100644 index 00000000000..c935db6e1c7 --- /dev/null +++ b/apps/web-roo-code/src/components/reviewer/agent-carousel.tsx @@ -0,0 +1,125 @@ +"use client" + +import { useEffect } from "react" +import { motion } from "framer-motion" +import useEmblaCarousel from "embla-carousel-react" +import AutoPlay from "embla-carousel-autoplay" +import { Bug, FileText, Gauge, Languages, Microscope, PocketKnife, TestTube, type LucideIcon } from "lucide-react" + +// AI Agent types for the carousel +interface AIAgent { + icon: LucideIcon + name: string +} + +const aiAgents: AIAgent[] = [ + { icon: PocketKnife, name: "Generalist" }, + { icon: Bug, name: "Bug Fixer" }, + { icon: TestTube, name: "Test 
Engineer" }, + { icon: Microscope, name: "Security Auditor" }, + { icon: Gauge, name: "Performance Optimizer" }, + { icon: FileText, name: "Documentation Writer" }, + { icon: Languages, name: "String Translator" }, +] + +export function AgentCarousel() { + const [emblaRef, emblaApi] = useEmblaCarousel( + { + loop: true, + align: "start", + watchDrag: true, + dragFree: false, + containScroll: false, + duration: 10000, + }, + [ + AutoPlay({ + playOnInit: true, + delay: 0, + stopOnInteraction: false, + stopOnMouseEnter: false, + stopOnFocusIn: false, + }), + ], + ) + + // Continuous scrolling effect + useEffect(() => { + if (!emblaApi) return + + const autoPlay = emblaApi?.plugins()?.autoPlay as + | { + play?: () => void + } + | undefined + + if (autoPlay?.play) { + autoPlay.play() + } + + // Set up continuous scrolling + const interval = setInterval(() => { + if (emblaApi) { + emblaApi.scrollNext() + } + }, 30) // Smooth continuous scroll + + return () => clearInterval(interval) + }, [emblaApi]) + + const containerVariants = { + hidden: { opacity: 0 }, + visible: { + opacity: 1, + transition: { + duration: 0.6, + ease: [0.21, 0.45, 0.27, 0.9], + }, + }, + } + + // Duplicate the agents array for seamless infinite scroll + const displayAgents = [...aiAgents, ...aiAgents] + + return ( + + {/* Gradient Overlays */} +
    +
    + + {/* Embla Carousel Container */} +
    +
    + {displayAgents.map((agent, index) => { + const Icon = agent.icon + return ( +
    +
    +
    + +

    + {agent.name} +

    +
    +
    +
    + ) + })} +
    +
    + + ) +} diff --git a/apps/web-roo-code/src/lib/analytics/consent-manager.ts b/apps/web-roo-code/src/lib/analytics/consent-manager.ts index 10ef71ee702..a99f6c19f2f 100644 --- a/apps/web-roo-code/src/lib/analytics/consent-manager.ts +++ b/apps/web-roo-code/src/lib/analytics/consent-manager.ts @@ -5,6 +5,7 @@ import { getCookieConsentValue } from "react-cookie-consent" import { CONSENT_COOKIE_NAME } from "@roo-code/types" +import posthog from "posthog-js" export const CONSENT_EVENT = "cookieConsentChanged" @@ -45,3 +46,27 @@ export function onConsentChange(callback: (consented: boolean) => void): () => v window.addEventListener(CONSENT_EVENT, handler) return () => window.removeEventListener(CONSENT_EVENT, handler) } + +/** + * Handle user accepting cookies + * Opts PostHog back into cookie-based tracking + */ +export function handleConsentAccept(): void { + if (typeof window !== "undefined" && posthog.__loaded) { + // User accepted - ensure localStorage+cookie persistence is enabled + posthog.opt_in_capturing() + posthog.set_config({ + persistence: "localStorage+cookie", + }) + } + dispatchConsentEvent(true) +} + +/** + * Handle user rejecting cookies + * Switches PostHog to cookieless (memory-only) mode + */ +export function handleConsentReject(): void { + // User rejected - stick to cookieless mode + dispatchConsentEvent(false) +} diff --git a/apps/web-roo-code/src/lib/analytics/google-ads.ts b/apps/web-roo-code/src/lib/analytics/google-ads.ts new file mode 100644 index 00000000000..29ced92e99e --- /dev/null +++ b/apps/web-roo-code/src/lib/analytics/google-ads.ts @@ -0,0 +1,17 @@ +/** + * Google Ads conversion tracking utilities + */ + +/** + * Track a Google Ads conversion event + * This should only be called after user consent has been given + */ +export function trackGoogleAdsConversion() { + if (typeof window !== "undefined" && window.gtag) { + window.gtag("event", "conversion", { + send_to: "AW-17391954825/VtOZCJe_77MbEInXkOVA", + value: 10.0, + currency: 
"USD", + }) + } +} diff --git a/apps/web-roo-code/src/lib/constants.ts b/apps/web-roo-code/src/lib/constants.ts index c474481805c..3fd6900953c 100644 --- a/apps/web-roo-code/src/lib/constants.ts +++ b/apps/web-roo-code/src/lib/constants.ts @@ -3,7 +3,7 @@ export const EXTERNAL_LINKS = { GITHUB_DISCUSSIONS: "https://github.com/RooCodeInc/Roo-Code/discussions", DISCORD: "https://discord.gg/roocode", REDDIT: "https://reddit.com/r/RooCode", - X: "https://x.com/roo_code", + X: "https://x.com/roocode", LINKEDIN: "https://www.linkedin.com/company/roo-code", TIKTOK: "https://www.tiktok.com/@roo.code", BLUESKY: "https://bsky.app/profile/roocode.bsky.social", @@ -26,6 +26,7 @@ export const EXTERNAL_LINKS = { TESTIMONIALS: "https://roocode.com/#testimonials", CLOUD_APP_LOGIN: "https://app.roocode.com/sign-in", CLOUD_APP_SIGNUP: "https://app.roocode.com/sign-up", + CLOUD_APP_SIGNUP_PRO: "https://app.roocode.com/sign-up?redirect_url=/cloud-agents/welcome", } export const INTERNAL_LINKS = { diff --git a/apps/web-roo-code/src/lib/og.ts b/apps/web-roo-code/src/lib/og.ts new file mode 100644 index 00000000000..e4b2605b1ec --- /dev/null +++ b/apps/web-roo-code/src/lib/og.ts @@ -0,0 +1,57 @@ +/** + * Generate a dynamic OpenGraph image URL + * @param title - The title to display on the OG image + * @param description - Optional description to display (will be truncated to ~140 chars) + * @returns Absolute URL to the dynamic OG image endpoint + */ +export function ogImageUrl(title: string, description?: string): string { + const baseUrl = process.env.NEXT_PUBLIC_APP_URL || "https://roocode.com" + const params = new URLSearchParams() + + params.set("title", title) + if (description) { + params.set("description", description) + } + + return `${baseUrl}/api/og?${params.toString()}` +} + +/** + * Generate OpenGraph metadata for a page with dynamic image + * @param title - The page title + * @param description - The page description + * @returns OpenGraph metadata object with dynamic image 
+ */ +export function getOgMetadata(title: string, description: string) { + const imageUrl = ogImageUrl(title, description) + + return { + title, + description, + images: [ + { + url: imageUrl, + width: 1200, + height: 630, + alt: title, + }, + ], + } +} + +/** + * Generate Twitter metadata for a page with dynamic image + * @param title - The page title + * @param description - The page description + * @returns Twitter metadata object with dynamic image + */ +export function getTwitterMetadata(title: string, description: string) { + const imageUrl = ogImageUrl(title, description) + + return { + card: "summary_large_image" as const, + title, + description, + images: [imageUrl], + } +} diff --git a/cli/src/commands/__tests__/model.test.ts b/cli/src/commands/__tests__/model.test.ts index f25dc8d3250..f48d50b192a 100644 --- a/cli/src/commands/__tests__/model.test.ts +++ b/cli/src/commands/__tests__/model.test.ts @@ -37,7 +37,7 @@ describe("/model command", () => { glama: {}, unbound: {}, requesty: {}, - "kilocode-openrouter": {}, + kilocode: {}, "io-intelligence": {}, deepinfra: {}, "vercel-ai-gateway": {}, @@ -400,7 +400,7 @@ describe("/model command", () => { glama: {}, unbound: {}, requesty: {}, - "kilocode-openrouter": {}, + kilocode: {}, "io-intelligence": {}, deepinfra: {}, "vercel-ai-gateway": {}, diff --git a/cli/src/constants/providers/__tests__/models.test.ts b/cli/src/constants/providers/__tests__/models.test.ts index b6b5dabaedc..d323c3049ce 100644 --- a/cli/src/constants/providers/__tests__/models.test.ts +++ b/cli/src/constants/providers/__tests__/models.test.ts @@ -30,7 +30,6 @@ describe("Static Provider Models", () => { "bedrock", "vertex", "openai-native", - "gemini", "mistral", "moonshot", "deepseek", @@ -38,13 +37,11 @@ describe("Static Provider Models", () => { "qwen-code", "xai", "groq", - "chutes", "cerebras", "sambanova", "zai", "fireworks", "featherless", - "roo", "claude-code", "gemini-cli", ] @@ -213,7 +210,7 @@ describe("Static Provider 
Models", () => { glama: {}, unbound: {}, requesty: {}, - "kilocode-openrouter": {}, + kilocode: {}, "io-intelligence": {}, deepinfra: {}, "vercel-ai-gateway": {}, diff --git a/cli/src/constants/providers/models.ts b/cli/src/constants/providers/models.ts index baa5e157366..13c11683d4d 100644 --- a/cli/src/constants/providers/models.ts +++ b/cli/src/constants/providers/models.ts @@ -56,7 +56,7 @@ export type RouterName = | "glama" | "unbound" | "litellm" - | "kilocode-openrouter" + | "kilocode" | "ollama" | "lmstudio" | "io-intelligence" @@ -94,7 +94,7 @@ export type RouterModels = Record * Mapping from ProviderName to RouterName for model fetching */ export const PROVIDER_TO_ROUTER_NAME: Record = { - kilocode: "kilocode-openrouter", + kilocode: "kilocode", openrouter: "openrouter", ollama: "ollama", lmstudio: "lmstudio", diff --git a/cli/src/utils/__tests__/context.test.ts b/cli/src/utils/__tests__/context.test.ts index 9634de44400..9714198c7bf 100644 --- a/cli/src/utils/__tests__/context.test.ts +++ b/cli/src/utils/__tests__/context.test.ts @@ -99,7 +99,7 @@ describe("context utilities", () => { } const routerModels: Partial = { - "kilocode-openrouter": { + kilocode: { "anthropic/claude-sonnet-4.5": { contextWindow: 200000, supportsPromptCache: true, diff --git a/packages/types/src/codebase-index.ts b/packages/types/src/codebase-index.ts index be7778f5387..8ad66cbb68b 100644 --- a/packages/types/src/codebase-index.ts +++ b/packages/types/src/codebase-index.ts @@ -22,7 +22,7 @@ export const codebaseIndexConfigSchema = z.object({ codebaseIndexEnabled: z.boolean().optional(), codebaseIndexQdrantUrl: z.string().optional(), codebaseIndexEmbedderProvider: z - .enum(["openai", "ollama", "openai-compatible", "gemini", "mistral", "vercel-ai-gateway"]) + .enum(["openai", "ollama", "openai-compatible", "gemini", "mistral", "vercel-ai-gateway", "openrouter"]) .optional(), codebaseIndexEmbedderBaseUrl: z.string().optional(), codebaseIndexEmbedderModelId: z.string().optional(), 
@@ -51,6 +51,7 @@ export const codebaseIndexModelsSchema = z.object({ gemini: z.record(z.string(), z.object({ dimension: z.number() })).optional(), mistral: z.record(z.string(), z.object({ dimension: z.number() })).optional(), "vercel-ai-gateway": z.record(z.string(), z.object({ dimension: z.number() })).optional(), + openrouter: z.record(z.string(), z.object({ dimension: z.number() })).optional(), }) export type CodebaseIndexModels = z.infer @@ -68,6 +69,7 @@ export const codebaseIndexProviderSchema = z.object({ codebaseIndexGeminiApiKey: z.string().optional(), codebaseIndexMistralApiKey: z.string().optional(), codebaseIndexVercelAiGatewayApiKey: z.string().optional(), + codebaseIndexOpenRouterApiKey: z.string().optional(), }) export type CodebaseIndexProvider = z.infer diff --git a/packages/types/src/global-settings.ts b/packages/types/src/global-settings.ts index 5a8ef907312..76cd4018e39 100644 --- a/packages/types/src/global-settings.ts +++ b/packages/types/src/global-settings.ts @@ -30,6 +30,21 @@ export const DEFAULT_WRITE_DELAY_MS = 1000 */ export const DEFAULT_TERMINAL_OUTPUT_CHARACTER_LIMIT = 50_000 +/** + * Minimum checkpoint timeout in seconds. + */ +export const MIN_CHECKPOINT_TIMEOUT_SECONDS = 10 + +/** + * Maximum checkpoint timeout in seconds. + */ +export const MAX_CHECKPOINT_TIMEOUT_SECONDS = 60 + +/** + * Default checkpoint timeout in seconds. 
+ */ +export const DEFAULT_CHECKPOINT_TIMEOUT_SECONDS = 15 + /** * GlobalSettings */ @@ -82,6 +97,17 @@ export const globalSettingsSchema = z.object({ maxConcurrentFileReads: z.number().optional(), allowVeryLargeReads: z.boolean().optional(), // kilocode_change + /** + * Whether to include current time in the environment details + * @default true + */ + includeCurrentTime: z.boolean().optional(), + /** + * Whether to include current cost in the environment details + * @default true + */ + includeCurrentCost: z.boolean().optional(), + /** * Whether to include diagnostic messages (errors, warnings) in tool outputs * @default true @@ -110,6 +136,12 @@ export const globalSettingsSchema = z.object({ cachedChromeHostUrl: z.string().optional(), enableCheckpoints: z.boolean().optional(), + checkpointTimeout: z + .number() + .int() + .min(MIN_CHECKPOINT_TIMEOUT_SECONDS) + .max(MAX_CHECKPOINT_TIMEOUT_SECONDS) + .optional(), // kilocode_change start - Auto-purge settings autoPurgeEnabled: z.boolean().optional(), @@ -220,6 +252,7 @@ export const SECRET_STATE_KEYS = [ "doubaoApiKey", "moonshotApiKey", "mistralApiKey", + "minimaxApiKey", "unboundApiKey", "requestyApiKey", "xaiApiKey", @@ -239,6 +272,7 @@ export const SECRET_STATE_KEYS = [ "codebaseIndexGeminiApiKey", "codebaseIndexMistralApiKey", "codebaseIndexVercelAiGatewayApiKey", + "codebaseIndexOpenRouterApiKey", "huggingFaceApiKey", "sambaNovaApiKey", "zaiApiKey", diff --git a/packages/types/src/message.ts b/packages/types/src/message.ts index 80c0dbb0edc..5ad9e75cfe2 100644 --- a/packages/types/src/message.ts +++ b/packages/types/src/message.ts @@ -228,8 +228,6 @@ export const clineMessageSchema = z.object({ gpt5: z .object({ previous_response_id: z.string().optional(), - instructions: z.string().optional(), - reasoning_summary: z.string().optional(), }) .optional(), kiloCode: kiloCodeMetaDataSchema.optional(), diff --git a/packages/types/src/model.ts b/packages/types/src/model.ts index 4f9fd5e58bc..3ab9ee41236 100644 --- 
a/packages/types/src/model.ts +++ b/packages/types/src/model.ts @@ -57,15 +57,18 @@ export const modelInfoSchema = z.object({ maxThinkingTokens: z.number().nullish(), contextWindow: z.number(), supportsImages: z.boolean().optional(), - supportsComputerUse: z.boolean().optional(), supportsPromptCache: z.boolean(), // Capability flag to indicate whether the model supports an output verbosity parameter supportsVerbosity: z.boolean().optional(), supportsReasoningBudget: z.boolean().optional(), + // Capability flag to indicate whether the model supports simple on/off binary reasoning + supportsReasoningBinary: z.boolean().optional(), // Capability flag to indicate whether the model supports temperature parameter supportsTemperature: z.boolean().optional(), requiredReasoningBudget: z.boolean().optional(), supportsReasoningEffort: z.boolean().optional(), + requiredReasoningEffort: z.boolean().optional(), + preserveReasoning: z.boolean().optional(), supportedParameters: z.array(modelParametersSchema).optional(), inputPrice: z.number().optional(), outputPrice: z.number().optional(), @@ -82,6 +85,8 @@ export const modelInfoSchema = z.object({ // kilocode_change end // Flag to indicate if the model is deprecated and should not be used deprecated: z.boolean().optional(), + // Flag to indicate if the model is free (no cost) + isFree: z.boolean().optional(), /** * Service tiers with pricing information. * Each tier can have a name (for OpenAI service tiers) and pricing overrides. 
diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 09fb266f58e..283cb6d1757 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -6,7 +6,6 @@ import { anthropicModels, bedrockModels, cerebrasModels, - chutesModels, claudeCodeModels, deepSeekModels, doubaoModels, @@ -22,12 +21,12 @@ import { moonshotModels, openAiNativeModels, qwenCodeModels, - rooModels, sambaNovaModels, vertexModels, vscodeLlmModels, xaiModels, internationalZAiModels, + minimaxModels, } from "./providers/index.js" import { toolUseStylesSchema } from "./kilocode/native-function-calling.js" @@ -49,7 +48,7 @@ export const dynamicProviders = [ "huggingface", "litellm", // kilocode_change start - "kilocode-openrouter", + "kilocode", "ovhcloud", "chutes", "gemini", @@ -60,6 +59,8 @@ export const dynamicProviders = [ "requesty", "unbound", "glama", + "roo", + "chutes", ] as const export type DynamicProvider = (typeof dynamicProviders)[number] @@ -131,7 +132,6 @@ export const providerNames = [ "anthropic", "bedrock", "cerebras", - "chutes", "claude-code", "doubao", "deepseek", @@ -142,6 +142,7 @@ export const providerNames = [ "groq", "mistral", "moonshot", + "minimax", "openai-native", "qwen-code", "roo", @@ -364,6 +365,13 @@ const moonshotSchema = apiModelIdProviderModelSchema.extend({ moonshotApiKey: z.string().optional(), }) +const minimaxSchema = apiModelIdProviderModelSchema.extend({ + minimaxBaseUrl: z + .union([z.literal("https://api.minimax.io/v1"), z.literal("https://api.minimaxi.com/v1")]) + .optional(), + minimaxApiKey: z.string().optional(), +}) + const unboundSchema = baseProviderSettingsSchema.extend({ unboundApiKey: z.string().optional(), unboundModelId: z.string().optional(), @@ -522,6 +530,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv deepInfraSchema.merge(z.object({ apiProvider: z.literal("deepinfra") })), doubaoSchema.merge(z.object({ apiProvider: 
z.literal("doubao") })), moonshotSchema.merge(z.object({ apiProvider: z.literal("moonshot") })), + minimaxSchema.merge(z.object({ apiProvider: z.literal("minimax") })), unboundSchema.merge(z.object({ apiProvider: z.literal("unbound") })), requestySchema.merge(z.object({ apiProvider: z.literal("requesty") })), humanRelaySchema.merge(z.object({ apiProvider: z.literal("human-relay") })), @@ -577,6 +586,7 @@ export const providerSettingsSchema = z.object({ ...deepInfraSchema.shape, ...doubaoSchema.shape, ...moonshotSchema.shape, + ...minimaxSchema.shape, ...unboundSchema.shape, ...requestySchema.shape, ...humanRelaySchema.shape, @@ -655,7 +665,7 @@ export const modelIdKeysByProvider: Record = { "claude-code": "apiModelId", glama: "glamaModelId", openrouter: "openRouterModelId", - "kilocode-openrouter": "openRouterModelId", + kilocode: "kilocodeModel", bedrock: "apiModelId", vertex: "apiModelId", "openai-native": "openAiModelId", @@ -665,6 +675,7 @@ export const modelIdKeysByProvider: Record = { "gemini-cli": "apiModelId", mistral: "apiModelId", moonshot: "apiModelId", + minimax: "apiModelId", deepseek: "apiModelId", deepinfra: "deepInfraModelId", doubao: "apiModelId", @@ -685,7 +696,6 @@ export const modelIdKeysByProvider: Record = { "io-intelligence": "ioIntelligenceModelId", roo: "apiModelId", "vercel-ai-gateway": "vercelAiGatewayModelId", - kilocode: "kilocodeModel", "virtual-quota-fallback": "apiModelId", ovhcloud: "ovhCloudAiEndpointsModelId", // kilocode_change inception: "inceptionLabsModelId", // kilocode_change @@ -708,7 +718,12 @@ export const getApiProtocol = (provider: ProviderName | undefined, modelId?: str } // Vercel AI Gateway uses anthropic protocol for anthropic models. 
- if (provider && provider === "vercel-ai-gateway" && modelId && modelId.toLowerCase().startsWith("anthropic/")) { + if ( + provider && + ["vercel-ai-gateway", "roo"].includes(provider) && + modelId && + modelId.toLowerCase().startsWith("anthropic/") + ) { return "anthropic" } @@ -738,11 +753,6 @@ export const MODELS_BY_PROVIDER: Record< label: "Cerebras", models: Object.keys(cerebrasModels), }, - chutes: { - id: "chutes", - label: "Chutes AI", - models: Object.keys(chutesModels), - }, "claude-code": { id: "claude-code", label: "Claude Code", models: Object.keys(claudeCodeModels) }, deepseek: { id: "deepseek", @@ -788,13 +798,18 @@ export const MODELS_BY_PROVIDER: Record< label: "Moonshot", models: Object.keys(moonshotModels), }, + minimax: { + id: "minimax", + label: "MiniMax", + models: Object.keys(minimaxModels), + }, "openai-native": { id: "openai-native", label: "OpenAI", models: Object.keys(openAiNativeModels), }, "qwen-code": { id: "qwen-code", label: "Qwen Code", models: Object.keys(qwenCodeModels) }, - roo: { id: "roo", label: "Roo", models: Object.keys(rooModels) }, + roo: { id: "roo", label: "Roo Code Cloud", models: [] }, sambanova: { id: "sambanova", label: "SambaNova", @@ -825,11 +840,11 @@ export const MODELS_BY_PROVIDER: Record< ovhcloud: { id: "ovhcloud", label: "OVHcloud AI Endpoints", models: [] }, inception: { id: "inception", label: "Inception", models: [] }, kilocode: { id: "kilocode", label: "Kilocode", models: [] }, - "kilocode-openrouter": { id: "kilocode-openrouter", label: "Kilocode", models: [] }, // temporarily needed to satisfy because we're using 2 inconsistent names apparently "virtual-quota-fallback": { id: "virtual-quota-fallback", label: "Virtual Quota Fallback", models: [] }, // kilocode_change end deepinfra: { id: "deepinfra", label: "DeepInfra", models: [] }, "vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] }, + chutes: { id: "chutes", label: "Chutes AI", models: [] }, // Local providers; 
models discovered from localhost endpoints. lmstudio: { id: "lmstudio", label: "LM Studio", models: [] }, diff --git a/packages/types/src/providers/anthropic.ts b/packages/types/src/providers/anthropic.ts index e6a85e7393f..a5dc0fbf9b0 100644 --- a/packages/types/src/providers/anthropic.ts +++ b/packages/types/src/providers/anthropic.ts @@ -10,7 +10,6 @@ export const anthropicModels = { maxTokens: 64_000, // Overridden to 8k if `enableReasoningEffort` is false. contextWindow: 200_000, // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07' supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, // $3 per million input tokens (≤200K context) outputPrice: 15.0, // $15 per million output tokens (≤200K context) @@ -32,7 +31,6 @@ export const anthropicModels = { maxTokens: 64_000, // Overridden to 8k if `enableReasoningEffort` is false. contextWindow: 200_000, // Default 200K, extendable to 1M with beta flag 'context-1m-2025-08-07' supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, // $3 per million input tokens (≤200K context) outputPrice: 15.0, // $15 per million output tokens (≤200K context) @@ -54,7 +52,6 @@ export const anthropicModels = { maxTokens: 32_000, // kilocode_change: https://openrouter.ai/anthropic/claude-opus-4.1/providers contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 15.0, // $15 per million input tokens outputPrice: 75.0, // $75 per million output tokens @@ -66,7 +63,6 @@ export const anthropicModels = { maxTokens: 32_000, // Overridden to 8k if `enableReasoningEffort` is false. 
contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 15.0, // $15 per million input tokens outputPrice: 75.0, // $75 per million output tokens @@ -78,7 +74,6 @@ export const anthropicModels = { maxTokens: 128_000, // Unlocked by passing `beta` flag to the model. Otherwise, it's 64k. contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, // $3 per million input tokens outputPrice: 15.0, // $15 per million output tokens @@ -91,7 +86,6 @@ export const anthropicModels = { maxTokens: 8192, // Since we already have a `:thinking` virtual model we aren't setting `supportsReasoningBudget: true` here. contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, // $3 per million input tokens outputPrice: 15.0, // $15 per million output tokens @@ -102,7 +96,6 @@ export const anthropicModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, // $3 per million input tokens outputPrice: 15.0, // $15 per million output tokens diff --git a/packages/types/src/providers/bedrock.ts b/packages/types/src/providers/bedrock.ts index 251757cad85..9935d90b127 100644 --- a/packages/types/src/providers/bedrock.ts +++ b/packages/types/src/providers/bedrock.ts @@ -17,7 +17,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, supportsReasoningBudget: true, inputPrice: 3.0, @@ -32,7 +31,6 @@ export const bedrockModels = { maxTokens: 5000, contextWindow: 300_000, supportsImages: true, - supportsComputerUse: false, supportsPromptCache: true, inputPrice: 0.8, outputPrice: 3.2, @@ -46,7 +44,6 @@ export const bedrockModels = { maxTokens: 5000, contextWindow: 300_000, supportsImages: true, - supportsComputerUse: false, 
supportsPromptCache: false, inputPrice: 1.0, outputPrice: 4.0, @@ -58,7 +55,6 @@ export const bedrockModels = { maxTokens: 5000, contextWindow: 300_000, supportsImages: true, - supportsComputerUse: false, supportsPromptCache: true, inputPrice: 0.06, outputPrice: 0.24, @@ -72,7 +68,6 @@ export const bedrockModels = { maxTokens: 5000, contextWindow: 128_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: true, inputPrice: 0.035, outputPrice: 0.14, @@ -86,7 +81,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, supportsReasoningBudget: true, inputPrice: 3.0, @@ -101,7 +95,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, supportsReasoningBudget: true, inputPrice: 15.0, @@ -116,7 +109,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, supportsReasoningBudget: true, inputPrice: 15.0, @@ -131,7 +123,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, supportsReasoningBudget: true, inputPrice: 3.0, @@ -146,7 +137,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, @@ -254,7 +244,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 128_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.5, outputPrice: 1.5, @@ -264,7 +253,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 128_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 2.0, outputPrice: 6.0, @@ -274,7 +262,6 @@ export 
const bedrockModels = { maxTokens: 8192, contextWindow: 128_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.72, outputPrice: 0.72, @@ -284,7 +271,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 128_000, supportsImages: true, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.72, outputPrice: 0.72, @@ -294,7 +280,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 128_000, supportsImages: true, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.16, outputPrice: 0.16, @@ -304,7 +289,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 128_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.15, outputPrice: 0.15, @@ -314,7 +298,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 128_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.1, outputPrice: 0.1, @@ -324,7 +307,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 128_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 2.4, outputPrice: 2.4, @@ -334,7 +316,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 128_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.72, outputPrice: 0.72, @@ -344,7 +325,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 128_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.9, outputPrice: 0.9, @@ -354,7 +334,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 8_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.22, outputPrice: 0.22, @@ -364,7 +343,6 @@ export const bedrockModels = { maxTokens: 2048, contextWindow: 8_000, supportsImages: false, - supportsComputerUse: false, 
supportsPromptCache: false, inputPrice: 2.65, outputPrice: 3.5, @@ -373,7 +351,6 @@ export const bedrockModels = { maxTokens: 2048, contextWindow: 4_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.3, outputPrice: 0.6, @@ -382,7 +359,6 @@ export const bedrockModels = { maxTokens: 4096, contextWindow: 8_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.15, outputPrice: 0.2, @@ -392,7 +368,6 @@ export const bedrockModels = { maxTokens: 4096, contextWindow: 8_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.2, outputPrice: 0.6, @@ -402,7 +377,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 8_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.1, description: "Amazon Titan Text Embeddings", @@ -411,7 +385,6 @@ export const bedrockModels = { maxTokens: 8192, contextWindow: 8_000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 0.02, description: "Amazon Titan Text Embeddings V2", diff --git a/packages/types/src/providers/chutes.ts b/packages/types/src/providers/chutes.ts index eb58e466666..69e6b2e68b7 100644 --- a/packages/types/src/providers/chutes.ts +++ b/packages/types/src/providers/chutes.ts @@ -34,6 +34,8 @@ export type ChutesModelId = | "zai-org/GLM-4.5-FP8" | "zai-org/GLM-4.5-turbo" | "zai-org/GLM-4.6-FP8" + | "zai-org/GLM-4.6-turbo" + | "meituan-longcat/LongCat-Flash-Thinking-FP8" | "moonshotai/Kimi-K2-Instruct-75k" | "moonshotai/Kimi-K2-Instruct-0905" | "Qwen/Qwen3-235B-A22B-Thinking-2507" @@ -41,7 +43,7 @@ export type ChutesModelId = | "Qwen/Qwen3-Next-80B-A3B-Thinking" | "Qwen/Qwen3-VL-235B-A22B-Thinking" -export const chutesDefaultModelId: ChutesModelId = "zai-org/GLM-4.6-FP8" // kilocode_change +export const chutesDefaultModelId: ChutesModelId = "deepseek-ai/DeepSeek-R1-0528" export const 
chutesModels = { "deepseek-ai/DeepSeek-R1-0528": { @@ -329,6 +331,25 @@ export const chutesModels = { description: "GLM-4.6 introduces major upgrades over GLM-4.5, including a longer 200K-token context window for complex tasks, stronger coding performance in benchmarks and real-world tools (such as Claude Code, Cline, Roo Code, and Kilo Code), improved reasoning with tool use during inference, more capable and efficient agent integration, and refined writing that better matches human style, readability, and natural role-play scenarios.", }, + "zai-org/GLM-4.6-turbo": { + maxTokens: 202752, // From Chutes /v1/models: max_output_length + contextWindow: 202752, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 1.15, + outputPrice: 3.25, + description: "GLM-4.6-turbo model with 200K-token context window, optimized for fast inference.", + }, + "meituan-longcat/LongCat-Flash-Thinking-FP8": { + maxTokens: 32768, + contextWindow: 128000, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0, + outputPrice: 0, + description: + "LongCat Flash Thinking FP8 model with 128K context window, optimized for complex reasoning and coding tasks.", + }, "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8": { maxTokens: 32768, contextWindow: 262144, @@ -396,3 +417,5 @@ export const chutesModels = { "Qwen3‑VL‑235B‑A22B‑Thinking is an open‑weight MoE vision‑language model (235B total, ~22B activated) optimized for deliberate multi‑step reasoning with strong text‑image‑video understanding and long‑context capabilities.", }, } as const satisfies Record + +export const chutesDefaultModelInfo: ModelInfo = chutesModels[chutesDefaultModelId] diff --git a/packages/types/src/providers/fireworks.ts b/packages/types/src/providers/fireworks.ts index 694ad0d1260..660e5680756 100644 --- a/packages/types/src/providers/fireworks.ts +++ b/packages/types/src/providers/fireworks.ts @@ -3,6 +3,7 @@ import type { ModelInfo } from "../model.js" export type FireworksModelId = | 
"accounts/fireworks/models/kimi-k2-instruct" | "accounts/fireworks/models/kimi-k2-instruct-0905" + | "accounts/fireworks/models/minimax-m2" | "accounts/fireworks/models/qwen3-235b-a22b-instruct-2507" | "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct" | "accounts/fireworks/models/deepseek-r1-0528" @@ -10,6 +11,7 @@ export type FireworksModelId = | "accounts/fireworks/models/deepseek-v3p1" | "accounts/fireworks/models/glm-4p5" | "accounts/fireworks/models/glm-4p5-air" + | "accounts/fireworks/models/glm-4p6" | "accounts/fireworks/models/gpt-oss-20b" | "accounts/fireworks/models/gpt-oss-120b" @@ -37,6 +39,16 @@ export const fireworksModels = { description: "Kimi K2 is a state-of-the-art mixture-of-experts (MoE) language model with 32 billion activated parameters and 1 trillion total parameters. Trained with the Muon optimizer, Kimi K2 achieves exceptional performance across frontier knowledge, reasoning, and coding tasks while being meticulously optimized for agentic capabilities.", }, + "accounts/fireworks/models/minimax-m2": { + maxTokens: 4096, + contextWindow: 204800, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.3, + outputPrice: 1.2, + description: + "MiniMax M2 is a high-performance language model with 204.8K context window, optimized for long-context understanding and generation tasks.", + }, "accounts/fireworks/models/qwen3-235b-a22b-instruct-2507": { maxTokens: 32768, contextWindow: 256000, @@ -105,6 +117,16 @@ export const fireworksModels = { description: "Z.ai GLM-4.5-Air with 106B total parameters and 12B active parameters. Features unified reasoning, coding, and intelligent agent capabilities.", }, + "accounts/fireworks/models/glm-4p6": { + maxTokens: 25344, + contextWindow: 198000, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.55, + outputPrice: 2.19, + description: + "Z.ai GLM-4.6 is an advanced coding model with exceptional performance on complex programming tasks. 
Features improved reasoning capabilities and enhanced code generation quality, making it ideal for software development workflows.", + }, "accounts/fireworks/models/gpt-oss-20b": { maxTokens: 16384, contextWindow: 128000, diff --git a/packages/types/src/providers/gemini.ts b/packages/types/src/providers/gemini.ts index a7225c7330f..aae428d90c0 100644 --- a/packages/types/src/providers/gemini.ts +++ b/packages/types/src/providers/gemini.ts @@ -6,25 +6,73 @@ export type GeminiModelId = keyof typeof geminiModels export const geminiDefaultModelId: GeminiModelId = "gemini-2.0-flash-001" export const geminiModels = { - "gemini-2.5-flash-preview-04-17:thinking": { - maxTokens: 65_535, + // Latest models (pointing to the most recent stable versions) + "gemini-flash-latest": { + maxTokens: 65_536, contextWindow: 1_048_576, supportsImages: true, - supportsPromptCache: false, - inputPrice: 0.15, - outputPrice: 3.5, + supportsPromptCache: true, + inputPrice: 0.3, + outputPrice: 2.5, + cacheReadsPrice: 0.075, + cacheWritesPrice: 1.0, maxThinkingTokens: 24_576, supportsReasoningBudget: true, - requiredReasoningBudget: true, }, - "gemini-2.5-flash-preview-04-17": { - maxTokens: 65_535, + "gemini-flash-lite-latest": { + maxTokens: 65_536, contextWindow: 1_048_576, supportsImages: true, - supportsPromptCache: false, - inputPrice: 0.15, - outputPrice: 0.6, + supportsPromptCache: true, + inputPrice: 0.1, + outputPrice: 0.4, + cacheReadsPrice: 0.025, + cacheWritesPrice: 1.0, + supportsReasoningBudget: true, + maxThinkingTokens: 24_576, + }, + + // 2.5 Flash models (09-2025 versions - most recent) + "gemini-2.5-flash-preview-09-2025": { + maxTokens: 65_536, + contextWindow: 1_048_576, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 0.3, + outputPrice: 2.5, + cacheReadsPrice: 0.075, + cacheWritesPrice: 1.0, + maxThinkingTokens: 24_576, + supportsReasoningBudget: true, + }, + "gemini-2.5-flash-lite-preview-09-2025": { + maxTokens: 65_536, + contextWindow: 1_048_576, + 
supportsImages: true, + supportsPromptCache: true, + inputPrice: 0.1, + outputPrice: 0.4, + cacheReadsPrice: 0.025, + cacheWritesPrice: 1.0, + supportsReasoningBudget: true, + maxThinkingTokens: 24_576, + }, + + // 2.5 Flash models (06-17 version) + "gemini-2.5-flash-lite-preview-06-17": { + maxTokens: 64_000, + contextWindow: 1_048_576, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 0.1, + outputPrice: 0.4, + cacheReadsPrice: 0.025, + cacheWritesPrice: 1.0, + supportsReasoningBudget: true, + maxThinkingTokens: 24_576, }, + + // 2.5 Flash models (05-20 versions) "gemini-2.5-flash-preview-05-20:thinking": { maxTokens: 65_535, contextWindow: 1_048_576, @@ -48,6 +96,29 @@ export const geminiModels = { cacheReadsPrice: 0.0375, cacheWritesPrice: 1.0, }, + + // 2.5 Flash models (04-17 versions) + "gemini-2.5-flash-preview-04-17:thinking": { + maxTokens: 65_535, + contextWindow: 1_048_576, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0.15, + outputPrice: 3.5, + maxThinkingTokens: 24_576, + supportsReasoningBudget: true, + requiredReasoningBudget: true, + }, + "gemini-2.5-flash-preview-04-17": { + maxTokens: 65_535, + contextWindow: 1_048_576, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0.15, + outputPrice: 0.6, + }, + + // 2.5 Flash stable "gemini-2.5-flash": { maxTokens: 64_000, contextWindow: 1_048_576, @@ -60,15 +131,9 @@ export const geminiModels = { maxThinkingTokens: 24_576, supportsReasoningBudget: true, }, - "gemini-2.5-pro-exp-03-25": { - maxTokens: 65_535, - contextWindow: 1_048_576, - supportsImages: true, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - }, - "gemini-2.5-pro-preview-03-25": { + + // 2.5 Pro models + "gemini-2.5-pro-preview-06-05": { maxTokens: 65_535, contextWindow: 1_048_576, supportsImages: true, @@ -77,6 +142,8 @@ export const geminiModels = { outputPrice: 15, cacheReadsPrice: 0.625, cacheWritesPrice: 4.5, + maxThinkingTokens: 32_768, + 
supportsReasoningBudget: true, tiers: [ { contextWindow: 200_000, @@ -116,7 +183,7 @@ export const geminiModels = { }, ], }, - "gemini-2.5-pro-preview-06-05": { + "gemini-2.5-pro-preview-03-25": { maxTokens: 65_535, contextWindow: 1_048_576, supportsImages: true, @@ -142,6 +209,14 @@ export const geminiModels = { }, ], }, + "gemini-2.5-pro-exp-03-25": { + maxTokens: 65_535, + contextWindow: 1_048_576, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0, + outputPrice: 0, + }, "gemini-2.5-pro": { maxTokens: 64_000, contextWindow: 1_048_576, @@ -169,16 +244,8 @@ export const geminiModels = { }, ], }, - "gemini-2.0-flash-001": { - maxTokens: 8192, - contextWindow: 1_048_576, - supportsImages: true, - supportsPromptCache: true, - inputPrice: 0.1, - outputPrice: 0.4, - cacheReadsPrice: 0.025, - cacheWritesPrice: 1.0, - }, + + // 2.0 Flash models "gemini-2.0-flash-lite-preview-02-05": { maxTokens: 8192, contextWindow: 1_048_576, @@ -187,14 +254,6 @@ export const geminiModels = { inputPrice: 0, outputPrice: 0, }, - "gemini-2.0-pro-exp-02-05": { - maxTokens: 8192, - contextWindow: 2_097_152, - supportsImages: true, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - }, "gemini-2.0-flash-thinking-exp-01-21": { maxTokens: 65_536, contextWindow: 1_048_576, @@ -219,6 +278,28 @@ export const geminiModels = { inputPrice: 0, outputPrice: 0, }, + "gemini-2.0-flash-001": { + maxTokens: 8192, + contextWindow: 1_048_576, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 0.1, + outputPrice: 0.4, + cacheReadsPrice: 0.025, + cacheWritesPrice: 1.0, + }, + + // 2.0 Pro models + "gemini-2.0-pro-exp-02-05": { + maxTokens: 8192, + contextWindow: 2_097_152, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0, + outputPrice: 0, + }, + + // 1.5 Flash models "gemini-1.5-flash-002": { maxTokens: 8192, contextWindow: 1_048_576, @@ -259,6 +340,8 @@ export const geminiModels = { inputPrice: 0, outputPrice: 0, }, + + // 1.5 Pro 
models "gemini-1.5-pro-002": { maxTokens: 8192, contextWindow: 2_097_152, @@ -275,6 +358,8 @@ export const geminiModels = { inputPrice: 0, outputPrice: 0, }, + + // Experimental models "gemini-exp-1206": { maxTokens: 8192, contextWindow: 2_097_152, @@ -283,16 +368,4 @@ export const geminiModels = { inputPrice: 0, outputPrice: 0, }, - "gemini-2.5-flash-lite-preview-06-17": { - maxTokens: 64_000, - contextWindow: 1_048_576, - supportsImages: true, - supportsPromptCache: true, - inputPrice: 0.1, - outputPrice: 0.4, - cacheReadsPrice: 0.025, - cacheWritesPrice: 1.0, - supportsReasoningBudget: true, - maxThinkingTokens: 24_576, - }, } as const satisfies Record diff --git a/packages/types/src/providers/glama.ts b/packages/types/src/providers/glama.ts index ea05d2c47fb..98aedc831b7 100644 --- a/packages/types/src/providers/glama.ts +++ b/packages/types/src/providers/glama.ts @@ -7,7 +7,6 @@ export const glamaDefaultModelInfo: ModelInfo = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, diff --git a/packages/types/src/providers/index.ts b/packages/types/src/providers/index.ts index 7f1c1fc079f..097797e0f4c 100644 --- a/packages/types/src/providers/index.ts +++ b/packages/types/src/providers/index.ts @@ -36,3 +36,4 @@ export * from "./xai.js" export * from "./vercel-ai-gateway.js" export * from "./zai.js" export * from "./deepinfra.js" +export * from "./minimax.js" diff --git a/packages/types/src/providers/lite-llm.ts b/packages/types/src/providers/lite-llm.ts index 715b279c36b..14a68cfc3c3 100644 --- a/packages/types/src/providers/lite-llm.ts +++ b/packages/types/src/providers/lite-llm.ts @@ -7,48 +7,9 @@ export const litellmDefaultModelInfo: ModelInfo = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, cacheWritesPrice: 3.75, cacheReadsPrice: 0.3, } - -export 
const LITELLM_COMPUTER_USE_MODELS = new Set([ - "claude-3-5-sonnet-latest", - "claude-opus-4-1-20250805", - "claude-opus-4-20250514", - "claude-sonnet-4-20250514", - "claude-3-7-sonnet-latest", - "claude-3-7-sonnet-20250219", - "claude-3-5-sonnet-20241022", - "vertex_ai/claude-3-5-sonnet", - "vertex_ai/claude-3-5-sonnet-v2", - "vertex_ai/claude-3-5-sonnet-v2@20241022", - "vertex_ai/claude-3-7-sonnet@20250219", - "vertex_ai/claude-opus-4-1@20250805", - "vertex_ai/claude-opus-4@20250514", - "vertex_ai/claude-sonnet-4@20250514", - "vertex_ai/claude-sonnet-4-5@20250929", - "openrouter/anthropic/claude-3.5-sonnet", - "openrouter/anthropic/claude-3.5-sonnet:beta", - "openrouter/anthropic/claude-3.7-sonnet", - "openrouter/anthropic/claude-3.7-sonnet:beta", - "anthropic.claude-opus-4-1-20250805-v1:0", - "anthropic.claude-opus-4-20250514-v1:0", - "anthropic.claude-sonnet-4-20250514-v1:0", - "anthropic.claude-3-7-sonnet-20250219-v1:0", - "anthropic.claude-3-5-sonnet-20241022-v2:0", - "us.anthropic.claude-3-5-sonnet-20241022-v2:0", - "us.anthropic.claude-3-7-sonnet-20250219-v1:0", - "us.anthropic.claude-opus-4-1-20250805-v1:0", - "us.anthropic.claude-opus-4-20250514-v1:0", - "us.anthropic.claude-sonnet-4-20250514-v1:0", - "eu.anthropic.claude-3-5-sonnet-20241022-v2:0", - "eu.anthropic.claude-3-7-sonnet-20250219-v1:0", - "eu.anthropic.claude-opus-4-1-20250805-v1:0", - "eu.anthropic.claude-opus-4-20250514-v1:0", - "eu.anthropic.claude-sonnet-4-20250514-v1:0", - "snowflake/claude-3-5-sonnet", -]) diff --git a/packages/types/src/providers/lm-studio.ts b/packages/types/src/providers/lm-studio.ts index 9e39ae56081..d0df1344702 100644 --- a/packages/types/src/providers/lm-studio.ts +++ b/packages/types/src/providers/lm-studio.ts @@ -9,7 +9,6 @@ export const lMStudioDefaultModelInfo: ModelInfo = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 0, outputPrice: 0, diff --git 
a/packages/types/src/providers/minimax.ts b/packages/types/src/providers/minimax.ts new file mode 100644 index 00000000000..47362e01bff --- /dev/null +++ b/packages/types/src/providers/minimax.ts @@ -0,0 +1,25 @@ +import type { ModelInfo } from "../model.js" + +// Minimax +// https://www.minimax.io/platform/document/text_api_intro +// https://www.minimax.io/platform/document/pricing +export type MinimaxModelId = keyof typeof minimaxModels +export const minimaxDefaultModelId: MinimaxModelId = "MiniMax-M2" + +export const minimaxModels = { + "MiniMax-M2": { + maxTokens: 16_384, + contextWindow: 192_000, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.3, + outputPrice: 1.2, + cacheWritesPrice: 0, + cacheReadsPrice: 0, + preserveReasoning: true, + description: + "MiniMax M2, a model born for Agents and code, featuring Top-tier Coding Capabilities, Powerful Agentic Performance, and Ultimate Cost-Effectiveness & Speed.", + }, +} as const satisfies Record + +export const MINIMAX_DEFAULT_TEMPERATURE = 1.0 diff --git a/packages/types/src/providers/mistral.ts b/packages/types/src/providers/mistral.ts index 429e508a833..c578e80f426 100644 --- a/packages/types/src/providers/mistral.ts +++ b/packages/types/src/providers/mistral.ts @@ -7,9 +7,9 @@ export const mistralDefaultModelId: MistralModelId = "codestral-latest" export const mistralModels = { "magistral-medium-latest": { - maxTokens: 41_000, - contextWindow: 41_000, - supportsImages: false, + maxTokens: 8192, + contextWindow: 128_000, + supportsImages: true, supportsPromptCache: false, inputPrice: 2.0, outputPrice: 5.0, diff --git a/packages/types/src/providers/ollama.ts b/packages/types/src/providers/ollama.ts index d269da8f4d0..160083511fa 100644 --- a/packages/types/src/providers/ollama.ts +++ b/packages/types/src/providers/ollama.ts @@ -7,7 +7,6 @@ export const ollamaDefaultModelInfo: ModelInfo = { maxTokens: 4096, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, 
supportsPromptCache: true, inputPrice: 0, outputPrice: 0, diff --git a/packages/types/src/providers/openrouter.ts b/packages/types/src/providers/openrouter.ts index 97f36a0c2b2..3a77ba14fc6 100644 --- a/packages/types/src/providers/openrouter.ts +++ b/packages/types/src/providers/openrouter.ts @@ -7,7 +7,6 @@ export const openRouterDefaultModelInfo: ModelInfo = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, @@ -41,6 +40,7 @@ export const OPEN_ROUTER_PROMPT_CACHING_MODELS = new Set([ "anthropic/claude-sonnet-4.5", "anthropic/claude-opus-4", "anthropic/claude-opus-4.1", + "anthropic/claude-haiku-4.5", "google/gemini-2.5-flash-preview", "google/gemini-2.5-flash-preview:thinking", "google/gemini-2.5-flash-preview-05-20", @@ -52,19 +52,6 @@ export const OPEN_ROUTER_PROMPT_CACHING_MODELS = new Set([ "google/gemini-flash-1.5-8b", ]) -// https://www.anthropic.com/news/3-5-models-and-computer-use -export const OPEN_ROUTER_COMPUTER_USE_MODELS = new Set([ - "anthropic/claude-3.5-sonnet", - "anthropic/claude-3.5-sonnet:beta", - "anthropic/claude-3.7-sonnet", - "anthropic/claude-3.7-sonnet:beta", - "anthropic/claude-3.7-sonnet:thinking", - "anthropic/claude-sonnet-4", - "anthropic/claude-sonnet-4.5", - "anthropic/claude-opus-4", - "anthropic/claude-opus-4.1", -]) - // When we first launched these models we didn't have support for // enabling/disabling the reasoning budget for hybrid models. 
Now that we // do support this we should give users the option to enable/disable it diff --git a/packages/types/src/providers/requesty.ts b/packages/types/src/providers/requesty.ts index 8bc7d720d5e..d312adb3976 100644 --- a/packages/types/src/providers/requesty.ts +++ b/packages/types/src/providers/requesty.ts @@ -8,7 +8,6 @@ export const requestyDefaultModelInfo: ModelInfo = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, diff --git a/packages/types/src/providers/roo.ts b/packages/types/src/providers/roo.ts index fd705b1eb97..0b7ed89bd92 100644 --- a/packages/types/src/providers/roo.ts +++ b/packages/types/src/providers/roo.ts @@ -1,53 +1,49 @@ +import { z } from "zod" + import type { ModelInfo } from "../model.js" -export type RooModelId = - | "xai/grok-code-fast-1" - | "roo/code-supernova-1-million" - | "xai/grok-4-fast" - | "deepseek/deepseek-chat-v3.1" - -export const rooDefaultModelId: RooModelId = "xai/grok-code-fast-1" - -export const rooModels = { - "xai/grok-code-fast-1": { - maxTokens: 16_384, - contextWindow: 262_144, - supportsImages: false, - supportsPromptCache: true, - inputPrice: 0, - outputPrice: 0, - description: - "A reasoning model that is blazing fast and excels at agentic coding, accessible for free through Roo Code Cloud for a limited time. (Note: the free prompts and completions are logged by xAI and used to improve the model.)", - }, - "roo/code-supernova-1-million": { - maxTokens: 30_000, - contextWindow: 1_000_000, - supportsImages: true, - supportsPromptCache: true, - inputPrice: 0, - outputPrice: 0, - description: - "A versatile agentic coding stealth model with a 1M token context window that supports image inputs, accessible for free through Roo Code Cloud for a limited time. 
(Note: the free prompts and completions are logged by the model provider and used to improve the model.)", - }, - "xai/grok-4-fast": { - maxTokens: 30_000, - contextWindow: 2_000_000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: - "Grok 4 Fast is xAI's latest multimodal model with SOTA cost-efficiency and a 2M token context window. (Note: prompts and completions are logged by xAI and used to improve the model.)", - deprecated: true, - }, - "deepseek/deepseek-chat-v3.1": { - maxTokens: 16_384, - contextWindow: 163_840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: - "DeepSeek-V3.1 is a large hybrid reasoning model (671B parameters, 37B active). It extends the DeepSeek-V3 base with a two-phase long-context training process, reaching up to 128K tokens, and uses FP8 microscaling for efficient inference.", - }, -} as const satisfies Record +/** + * Roo Code Cloud is a dynamic provider - models are loaded from the /v1/models API endpoint. + * Default model ID used as fallback when no model is specified. + */ +export const rooDefaultModelId = "xai/grok-code-fast-1" + +/** + * Empty models object maintained for type compatibility. + * All model data comes dynamically from the API. 
+ */ +export const rooModels = {} as const satisfies Record + +/** + * Roo Code Cloud API response schemas + */ + +export const RooPricingSchema = z.object({ + input: z.string(), + output: z.string(), + input_cache_read: z.string().optional(), + input_cache_write: z.string().optional(), +}) + +export const RooModelSchema = z.object({ + id: z.string(), + object: z.literal("model"), + created: z.number(), + owned_by: z.string(), + name: z.string(), + description: z.string(), + context_window: z.number(), + max_tokens: z.number(), + type: z.literal("language"), + tags: z.array(z.string()).optional(), + pricing: RooPricingSchema, + deprecated: z.boolean().optional(), +}) + +export const RooModelsResponseSchema = z.object({ + object: z.literal("list"), + data: z.array(RooModelSchema), +}) + +export type RooModel = z.infer +export type RooModelsResponse = z.infer diff --git a/packages/types/src/providers/vercel-ai-gateway.ts b/packages/types/src/providers/vercel-ai-gateway.ts index 70cf49b4197..875b87bf8b5 100644 --- a/packages/types/src/providers/vercel-ai-gateway.ts +++ b/packages/types/src/providers/vercel-ai-gateway.ts @@ -89,7 +89,6 @@ export const vercelAiGatewayDefaultModelInfo: ModelInfo = { maxTokens: 64000, contextWindow: 200000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3, outputPrice: 15, diff --git a/packages/types/src/providers/vertex.ts b/packages/types/src/providers/vertex.ts index 199b139ddd3..f277c58a3ef 100644 --- a/packages/types/src/providers/vertex.ts +++ b/packages/types/src/providers/vertex.ts @@ -167,7 +167,6 @@ export const vertexModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, @@ -179,7 +178,6 @@ export const vertexModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, @@ 
-202,7 +200,6 @@ export const vertexModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 15.0, outputPrice: 75.0, @@ -214,7 +211,6 @@ export const vertexModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 15.0, outputPrice: 75.0, @@ -225,7 +221,6 @@ export const vertexModels = { maxTokens: 64_000, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, @@ -238,7 +233,6 @@ export const vertexModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, @@ -249,7 +243,6 @@ export const vertexModels = { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, diff --git a/packages/types/src/providers/zai.ts b/packages/types/src/providers/zai.ts index 1631b540518..8f6d8cd159f 100644 --- a/packages/types/src/providers/zai.ts +++ b/packages/types/src/providers/zai.ts @@ -2,22 +2,27 @@ import type { ModelInfo } from "../model.js" import { ZaiApiLine } from "../provider-settings.js" // Z AI -// https://docs.z.ai/guides/llm/glm-4.6 // kilocode_change: overwrite the entire file on merge conflicts +// https://docs.z.ai/guides/llm/glm-4-32b-0414-128k +// https://docs.z.ai/guides/llm/glm-4.5 +// https://docs.z.ai/guides/llm/glm-4.6 // https://docs.z.ai/guides/overview/pricing +// https://bigmodel.cn/pricing export type InternationalZAiModelId = keyof typeof internationalZAiModels -export const internationalZAiDefaultModelId: InternationalZAiModelId = "glm-4.6" // kilocode_change: overwrite the entire file on merge conflicts +export const internationalZAiDefaultModelId: InternationalZAiModelId = "glm-4.6" export const 
internationalZAiModels = { "glm-4.5": { maxTokens: 98_304, contextWindow: 131_072, supportsImages: false, supportsPromptCache: true, + supportsReasoningBinary: true, inputPrice: 0.6, outputPrice: 2.2, cacheWritesPrice: 0, cacheReadsPrice: 0.11, - description: "Zhipu's previous flagship model.", // kilocode_change: overwrite the entire file on merge conflicts + description: + "GLM-4.5 is Zhipu's latest featured model. Its comprehensive capabilities in reasoning, coding, and agent reach the state-of-the-art (SOTA) level among open-source models, with a context length of up to 128k.", }, "glm-4.5-air": { maxTokens: 98_304, @@ -31,22 +36,58 @@ export const internationalZAiModels = { description: "GLM-4.5-Air is the lightweight version of GLM-4.5. It balances performance and cost-effectiveness, and can flexibly switch to hybrid thinking models.", }, + "glm-4.5-x": { + maxTokens: 98_304, + contextWindow: 131_072, + supportsImages: false, + supportsPromptCache: true, + inputPrice: 2.2, + outputPrice: 8.9, + cacheWritesPrice: 0, + cacheReadsPrice: 0.45, + description: + "GLM-4.5-X is a high-performance variant optimized for strong reasoning with ultra-fast responses.", + }, + "glm-4.5-airx": { + maxTokens: 98_304, + contextWindow: 131_072, + supportsImages: false, + supportsPromptCache: true, + inputPrice: 1.1, + outputPrice: 4.5, + cacheWritesPrice: 0, + cacheReadsPrice: 0.22, + description: "GLM-4.5-AirX is a lightweight, ultra-fast variant delivering strong performance with lower cost.", + }, "glm-4.5-flash": { - maxTokens: 98304, - contextWindow: 131072, + maxTokens: 98_304, + contextWindow: 131_072, supportsImages: false, - supportsPromptCache: false, + supportsPromptCache: true, inputPrice: 0, outputPrice: 0, cacheWritesPrice: 0, cacheReadsPrice: 0, - description: "Zhipu's most advanced free model to date.", + description: "GLM-4.5-Flash is a free, high-speed model excellent for reasoning, coding, and agentic tasks.", + }, + "glm-4.5v": { + maxTokens: 16_384, + 
contextWindow: 131_072, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 0.6, + outputPrice: 1.8, + cacheWritesPrice: 0, + cacheReadsPrice: 0.11, + description: + "GLM-4.5V is Z.AI's multimodal visual reasoning model (image/video/text/file input), optimized for GUI tasks, grounding, and document/video understanding.", }, "glm-4.6": { maxTokens: 98_304, - contextWindow: 204_800, + contextWindow: 200_000, supportsImages: false, supportsPromptCache: true, + supportsReasoningBinary: true, inputPrice: 0.6, outputPrice: 2.2, cacheWritesPrice: 0, @@ -54,41 +95,34 @@ export const internationalZAiModels = { description: "GLM-4.6 is Zhipu's newest model with an extended context window of up to 200k tokens, providing enhanced capabilities for processing longer documents and conversations.", }, + "glm-4-32b-0414-128k": { + maxTokens: 98_304, + contextWindow: 131_072, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.1, + outputPrice: 0.1, + cacheWritesPrice: 0, + cacheReadsPrice: 0, + description: "GLM-4-32B is a 32 billion parameter model with 128k context length, optimized for efficiency.", + }, } as const satisfies Record export type MainlandZAiModelId = keyof typeof mainlandZAiModels -export const mainlandZAiDefaultModelId: MainlandZAiModelId = "glm-4.6" // kilocode_change: overwrite the entire file on merge conflicts +export const mainlandZAiDefaultModelId: MainlandZAiModelId = "glm-4.6" export const mainlandZAiModels = { "glm-4.5": { maxTokens: 98_304, contextWindow: 131_072, supportsImages: false, supportsPromptCache: true, + supportsReasoningBinary: true, inputPrice: 0.29, outputPrice: 1.14, cacheWritesPrice: 0, cacheReadsPrice: 0.057, - description: "Zhipu's previous flagship model.", // kilocode_change: overwrite the entire file on merge conflicts - tiers: [ - { - contextWindow: 32_000, - inputPrice: 0.21, - outputPrice: 1.0, - cacheReadsPrice: 0.043, - }, - { - contextWindow: 128_000, - inputPrice: 0.29, - outputPrice: 1.14, - 
cacheReadsPrice: 0.057, - }, - { - contextWindow: Infinity, - inputPrice: 0.29, - outputPrice: 1.14, - cacheReadsPrice: 0.057, - }, - ], + description: + "GLM-4.5 is Zhipu's latest featured model. Its comprehensive capabilities in reasoning, coding, and agent reach the state-of-the-art (SOTA) level among open-source models, with a context length of up to 128k.", }, "glm-4.5-air": { maxTokens: 98_304, @@ -101,89 +135,79 @@ export const mainlandZAiModels = { cacheReadsPrice: 0.02, description: "GLM-4.5-Air is the lightweight version of GLM-4.5. It balances performance and cost-effectiveness, and can flexibly switch to hybrid thinking models.", - tiers: [ - { - contextWindow: 32_000, - inputPrice: 0.07, - outputPrice: 0.4, - cacheReadsPrice: 0.014, - }, - { - contextWindow: 128_000, - inputPrice: 0.1, - outputPrice: 0.6, - cacheReadsPrice: 0.02, - }, - { - contextWindow: Infinity, - inputPrice: 0.1, - outputPrice: 0.6, - cacheReadsPrice: 0.02, - }, - ], + }, + "glm-4.5-x": { + maxTokens: 98_304, + contextWindow: 131_072, + supportsImages: false, + supportsPromptCache: true, + inputPrice: 0.29, + outputPrice: 1.14, + cacheWritesPrice: 0, + cacheReadsPrice: 0.057, + description: + "GLM-4.5-X is a high-performance variant optimized for strong reasoning with ultra-fast responses.", + }, + "glm-4.5-airx": { + maxTokens: 98_304, + contextWindow: 131_072, + supportsImages: false, + supportsPromptCache: true, + inputPrice: 0.1, + outputPrice: 0.6, + cacheWritesPrice: 0, + cacheReadsPrice: 0.02, + description: "GLM-4.5-AirX is a lightweight, ultra-fast variant delivering strong performance with lower cost.", }, "glm-4.5-flash": { - maxTokens: 98304, - contextWindow: 131072, + maxTokens: 98_304, + contextWindow: 131_072, supportsImages: false, - supportsPromptCache: false, + supportsPromptCache: true, inputPrice: 0, outputPrice: 0, cacheWritesPrice: 0, cacheReadsPrice: 0, - description: "Zhipu's most advanced free model to date.", + description: "GLM-4.5-Flash is a free, 
high-speed model excellent for reasoning, coding, and agentic tasks.", + }, + "glm-4.5v": { + maxTokens: 16_384, + contextWindow: 131_072, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 0.29, + outputPrice: 0.93, + cacheWritesPrice: 0, + cacheReadsPrice: 0.057, + description: + "GLM-4.5V is Z.AI's multimodal visual reasoning model (image/video/text/file input), optimized for GUI tasks, grounding, and document/video understanding.", }, "glm-4.6": { maxTokens: 98_304, contextWindow: 204_800, supportsImages: false, supportsPromptCache: true, + supportsReasoningBinary: true, inputPrice: 0.29, outputPrice: 1.14, cacheWritesPrice: 0, cacheReadsPrice: 0.057, description: "GLM-4.6 is Zhipu's newest model with an extended context window of up to 200k tokens, providing enhanced capabilities for processing longer documents and conversations.", - tiers: [ - { - contextWindow: 32_000, - inputPrice: 0.21, - outputPrice: 1.0, - cacheReadsPrice: 0.043, - }, - { - contextWindow: 128_000, - inputPrice: 0.29, - outputPrice: 1.14, - cacheReadsPrice: 0.057, - }, - { - contextWindow: 200_000, - inputPrice: 0.29, - outputPrice: 1.14, - cacheReadsPrice: 0.057, - }, - { - contextWindow: Infinity, - inputPrice: 0.29, - outputPrice: 1.14, - cacheReadsPrice: 0.057, - }, - ], }, } as const satisfies Record -export const ZAI_DEFAULT_TEMPERATURE = 0 +export const ZAI_DEFAULT_TEMPERATURE = 0.6 export const zaiApiLineConfigs = { international_coding: { - name: "International Coding Plan", + name: "International", baseUrl: "https://api.z.ai/api/coding/paas/v4", isChina: false, }, china_coding: { - name: "China Coding Plan", + name: "China", baseUrl: "https://open.bigmodel.cn/api/coding/paas/v4", isChina: true, }, -} satisfies Record +} satisfies Record \ No newline at end of file diff --git a/packages/types/src/tool.ts b/packages/types/src/tool.ts index 421f4d564aa..c63e15215b9 100644 --- a/packages/types/src/tool.ts +++ b/packages/types/src/tool.ts @@ -20,7 +20,6 @@ export 
const toolNames = [ "write_to_file", "apply_diff", "insert_content", - "search_and_replace", "search_files", "list_files", "list_code_definition_names", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 9db4181d9c2..76562fa5534 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -476,6 +476,9 @@ importers: '@tanstack/react-query': specifier: ^5.79.0 version: 5.80.2(react@18.3.1) + '@vercel/og': + specifier: ^0.6.2 + version: 0.6.8 class-variance-authority: specifier: ^0.7.1 version: 0.7.1 @@ -6073,6 +6076,10 @@ packages: peerDependencies: '@redis/client': ^5.5.5 + '@resvg/resvg-wasm@2.4.0': + resolution: {integrity: sha512-C7c51Nn4yTxXFKvgh2txJFNweaVcfUPQxwEUFw4aWsCmfiBDJsTSwviIF8EcwjQ6k8bPyMWCl1vw4BdxE569Cg==} + engines: {node: '>= 10'} + '@rollup/pluginutils@5.2.0': resolution: {integrity: sha512-qWJ2ZTbmumwiLFomfzTyt5Kng4hwPi9rwCYN4SHb6eaRU1KNO4ccxINHr/VhH4GgPlt1XfSTLX2LBTme8ne4Zw==} engines: {node: '>=14.0.0'} @@ -6227,6 +6234,11 @@ packages: '@shikijs/vscode-textmate@10.0.2': resolution: {integrity: sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==} + '@shuding/opentype.js@1.4.0-beta.0': + resolution: {integrity: sha512-3NgmNyH3l/Hv6EvsWJbsvpcpUba6R8IREQ83nH83cyakCw7uM1arZKNfHwv1Wz6jgqrF/j4x5ELvR6PnK9nTcA==} + engines: {node: '>= 8.0.0'} + hasBin: true + '@sideway/address@4.1.5': resolution: {integrity: sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==} @@ -7862,6 +7874,10 @@ packages: peerDependencies: react: '>= 16.8.0' + '@vercel/og@0.6.8': + resolution: {integrity: sha512-e4kQK9mP8ntpo3dACWirGod/hHv4qO5JMj9a/0a2AZto7b4persj5YP7t1Er372gTtYFTYxNhMx34jRvHooglw==} + engines: {node: '>=16'} + '@vitejs/plugin-react@4.4.1': resolution: {integrity: sha512-IpEm5ZmeXAP/osiBXVVP5KjFMzbWOonMs0NaQQl+xYnUAcq4oHUBsF2+p4MgKWG4YMmFYJU8A6sxRPuowllm6w==} engines: {node: ^14.18.0 || >=16.0.0} @@ -8755,6 +8771,10 @@ packages: bare-events: optional: true + base64-js@0.0.8: + resolution: 
{integrity: sha512-3XSA2cR/h/73EzlXXdU6YNycmYI7+kicTxks4eJg2g39biHR84slg2+des+p7iHYhbRg/udIS4TD53WabcOUkw==} + engines: {node: '>= 0.4'} + base64-js@1.5.1: resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} @@ -9678,12 +9698,18 @@ packages: resolution: {integrity: sha512-x8dy3RnvYdlUcPOjkEHqozhiwzKNSq7GcPuXFbnyMOCHxX8V3OgIg/pYuabl2sbUPfIJaeAQB7PMOK8DFIdoRA==} engines: {node: '>=12'} + css-background-parser@0.1.0: + resolution: {integrity: sha512-2EZLisiZQ+7m4wwur/qiYJRniHX4K5Tc9w93MT3AS0WS1u5kaZ4FKXlOTBhOjc+CgEgPiGY+fX1yWD8UwpEqUA==} + css-blank-pseudo@7.0.1: resolution: {integrity: sha512-jf+twWGDf6LDoXDUode+nc7ZlrqfaNphrBIBrcmeP3D8yw1uPaix1gCC8LUQUGQ6CycuK2opkbFFWFuq/a94ag==} engines: {node: '>=18'} peerDependencies: postcss: ^8.4 + css-box-shadow@1.0.0-3: + resolution: {integrity: sha512-9jaqR6e7Ohds+aWwmhe6wILJ99xYQbfmK9QQB9CcMjDbTxPZjwEmUQpU91OG05Xgm8BahT5fW+svbsQGjS/zPg==} + css-color-keywords@1.0.0: resolution: {integrity: sha512-FyyrDHZKEjXDpNJYvVsV960FiqQyXc/LlYmsxl2BcdMb2WPx0OGRVgTg55rPSyLSNMqP52R9r8geSp7apN3Ofg==} engines: {node: '>=4'} @@ -9694,6 +9720,10 @@ packages: peerDependencies: postcss: ^8.0.9 + css-gradient-parser@0.0.16: + resolution: {integrity: sha512-3O5QdqgFRUbXvK1x5INf1YkBz1UKSWqrd63vWsum8MNHDBYD5urm3QtxZbKU259OrEXNM26lP/MPY3d1IGkBgA==} + engines: {node: '>=16'} + css-has-pseudo@7.0.3: resolution: {integrity: sha512-oG+vKuGyqe/xvEMoxAQrhi7uY16deJR3i7wwhBerVrGQKSqUC5GiOVxTpM9F9B9hw0J+eKeOWLH7E9gZ1Dr5rA==} engines: {node: '>=18'} @@ -11205,6 +11235,9 @@ packages: fflate@0.4.8: resolution: {integrity: sha512-FJqqoDBR00Mdj9ppamLa/Y7vxm+PRmNWA67N846RvsoYVMKB4q3y/de5PA7gUmRMYK/8CMz2GDZQmCRN1wBcWA==} + fflate@0.7.4: + resolution: {integrity: sha512-5u2V/CDW15QM1XbbgS+0DfPxVB+jUKhWEKuuFuHncbk3tEEqzmoXL+2KyOFuKGqOnmdIy0/davWF1CkuwtibCw==} + fflate@0.8.2: resolution: {integrity: sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A==} @@ 
-12002,6 +12035,10 @@ packages: resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==} hasBin: true + hex-rgb@4.3.0: + resolution: {integrity: sha512-Ox1pJVrDCyGHMG9CFg1tmrRUMRPRsAWYc/PinY0XzJU4K7y7vjNoLKIQ7BR5UJMCxNN8EM1MNDmHWA/B3aZUuw==} + engines: {node: '>=6'} + highlight.js@10.7.3: resolution: {integrity: sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==} @@ -13547,6 +13584,9 @@ packages: resolution: {integrity: sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==} engines: {node: '>=14'} + linebreak@1.1.0: + resolution: {integrity: sha512-MHp03UImeVhB7XZtjd0E4n6+3xr5Dq/9xI/5FptGk5FrbDR3zagPa2DS6U8ks/3HjbKWG9Q1M2ufOzxV2qLYSQ==} + lines-and-columns@1.2.4: resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} @@ -15097,6 +15137,9 @@ packages: resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} engines: {node: '>=6'} + parse-css-color@0.2.1: + resolution: {integrity: sha512-bwS/GGIFV3b6KS4uwpzCFj4w297Yl3uqnSgIPsoQkx7GMLROXfMnWvxfNkL0oh8HVhZA4hvJoEoEIqonfJ3BWg==} + parse-entities@2.0.0: resolution: {integrity: sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==} @@ -16854,6 +16897,10 @@ packages: sanitize-filename@1.6.3: resolution: {integrity: sha512-y/52Mcy7aw3gRm7IrcGDFx/bCk4AhRh2eI9luHOQM86nZsqwiRkkq2GekHXBBD+SmPidc8i2PqtYZl+pWJ8Oeg==} + satori@0.12.2: + resolution: {integrity: sha512-3C/laIeE6UUe9A+iQ0A48ywPVCCMKCNSTU5Os101Vhgsjd3AAxGNjyq0uAA8kulMPK5n0csn8JlxPN9riXEjLA==} + engines: {node: '>=16'} + sax@1.4.1: resolution: {integrity: sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==} @@ -17457,6 +17504,9 @@ packages: resolution: {integrity: 
sha512-Kxl3KJGb/gxkaUMOjRsQ8IrXiGW75O4E3RPjFIINOVH8AMl2SQ/yWdTzWwF3FevIX9LcMAjJW+GRwAlAbTSXdg==} engines: {node: '>=20'} + string.prototype.codepointat@0.2.1: + resolution: {integrity: sha512-2cBVCj6I4IOvEnjgO/hWqXjqBGsY+zwPmHl12Srk9IXSZ56Jwwmy+66XO5Iut/oQVR7t5ihYdLB0GMa4alEUcg==} + string.prototype.matchall@4.0.12: resolution: {integrity: sha512-6CC9uyBL+/48dYizRf7H7VAYCMCNTBeM78x/VTUe9bFEaxBepPJDa1Ow99LqI/1yF7kuy7Q3cQsYMrcjGUcskA==} engines: {node: '>= 0.4'} @@ -18289,6 +18339,9 @@ packages: resolution: {integrity: sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==} engines: {node: '>=4'} + unicode-trie@2.0.0: + resolution: {integrity: sha512-x7bc76x0bm4prf1VLg79uhAzKw8DVboClSN5VxJuQ+LKDOVEW9CdH+VY7SP+vX7xCYQqzzgQpFqz15zeLvAtZQ==} + unicorn-magic@0.1.0: resolution: {integrity: sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==} engines: {node: '>=18'} @@ -19271,6 +19324,9 @@ packages: yoga-layout@3.2.1: resolution: {integrity: sha512-0LPOt3AxKqMdFBZA3HBAt/t/8vIKq7VaQYbuA8WxCgung+p9TVyKRYdpvCb80HcdTN2NkbIKbhNwKUfm3tQywQ==} + yoga-wasm-web@0.3.3: + resolution: {integrity: sha512-N+d4UJSJbt/R3wqY7Coqs5pcV0aUj2j9IaQ3rNj9bVCLld8tTGKRa2USARjnvZJWVx1NDmQev8EknoczaOQDOA==} + zip-stream@4.1.1: resolution: {integrity: sha512-9qv4rlDiopXg4E69k+vMHjNN63YFMe9sZMrdlvKnCjlCRWeCBswPPMPUfx+ipsAWq1LXHe70RcbaHdJJpS6hyQ==} engines: {node: '>= 10'} @@ -24643,6 +24699,8 @@ snapshots: dependencies: '@redis/client': 5.5.5 + '@resvg/resvg-wasm@2.4.0': {} + '@rollup/pluginutils@5.2.0(rollup@4.40.2)': dependencies: '@types/estree': 1.0.8 @@ -24779,6 +24837,11 @@ snapshots: '@shikijs/vscode-textmate@10.0.2': {} + '@shuding/opentype.js@1.4.0-beta.0': + dependencies: + fflate: 0.7.4 + string.prototype.codepointat: 0.2.1 + '@sideway/address@4.1.5': dependencies: '@hapi/hoek': 9.3.0 @@ -26792,6 +26855,12 @@ snapshots: '@use-gesture/core': 10.3.1 react: 18.3.1 + '@vercel/og@0.6.8': + dependencies: + 
'@resvg/resvg-wasm': 2.4.0 + satori: 0.12.2 + yoga-wasm-web: 0.3.3 + '@vitejs/plugin-react@4.4.1(vite@6.3.5(@types/node@24.2.1)(jiti@2.6.1)(lightningcss@1.30.1)(terser@5.43.1)(tsx@4.19.4)(yaml@2.8.0))': dependencies: '@babel/core': 7.27.1 @@ -27919,6 +27988,8 @@ snapshots: bare-events: 2.5.4 optional: true + base64-js@0.0.8: {} + base64-js@1.5.1: {} base@0.11.2: @@ -28992,17 +29063,23 @@ snapshots: dependencies: type-fest: 2.19.0 + css-background-parser@0.1.0: {} + css-blank-pseudo@7.0.1(postcss@8.5.4): dependencies: postcss: 8.5.4 postcss-selector-parser: 7.1.0 + css-box-shadow@1.0.0-3: {} + css-color-keywords@1.0.0: {} css-declaration-sorter@7.2.0(postcss@8.5.4): dependencies: postcss: 8.5.4 + css-gradient-parser@0.0.16: {} + css-has-pseudo@7.0.3(postcss@8.5.4): dependencies: '@csstools/selector-specificity': 5.0.0(postcss-selector-parser@7.1.0) @@ -30760,6 +30837,8 @@ snapshots: fflate@0.4.8: {} + fflate@0.7.4: {} + fflate@0.8.2: {} figures@2.0.0: @@ -31870,6 +31949,8 @@ snapshots: he@1.2.0: {} + hex-rgb@4.3.0: {} + highlight.js@10.7.3: {} highlight.js@11.11.1: {} @@ -33749,6 +33830,11 @@ snapshots: lilconfig@3.1.3: {} + linebreak@1.1.0: + dependencies: + base64-js: 0.0.8 + unicode-trie: 2.0.0 + lines-and-columns@1.2.4: {} linkify-it@5.0.0: @@ -35796,6 +35882,11 @@ snapshots: dependencies: callsites: 3.1.0 + parse-css-color@0.2.1: + dependencies: + color-name: 1.1.4 + hex-rgb: 4.3.0 + parse-entities@2.0.0: dependencies: character-entities: 1.2.4 @@ -37836,6 +37927,20 @@ snapshots: dependencies: truncate-utf8-bytes: 1.0.2 + satori@0.12.2: + dependencies: + '@shuding/opentype.js': 1.4.0-beta.0 + css-background-parser: 0.1.0 + css-box-shadow: 1.0.0-3 + css-gradient-parser: 0.0.16 + css-to-react-native: 3.2.0 + emoji-regex: 10.4.0 + escape-html: 1.0.3 + linebreak: 1.1.0 + parse-css-color: 0.2.1 + postcss-value-parser: 4.2.0 + yoga-wasm-web: 0.3.3 + sax@1.4.1: {} saxes@5.0.1: @@ -38617,6 +38722,8 @@ snapshots: get-east-asian-width: 1.3.0 strip-ansi: 7.1.2 + 
string.prototype.codepointat@0.2.1: {} + string.prototype.matchall@4.0.12: dependencies: call-bind: 1.0.8 @@ -39524,6 +39631,11 @@ snapshots: unicode-property-aliases-ecmascript@2.1.0: {} + unicode-trie@2.0.0: + dependencies: + pako: 0.2.9 + tiny-inflate: 1.0.3 + unicorn-magic@0.1.0: {} unicorn-magic@0.3.0: {} @@ -40871,6 +40983,8 @@ snapshots: yoga-layout@3.2.1: {} + yoga-wasm-web@0.3.3: {} + zip-stream@4.1.1: dependencies: archiver-utils: 3.0.4 diff --git a/releases/3.29.0-release.png b/releases/3.29.0-release.png new file mode 100644 index 00000000000..8f9381fbae7 Binary files /dev/null and b/releases/3.29.0-release.png differ diff --git a/releases/3.29.1-release.png b/releases/3.29.1-release.png new file mode 100644 index 00000000000..71dcd474b0e Binary files /dev/null and b/releases/3.29.1-release.png differ diff --git a/releases/3.30.0-release.png b/releases/3.30.0-release.png new file mode 100644 index 00000000000..8050aa49ce6 Binary files /dev/null and b/releases/3.30.0-release.png differ diff --git a/src/activate/handleUri.ts b/src/activate/handleUri.ts index dc1867ae037..9803897c7c1 100644 --- a/src/activate/handleUri.ts +++ b/src/activate/handleUri.ts @@ -49,8 +49,9 @@ export const handleUri = async (uri: vscode.Uri) => { // kilocode_change end case "/requesty": { const code = query.get("code") + const baseUrl = query.get("baseUrl") if (code) { - await visibleProvider.handleRequestyCallback(code) + await visibleProvider.handleRequestyCallback(code, baseUrl) } break } diff --git a/src/api/index.ts b/src/api/index.ts index 8fe96700fa9..78e52e1a74d 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -47,6 +47,7 @@ import { VercelAiGatewayHandler, DeepInfraHandler, OVHcloudAIEndpointsHandler, // kilocode_change + MiniMaxHandler, } from "./providers" // kilocode_change start import { KilocodeOpenrouterHandler } from "./providers/kilocode-openrouter" @@ -120,8 +121,6 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler { // 
kilocode_change start case "kilocode": return new KilocodeOpenrouterHandler(options) - case "kilocode-openrouter": // temp typing fix - return new KilocodeOpenrouterHandler(options) case "gemini-cli": return new GeminiCliHandler(options) case "virtual-quota-fallback": @@ -209,6 +208,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler { return new FeatherlessHandler(options) case "vercel-ai-gateway": return new VercelAiGatewayHandler(options) + case "minimax": + return new MiniMaxHandler(options) default: apiProvider satisfies "gemini-cli" | undefined return new AnthropicHandler(options) diff --git a/src/api/providers/__tests__/base-openai-compatible-provider.spec.ts b/src/api/providers/__tests__/base-openai-compatible-provider.spec.ts new file mode 100644 index 00000000000..667083ee5f6 --- /dev/null +++ b/src/api/providers/__tests__/base-openai-compatible-provider.spec.ts @@ -0,0 +1,386 @@ +// npx vitest run api/providers/__tests__/base-openai-compatible-provider.spec.ts + +import { Anthropic } from "@anthropic-ai/sdk" +import OpenAI from "openai" + +import type { ModelInfo } from "@roo-code/types" + +import { BaseOpenAiCompatibleProvider } from "../base-openai-compatible-provider" + +// Create mock functions +const mockCreate = vi.fn() + +// Mock OpenAI module +vi.mock("openai", () => ({ + default: vi.fn(() => ({ + chat: { + completions: { + create: mockCreate, + }, + }, + })), +})) + +// Create a concrete test implementation of the abstract base class +class TestOpenAiCompatibleProvider extends BaseOpenAiCompatibleProvider<"test-model"> { + constructor(apiKey: string) { + const testModels: Record<"test-model", ModelInfo> = { + "test-model": { + maxTokens: 4096, + contextWindow: 128000, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.5, + outputPrice: 1.5, + }, + } + + super({ + providerName: "TestProvider", + baseURL: "https://test.example.com/v1", + defaultProviderModelId: "test-model", + providerModels: 
testModels, + apiKey, + }) + } +} + +describe("BaseOpenAiCompatibleProvider", () => { + let handler: TestOpenAiCompatibleProvider + + beforeEach(() => { + vi.clearAllMocks() + handler = new TestOpenAiCompatibleProvider("test-api-key") + }) + + afterEach(() => { + vi.restoreAllMocks() + }) + + describe("XmlMatcher reasoning tags", () => { + it("should handle reasoning tags () from stream", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vi + .fn() + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: "Let me think" } }] }, + }) + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: " about this" } }] }, + }) + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: "The answer is 42" } }] }, + }) + .mockResolvedValueOnce({ done: true }), + }), + } + }) + + const stream = handler.createMessage("system prompt", []) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + // XmlMatcher yields chunks as they're processed + expect(chunks).toEqual([ + { type: "reasoning", text: "Let me think" }, + { type: "reasoning", text: " about this" }, + { type: "text", text: "The answer is 42" }, + ]) + }) + + it("should handle complete tag in a single chunk", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vi + .fn() + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: "Regular text before " } }] }, + }) + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: "Complete thought" } }] }, + }) + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: " regular text after" } }] }, + }) + .mockResolvedValueOnce({ done: true }), + }), + } + }) + + const stream = handler.createMessage("system prompt", []) + const chunks = [] + for await (const chunk of stream) { + 
chunks.push(chunk) + } + + // When a complete tag arrives in one chunk, XmlMatcher may not parse it + // This test documents the actual behavior + expect(chunks.length).toBeGreaterThan(0) + expect(chunks[0]).toEqual({ type: "text", text: "Regular text before " }) + }) + + it("should handle incomplete tag at end of stream", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vi + .fn() + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: "Incomplete thought" } }] }, + }) + .mockResolvedValueOnce({ done: true }), + }), + } + }) + + const stream = handler.createMessage("system prompt", []) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + // XmlMatcher should handle incomplete tags and flush remaining content + expect(chunks.length).toBeGreaterThan(0) + expect( + chunks.some( + (c) => (c.type === "text" || c.type === "reasoning") && c.text.includes("Incomplete thought"), + ), + ).toBe(true) + }) + + it("should handle text without any tags", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vi + .fn() + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: "Just regular text" } }] }, + }) + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: " without reasoning" } }] }, + }) + .mockResolvedValueOnce({ done: true }), + }), + } + }) + + const stream = handler.createMessage("system prompt", []) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks).toEqual([ + { type: "text", text: "Just regular text" }, + { type: "text", text: " without reasoning" }, + ]) + }) + + it("should handle tags that start at beginning of stream", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vi + .fn() + .mockResolvedValueOnce({ + done: false, 
+ value: { choices: [{ delta: { content: "reasoning" } }] }, + }) + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: " content" } }] }, + }) + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: " normal text" } }] }, + }) + .mockResolvedValueOnce({ done: true }), + }), + } + }) + + const stream = handler.createMessage("system prompt", []) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + expect(chunks).toEqual([ + { type: "reasoning", text: "reasoning" }, + { type: "reasoning", text: " content" }, + { type: "text", text: " normal text" }, + ]) + }) + }) + + describe("reasoning_content field", () => { + it("should filter out whitespace-only reasoning_content", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vi + .fn() + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { reasoning_content: "\n" } }] }, + }) + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { reasoning_content: " " } }] }, + }) + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { reasoning_content: "\t\n " } }] }, + }) + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { content: "Regular content" } }] }, + }) + .mockResolvedValueOnce({ done: true }), + }), + } + }) + + const stream = handler.createMessage("system prompt", []) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + // Should only have the regular content, not the whitespace-only reasoning + expect(chunks).toEqual([{ type: "text", text: "Regular content" }]) + }) + + it("should yield non-empty reasoning_content", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vi + .fn() + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { reasoning_content: "Thinking step 1" } }] }, + }) + 
.mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { reasoning_content: "\n" } }] }, + }) + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { reasoning_content: "Thinking step 2" } }] }, + }) + .mockResolvedValueOnce({ done: true }), + }), + } + }) + + const stream = handler.createMessage("system prompt", []) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + // Should only yield the non-empty reasoning content + expect(chunks).toEqual([ + { type: "reasoning", text: "Thinking step 1" }, + { type: "reasoning", text: "Thinking step 2" }, + ]) + }) + + it("should handle reasoning_content with leading/trailing whitespace", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vi + .fn() + .mockResolvedValueOnce({ + done: false, + value: { choices: [{ delta: { reasoning_content: " content with spaces " } }] }, + }) + .mockResolvedValueOnce({ done: true }), + }), + } + }) + + const stream = handler.createMessage("system prompt", []) + const chunks = [] + for await (const chunk of stream) { + chunks.push(chunk) + } + + // Should yield reasoning with spaces (only pure whitespace is filtered) + expect(chunks).toEqual([{ type: "reasoning", text: " content with spaces " }]) + }) + }) + + describe("Basic functionality", () => { + it("should create stream with correct parameters", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + async next() { + return { done: true } + }, + }), + } + }) + + const systemPrompt = "Test system prompt" + const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message" }] + + const messageGenerator = handler.createMessage(systemPrompt, messages) + await messageGenerator.next() + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + model: "test-model", + temperature: 0, + messages: expect.arrayContaining([{ role: 
"system", content: systemPrompt }]), + stream: true, + stream_options: { include_usage: true }, + }), + undefined, + ) + }) + + it("should yield usage data from stream", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vi + .fn() + .mockResolvedValueOnce({ + done: false, + value: { + choices: [{ delta: {} }], + usage: { prompt_tokens: 100, completion_tokens: 50 }, + }, + }) + .mockResolvedValueOnce({ done: true }), + }), + } + }) + + const stream = handler.createMessage("system prompt", []) + const firstChunk = await stream.next() + + expect(firstChunk.done).toBe(false) + expect(firstChunk.value).toEqual({ type: "usage", inputTokens: 100, outputTokens: 50 }) + }) + }) +}) diff --git a/src/api/providers/__tests__/chutes.spec.ts b/src/api/providers/__tests__/chutes.spec.ts index 70ee06a923c..b4c933d4cc5 100644 --- a/src/api/providers/__tests__/chutes.spec.ts +++ b/src/api/providers/__tests__/chutes.spec.ts @@ -3,12 +3,13 @@ import { Anthropic } from "@anthropic-ai/sdk" import OpenAI from "openai" -import { type ChutesModelId, chutesDefaultModelId, chutesModels, DEEP_SEEK_DEFAULT_TEMPERATURE } from "@roo-code/types" +import { chutesDefaultModelId, chutesDefaultModelInfo, DEEP_SEEK_DEFAULT_TEMPERATURE } from "@roo-code/types" import { ChutesHandler } from "../chutes" // Create mock functions const mockCreate = vi.fn() +const mockFetchModel = vi.fn() // Mock OpenAI module vi.mock("openai", () => ({ @@ -54,6 +55,12 @@ describe("ChutesHandler", () => { }, })) handler = new ChutesHandler({ chutesApiKey: "test-key" }) + // Mock fetchModel to return default model + mockFetchModel.mockResolvedValue({ + id: chutesDefaultModelId, + info: chutesDefaultModelInfo, + }) + handler.fetchModel = mockFetchModel }) afterEach(() => { @@ -107,10 +114,10 @@ describe("ChutesHandler", () => { const systemPrompt = "You are a helpful assistant." 
const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hi" }] - vi.spyOn(handler, "getModel").mockReturnValue({ + mockFetchModel.mockResolvedValueOnce({ id: "deepseek-ai/DeepSeek-R1-0528", info: { maxTokens: 1024, temperature: 0.7 }, - } as any) + }) const stream = handler.createMessage(systemPrompt, messages) const chunks = [] @@ -125,14 +132,14 @@ describe("ChutesHandler", () => { ]) }) - it("should fall back to base provider for non-DeepSeek models", async () => { + it("should handle non-DeepSeek models", async () => { // Use default mock implementation which returns text content const systemPrompt = "You are a helpful assistant." const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hi" }] - vi.spyOn(handler, "getModel").mockReturnValue({ + mockFetchModel.mockResolvedValueOnce({ id: "some-other-model", info: { maxTokens: 1024, temperature: 0.7 }, - } as any) + }) const stream = handler.createMessage(systemPrompt, messages) const chunks = [] @@ -146,199 +153,25 @@ describe("ChutesHandler", () => { ]) }) - it("should return default model when no model is specified", () => { - const model = handler.getModel() + it("should return default model when no model is specified", async () => { + const model = await handler.fetchModel() expect(model.id).toBe(chutesDefaultModelId) - expect(model.info).toEqual(expect.objectContaining(chutesModels[chutesDefaultModelId])) - }) - - it("should return specified model when valid model is provided", () => { - const testModelId: ChutesModelId = "deepseek-ai/DeepSeek-R1" - const handlerWithModel = new ChutesHandler({ - apiModelId: testModelId, - chutesApiKey: "test-chutes-api-key", - }) - const model = handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - expect(model.info).toEqual(expect.objectContaining(chutesModels[testModelId])) - }) - - it("should return DeepSeek V3.1 model with correct configuration", () => { - const testModelId: ChutesModelId = 
"deepseek-ai/DeepSeek-V3.1" - const handlerWithModel = new ChutesHandler({ - apiModelId: testModelId, - chutesApiKey: "test-chutes-api-key", - }) - const model = handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - expect(model.info).toEqual( - expect.objectContaining({ - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "DeepSeek V3.1 model.", - temperature: 0.5, // Non-R1 DeepSeek models use default temperature - }), - ) - }) - - it("should return Qwen3-235B-A22B-Instruct-2507 model with correct configuration", () => { - const testModelId: ChutesModelId = "Qwen/Qwen3-235B-A22B-Instruct-2507" - const handlerWithModel = new ChutesHandler({ - apiModelId: testModelId, - chutesApiKey: "test-chutes-api-key", - }) - const model = handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - expect(model.info).toEqual( - expect.objectContaining({ - maxTokens: 32768, - contextWindow: 262144, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Qwen3 235B A22B Instruct 2507 model with 262K context window.", - temperature: 0.5, // Default temperature for non-DeepSeek models - }), - ) - }) - - it("should return zai-org/GLM-4.5-Air model with correct configuration", () => { - const testModelId: ChutesModelId = "zai-org/GLM-4.5-Air" - const handlerWithModel = new ChutesHandler({ - apiModelId: testModelId, - chutesApiKey: "test-chutes-api-key", - }) - const model = handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - expect(model.info).toEqual( - expect.objectContaining({ - maxTokens: 32768, - contextWindow: 151329, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: - "GLM-4.5-Air model with 151,329 token context window and 106B total parameters with 12B activated.", - temperature: 0.5, // Default temperature for non-DeepSeek models - }), - ) - 
}) - - it("should return zai-org/GLM-4.5-FP8 model with correct configuration", () => { - const testModelId: ChutesModelId = "zai-org/GLM-4.5-FP8" - const handlerWithModel = new ChutesHandler({ - apiModelId: testModelId, - chutesApiKey: "test-chutes-api-key", - }) - const model = handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - expect(model.info).toEqual( - expect.objectContaining({ - maxTokens: 32768, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: - "GLM-4.5-FP8 model with 128k token context window, optimized for agent-based applications with MoE architecture.", - temperature: 0.5, // Default temperature for non-DeepSeek models - }), - ) - }) - - it("should return zai-org/GLM-4.5-turbo model with correct configuration", () => { - const testModelId: ChutesModelId = "zai-org/GLM-4.5-turbo" - const handlerWithModel = new ChutesHandler({ - apiModelId: testModelId, - chutesApiKey: "test-chutes-api-key", - }) - const model = handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - expect(model.info).toEqual( - expect.objectContaining({ - maxTokens: 32768, - contextWindow: 131072, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 1, - outputPrice: 3, - description: "GLM-4.5-turbo model with 128K token context window, optimized for fast inference.", - temperature: 0.5, // Default temperature for non-DeepSeek models - }), - ) - }) - - it("should return Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8 model with correct configuration", () => { - const testModelId: ChutesModelId = "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8" - const handlerWithModel = new ChutesHandler({ - apiModelId: testModelId, - chutesApiKey: "test-chutes-api-key", - }) - const model = handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - expect(model.info).toEqual( - expect.objectContaining({ - maxTokens: 32768, - contextWindow: 262144, - supportsImages: false, - 
supportsPromptCache: false, - inputPrice: 0, - outputPrice: 0, - description: "Qwen3 Coder 480B A35B Instruct FP8 model, optimized for coding tasks.", - temperature: 0.5, // Default temperature for non-DeepSeek models - }), - ) + expect(model.info).toEqual(expect.objectContaining(chutesDefaultModelInfo)) }) - it("should return moonshotai/Kimi-K2-Instruct-75k model with correct configuration", () => { - const testModelId: ChutesModelId = "moonshotai/Kimi-K2-Instruct-75k" + it("should return specified model when valid model is provided", async () => { + const testModelId = "deepseek-ai/DeepSeek-R1" const handlerWithModel = new ChutesHandler({ apiModelId: testModelId, chutesApiKey: "test-chutes-api-key", }) - const model = handlerWithModel.getModel() - expect(model.id).toBe(testModelId) - expect(model.info).toEqual( - expect.objectContaining({ - maxTokens: 32768, - contextWindow: 75000, - supportsImages: false, - supportsPromptCache: false, - inputPrice: 0.1481, - outputPrice: 0.5926, - description: "Moonshot AI Kimi K2 Instruct model with 75k context window.", - temperature: 0.5, // Default temperature for non-DeepSeek models - }), - ) - }) - - it("should return moonshotai/Kimi-K2-Instruct-0905 model with correct configuration", () => { - const testModelId: ChutesModelId = "moonshotai/Kimi-K2-Instruct-0905" - const handlerWithModel = new ChutesHandler({ - apiModelId: testModelId, - chutesApiKey: "test-chutes-api-key", + // Mock fetchModel for this handler to return the test model from dynamic fetch + handlerWithModel.fetchModel = vi.fn().mockResolvedValue({ + id: testModelId, + info: { maxTokens: 32768, contextWindow: 163840, supportsImages: false, supportsPromptCache: false }, }) - const model = handlerWithModel.getModel() + const model = await handlerWithModel.fetchModel() expect(model.id).toBe(testModelId) - expect(model.info).toEqual( - expect.objectContaining({ - maxTokens: 32768, - contextWindow: 262144, - supportsImages: false, - supportsPromptCache: false, - 
inputPrice: 0.1999, - outputPrice: 0.8001, - description: "Moonshot AI Kimi K2 Instruct 0905 model with 256k context window.", - temperature: 0.5, // Default temperature for non-DeepSeek models - }), - ) }) it("completePrompt method should return text from Chutes API", async () => { @@ -400,81 +233,8 @@ describe("ChutesHandler", () => { expect(firstChunk.value).toEqual({ type: "usage", inputTokens: 10, outputTokens: 20 }) }) - it("createMessage should pass correct parameters to Chutes client for DeepSeek R1", async () => { - const modelId: ChutesModelId = "deepseek-ai/DeepSeek-R1" - - // Clear previous mocks and set up new implementation - mockCreate.mockClear() - mockCreate.mockImplementationOnce(async () => ({ - [Symbol.asyncIterator]: async function* () { - // Empty stream for this test - }, - })) - - const handlerWithModel = new ChutesHandler({ - apiModelId: modelId, - chutesApiKey: "test-chutes-api-key", - }) - - const systemPrompt = "Test system prompt for Chutes" - const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message for Chutes" }] - - const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages) - await messageGenerator.next() - - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - model: modelId, - messages: [ - { - role: "user", - content: `${systemPrompt}\n${messages[0].content}`, - }, - ], - max_tokens: 32768, - temperature: 0.6, - stream: true, - stream_options: { include_usage: true }, - }), - ) - }) - - it("createMessage should pass correct parameters to Chutes client for non-DeepSeek models", async () => { - const modelId: ChutesModelId = "unsloth/Llama-3.3-70B-Instruct" - const modelInfo = chutesModels[modelId] - const handlerWithModel = new ChutesHandler({ apiModelId: modelId, chutesApiKey: "test-chutes-api-key" }) - - mockCreate.mockImplementationOnce(() => { - return { - [Symbol.asyncIterator]: () => ({ - async next() { - return { done: true } - }, - }), - } - }) - - 
const systemPrompt = "Test system prompt for Chutes" - const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message for Chutes" }] - - const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages) - await messageGenerator.next() - - expect(mockCreate).toHaveBeenCalledWith( - expect.objectContaining({ - model: modelId, - max_tokens: modelInfo.maxTokens, - temperature: 0.5, - messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]), - stream: true, - stream_options: { include_usage: true }, - }), - undefined, - ) - }) - it("should apply DeepSeek default temperature for R1 models", () => { - const testModelId: ChutesModelId = "deepseek-ai/DeepSeek-R1" + const testModelId = "deepseek-ai/DeepSeek-R1" const handlerWithModel = new ChutesHandler({ apiModelId: testModelId, chutesApiKey: "test-chutes-api-key", @@ -484,12 +244,16 @@ describe("ChutesHandler", () => { }) it("should use default temperature for non-DeepSeek models", () => { - const testModelId: ChutesModelId = "unsloth/Llama-3.3-70B-Instruct" + const testModelId = "unsloth/Llama-3.3-70B-Instruct" const handlerWithModel = new ChutesHandler({ apiModelId: testModelId, chutesApiKey: "test-chutes-api-key", }) + // Note: getModel() returns fallback default without calling fetchModel + // Since we haven't called fetchModel, it returns the default chutesDefaultModelId + // which is DeepSeek-R1-0528, therefore temperature will be DEEP_SEEK_DEFAULT_TEMPERATURE const model = handlerWithModel.getModel() - expect(model.info.temperature).toBe(0.5) + // The default model is DeepSeek-R1, so it returns DEEP_SEEK_DEFAULT_TEMPERATURE + expect(model.info.temperature).toBe(DEEP_SEEK_DEFAULT_TEMPERATURE) }) }) diff --git a/src/api/providers/__tests__/fireworks.spec.ts b/src/api/providers/__tests__/fireworks.spec.ts index f07c1797a05..da0a8cf9a4c 100644 --- a/src/api/providers/__tests__/fireworks.spec.ts +++ b/src/api/providers/__tests__/fireworks.spec.ts @@ -115,6 
+115,27 @@ describe("FireworksHandler", () => { ) }) + it("should return MiniMax M2 model with correct configuration", () => { + const testModelId: FireworksModelId = "accounts/fireworks/models/minimax-m2" + const handlerWithModel = new FireworksHandler({ + apiModelId: testModelId, + fireworksApiKey: "test-fireworks-api-key", + }) + const model = handlerWithModel.getModel() + expect(model.id).toBe(testModelId) + expect(model.info).toEqual( + expect.objectContaining({ + maxTokens: 4096, + contextWindow: 204800, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.3, + outputPrice: 1.2, + description: expect.stringContaining("MiniMax M2 is a high-performance language model"), + }), + ) + }) + it("should return Qwen3 235B model with correct configuration", () => { const testModelId: FireworksModelId = "accounts/fireworks/models/qwen3-235b-a22b-instruct-2507" const handlerWithModel = new FireworksHandler({ @@ -242,6 +263,27 @@ describe("FireworksHandler", () => { ) }) + it("should return GLM-4.6 model with correct configuration", () => { + const testModelId: FireworksModelId = "accounts/fireworks/models/glm-4p6" + const handlerWithModel = new FireworksHandler({ + apiModelId: testModelId, + fireworksApiKey: "test-fireworks-api-key", + }) + const model = handlerWithModel.getModel() + expect(model.id).toBe(testModelId) + expect(model.info).toEqual( + expect.objectContaining({ + maxTokens: 25344, + contextWindow: 198000, + supportsImages: false, + supportsPromptCache: false, + inputPrice: 0.55, + outputPrice: 2.19, + description: expect.stringContaining("Z.ai GLM-4.6 is an advanced coding model"), + }), + ) + }) + it("should return gpt-oss-20b model with correct configuration", () => { const testModelId: FireworksModelId = "accounts/fireworks/models/gpt-oss-20b" const handlerWithModel = new FireworksHandler({ diff --git a/src/api/providers/__tests__/glama.spec.ts b/src/api/providers/__tests__/glama.spec.ts index d42491321f1..9f82cad3ba4 100644 --- 
a/src/api/providers/__tests__/glama.spec.ts +++ b/src/api/providers/__tests__/glama.spec.ts @@ -20,7 +20,6 @@ vitest.mock("../fetchers/modelCache", () => ({ cacheReadsPrice: 0.3, description: "Claude 3.7 Sonnet", thinking: false, - supportsComputerUse: true, }, "openai/gpt-4o": { maxTokens: 4096, diff --git a/src/api/providers/__tests__/minimax.spec.ts b/src/api/providers/__tests__/minimax.spec.ts new file mode 100644 index 00000000000..1033626d0ea --- /dev/null +++ b/src/api/providers/__tests__/minimax.spec.ts @@ -0,0 +1,277 @@ +// npx vitest run src/api/providers/__tests__/minimax.spec.ts + +vitest.mock("vscode", () => ({ + workspace: { + getConfiguration: vitest.fn().mockReturnValue({ + get: vitest.fn().mockReturnValue(600), // Default timeout in seconds + }), + }, +})) + +import OpenAI from "openai" +import { Anthropic } from "@anthropic-ai/sdk" + +import { type MinimaxModelId, minimaxDefaultModelId, minimaxModels } from "@roo-code/types" + +import { MiniMaxHandler } from "../minimax" + +vitest.mock("openai", () => { + const createMock = vitest.fn() + return { + default: vitest.fn(() => ({ chat: { completions: { create: createMock } } })), + } +}) + +describe("MiniMaxHandler", () => { + let handler: MiniMaxHandler + let mockCreate: any + + beforeEach(() => { + vitest.clearAllMocks() + mockCreate = (OpenAI as unknown as any)().chat.completions.create + }) + + describe("International MiniMax (default)", () => { + beforeEach(() => { + handler = new MiniMaxHandler({ + minimaxApiKey: "test-minimax-api-key", + minimaxBaseUrl: "https://api.minimax.io/v1", + }) + }) + + it("should use the correct international MiniMax base URL by default", () => { + new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) + expect(OpenAI).toHaveBeenCalledWith( + expect.objectContaining({ + baseURL: "https://api.minimax.io/v1", + }), + ) + }) + + it("should use the provided API key", () => { + const minimaxApiKey = "test-minimax-api-key" + new MiniMaxHandler({ minimaxApiKey }) + 
expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ apiKey: minimaxApiKey })) + }) + + it("should return default model when no model is specified", () => { + const model = handler.getModel() + expect(model.id).toBe(minimaxDefaultModelId) + expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId]) + }) + + it("should return specified model when valid model is provided", () => { + const testModelId: MinimaxModelId = "MiniMax-M2" + const handlerWithModel = new MiniMaxHandler({ + apiModelId: testModelId, + minimaxApiKey: "test-minimax-api-key", + }) + const model = handlerWithModel.getModel() + expect(model.id).toBe(testModelId) + expect(model.info).toEqual(minimaxModels[testModelId]) + }) + + it("should return MiniMax-M2 model with correct configuration", () => { + const testModelId: MinimaxModelId = "MiniMax-M2" + const handlerWithModel = new MiniMaxHandler({ + apiModelId: testModelId, + minimaxApiKey: "test-minimax-api-key", + }) + const model = handlerWithModel.getModel() + expect(model.id).toBe(testModelId) + expect(model.info).toEqual(minimaxModels[testModelId]) + expect(model.info.contextWindow).toBe(192_000) + expect(model.info.maxTokens).toBe(16_384) + expect(model.info.supportsPromptCache).toBe(false) + }) + }) + + describe("China MiniMax", () => { + beforeEach(() => { + handler = new MiniMaxHandler({ + minimaxApiKey: "test-minimax-api-key", + minimaxBaseUrl: "https://api.minimaxi.com/v1", + }) + }) + + it("should use the correct China MiniMax base URL", () => { + new MiniMaxHandler({ + minimaxApiKey: "test-minimax-api-key", + minimaxBaseUrl: "https://api.minimaxi.com/v1", + }) + expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ baseURL: "https://api.minimaxi.com/v1" })) + }) + + it("should use the provided API key for China", () => { + const minimaxApiKey = "test-minimax-api-key" + new MiniMaxHandler({ minimaxApiKey, minimaxBaseUrl: "https://api.minimaxi.com/v1" }) + expect(OpenAI).toHaveBeenCalledWith(expect.objectContaining({ 
apiKey: minimaxApiKey })) + }) + + it("should return default model when no model is specified", () => { + const model = handler.getModel() + expect(model.id).toBe(minimaxDefaultModelId) + expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId]) + }) + }) + + describe("Default behavior", () => { + it("should default to international base URL when none is specified", () => { + const handlerDefault = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) + expect(OpenAI).toHaveBeenCalledWith( + expect.objectContaining({ + baseURL: "https://api.minimax.io/v1", + }), + ) + + const model = handlerDefault.getModel() + expect(model.id).toBe(minimaxDefaultModelId) + expect(model.info).toEqual(minimaxModels[minimaxDefaultModelId]) + }) + + it("should default to MiniMax-M2 model", () => { + const handlerDefault = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) + const model = handlerDefault.getModel() + expect(model.id).toBe("MiniMax-M2") + }) + }) + + describe("API Methods", () => { + beforeEach(() => { + handler = new MiniMaxHandler({ minimaxApiKey: "test-minimax-api-key" }) + }) + + it("completePrompt method should return text from MiniMax API", async () => { + const expectedResponse = "This is a test response from MiniMax" + mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: expectedResponse } }] }) + const result = await handler.completePrompt("test prompt") + expect(result).toBe(expectedResponse) + }) + + it("should handle errors in completePrompt", async () => { + const errorMessage = "MiniMax API error" + mockCreate.mockRejectedValueOnce(new Error(errorMessage)) + await expect(handler.completePrompt("test prompt")).rejects.toThrow() + }) + + it("createMessage should yield text content from stream", async () => { + const testContent = "This is test content from MiniMax stream" + + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vitest + .fn() + .mockResolvedValueOnce({ + done: 
false, + value: { choices: [{ delta: { content: testContent } }] }, + }) + .mockResolvedValueOnce({ done: true }), + }), + } + }) + + const stream = handler.createMessage("system prompt", []) + const firstChunk = await stream.next() + + expect(firstChunk.done).toBe(false) + expect(firstChunk.value).toEqual({ type: "text", text: testContent }) + }) + + it("createMessage should yield usage data from stream", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + next: vitest + .fn() + .mockResolvedValueOnce({ + done: false, + value: { + choices: [{ delta: {} }], + usage: { prompt_tokens: 10, completion_tokens: 20 }, + }, + }) + .mockResolvedValueOnce({ done: true }), + }), + } + }) + + const stream = handler.createMessage("system prompt", []) + const firstChunk = await stream.next() + + expect(firstChunk.done).toBe(false) + expect(firstChunk.value).toEqual({ type: "usage", inputTokens: 10, outputTokens: 20 }) + }) + + it("createMessage should pass correct parameters to MiniMax client", async () => { + const modelId: MinimaxModelId = "MiniMax-M2" + const modelInfo = minimaxModels[modelId] + const handlerWithModel = new MiniMaxHandler({ + apiModelId: modelId, + minimaxApiKey: "test-minimax-api-key", + }) + + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + async next() { + return { done: true } + }, + }), + } + }) + + const systemPrompt = "Test system prompt for MiniMax" + const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message for MiniMax" }] + + const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages) + await messageGenerator.next() + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + model: modelId, + max_tokens: Math.min(modelInfo.maxTokens, Math.ceil(modelInfo.contextWindow * 0.2)), + temperature: 1, + messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]), + stream: 
true, + stream_options: { include_usage: true }, + }), + undefined, + ) + }) + + it("should use temperature 1 by default", async () => { + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + async next() { + return { done: true } + }, + }), + } + }) + + const messageGenerator = handler.createMessage("test", []) + await messageGenerator.next() + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + temperature: 1, + }), + undefined, + ) + }) + }) + + describe("Model Configuration", () => { + it("should correctly configure MiniMax-M2 model properties", () => { + const model = minimaxModels["MiniMax-M2"] + expect(model.maxTokens).toBe(16_384) + expect(model.contextWindow).toBe(192_000) + expect(model.supportsImages).toBe(false) + expect(model.supportsPromptCache).toBe(false) + expect(model.inputPrice).toBe(0.3) + expect(model.outputPrice).toBe(1.2) + }) + }) +}) diff --git a/src/api/providers/__tests__/openrouter.spec.ts b/src/api/providers/__tests__/openrouter.spec.ts index 9807986fa97..9976343aac1 100644 --- a/src/api/providers/__tests__/openrouter.spec.ts +++ b/src/api/providers/__tests__/openrouter.spec.ts @@ -27,7 +27,6 @@ vitest.mock("../fetchers/modelCache", () => ({ cacheReadsPrice: 0.3, description: "Claude 3.7 Sonnet", thinking: false, - supportsComputerUse: true, }, "anthropic/claude-3.7-sonnet:thinking": { maxTokens: 128000, @@ -39,7 +38,6 @@ vitest.mock("../fetchers/modelCache", () => ({ cacheWritesPrice: 3.75, cacheReadsPrice: 0.3, description: "Claude 3.7 Sonnet with thinking", - supportsComputerUse: true, }, }) }), diff --git a/src/api/providers/__tests__/ovhcloud.spec.ts b/src/api/providers/__tests__/ovhcloud.spec.ts index 962002ef224..8ffce520c4b 100644 --- a/src/api/providers/__tests__/ovhcloud.spec.ts +++ b/src/api/providers/__tests__/ovhcloud.spec.ts @@ -180,7 +180,7 @@ describe("OVHcloudAIEndpointsHandler", () => { type: "usage", inputTokens: 15, outputTokens: 25, - totalCost: 
calculateApiCostOpenAI(info, 15, 25), + totalCost: calculateApiCostOpenAI(info, 15, 25).totalCost || undefined, }) }) diff --git a/src/api/providers/__tests__/requesty.spec.ts b/src/api/providers/__tests__/requesty.spec.ts index a4eef6c3e45..a82a34dd372 100644 --- a/src/api/providers/__tests__/requesty.spec.ts +++ b/src/api/providers/__tests__/requesty.spec.ts @@ -31,7 +31,6 @@ vitest.mock("../fetchers/modelCache", () => ({ contextWindow: 200000, supportsImages: true, supportsPromptCache: true, - supportsComputerUse: true, inputPrice: 3, outputPrice: 15, cacheWritesPrice: 3.75, @@ -94,7 +93,6 @@ describe("RequestyHandler", () => { contextWindow: 200000, supportsImages: true, supportsPromptCache: true, - supportsComputerUse: true, inputPrice: 3, outputPrice: 15, cacheWritesPrice: 3.75, @@ -115,7 +113,6 @@ describe("RequestyHandler", () => { contextWindow: 200000, supportsImages: true, supportsPromptCache: true, - supportsComputerUse: true, inputPrice: 3, outputPrice: 15, cacheWritesPrice: 3.75, diff --git a/src/api/providers/__tests__/roo.spec.ts b/src/api/providers/__tests__/roo.spec.ts index d4affa2beaf..7555a49d498 100644 --- a/src/api/providers/__tests__/roo.spec.ts +++ b/src/api/providers/__tests__/roo.spec.ts @@ -1,7 +1,7 @@ // npx vitest run api/providers/__tests__/roo.spec.ts import { Anthropic } from "@anthropic-ai/sdk" -import { rooDefaultModelId, rooModels } from "@roo-code/types" +import { rooDefaultModelId } from "@roo-code/types" import { ApiHandlerOptions } from "../../../shared/api" @@ -86,6 +86,28 @@ vitest.mock("../../../i18n", () => ({ }), })) +// Mock model cache +vitest.mock("../../providers/fetchers/modelCache", () => ({ + getModels: vitest.fn(), + flushModels: vitest.fn(), + getModelsFromCache: vitest.fn((provider: string) => { + if (provider === "roo") { + return { + "xai/grok-code-fast-1": { + maxTokens: 16_384, + contextWindow: 262_144, + supportsImages: false, + supportsReasoningEffort: true, // Enable reasoning for tests + 
supportsPromptCache: true, + inputPrice: 0, + outputPrice: 0, + }, + } + } + return {} + }), +})) + // Import after mocks are set up import { RooHandler } from "../roo" import { CloudService } from "@roo-code/cloud" @@ -160,6 +182,21 @@ describe("RooHandler", () => { handler = new RooHandler(mockOptions) }) + it("should update API key before making request", async () => { + // Set up a fresh token that will be returned when createMessage is called + const freshToken = "fresh-session-token" + mockGetSessionTokenFn.mockReturnValue(freshToken) + + const stream = handler.createMessage(systemPrompt, messages) + // Consume the stream to trigger the API call + for await (const _chunk of stream) { + // Just consume + } + + // Verify getSessionToken was called to get the fresh token + expect(mockGetSessionTokenFn).toHaveBeenCalled() + }) + it("should handle streaming responses", async () => { const stream = handler.createMessage(systemPrompt, messages) const chunks: any[] = [] @@ -268,6 +305,25 @@ describe("RooHandler", () => { }) }) + it("should update API key before making request", async () => { + // Set up a fresh token that will be returned when completePrompt is called + const freshToken = "fresh-session-token" + mockGetSessionTokenFn.mockReturnValue(freshToken) + + // Access the client's apiKey property to verify it gets updated + const clientApiKeyGetter = vitest.fn() + Object.defineProperty(handler["client"], "apiKey", { + get: clientApiKeyGetter, + set: vitest.fn(), + configurable: true, + }) + + await handler.completePrompt("Test prompt") + + // Verify getSessionToken was called to get the fresh token + expect(mockGetSessionTokenFn).toHaveBeenCalled() + }) + it("should handle API errors", async () => { mockCreate.mockRejectedValueOnce(new Error("API Error")) await expect(handler.completePrompt("Test prompt")).rejects.toThrow( @@ -301,8 +357,9 @@ describe("RooHandler", () => { const modelInfo = handler.getModel() expect(modelInfo.id).toBe(mockOptions.apiModelId) 
expect(modelInfo.info).toBeDefined() - // xai/grok-code-fast-1 is a valid model in rooModels - expect(modelInfo.info).toBe(rooModels["xai/grok-code-fast-1"]) + // Models are loaded dynamically, so we just verify the structure + expect(modelInfo.info.maxTokens).toBeDefined() + expect(modelInfo.info.contextWindow).toBeDefined() }) it("should return default model when no model specified", () => { @@ -310,7 +367,9 @@ describe("RooHandler", () => { const modelInfo = handlerWithoutModel.getModel() expect(modelInfo.id).toBe(rooDefaultModelId) expect(modelInfo.info).toBeDefined() - expect(modelInfo.info).toBe(rooModels[rooDefaultModelId]) + // Models are loaded dynamically + expect(modelInfo.info.maxTokens).toBeDefined() + expect(modelInfo.info.contextWindow).toBeDefined() }) it("should handle unknown model ID with fallback info", () => { @@ -320,24 +379,27 @@ describe("RooHandler", () => { const modelInfo = handlerWithUnknownModel.getModel() expect(modelInfo.id).toBe("unknown-model-id") expect(modelInfo.info).toBeDefined() - // Should return fallback info for unknown models - expect(modelInfo.info.maxTokens).toBe(16_384) - expect(modelInfo.info.contextWindow).toBe(262_144) - expect(modelInfo.info.supportsImages).toBe(false) - expect(modelInfo.info.supportsPromptCache).toBe(true) - expect(modelInfo.info.inputPrice).toBe(0) - expect(modelInfo.info.outputPrice).toBe(0) + // Should return fallback info for unknown models (dynamic models will be merged in real usage) + expect(modelInfo.info.maxTokens).toBeDefined() + expect(modelInfo.info.contextWindow).toBeDefined() + expect(modelInfo.info.supportsImages).toBeDefined() + expect(modelInfo.info.supportsPromptCache).toBeDefined() + expect(modelInfo.info.inputPrice).toBeDefined() + expect(modelInfo.info.outputPrice).toBeDefined() }) - it("should return correct model info for all Roo models", () => { - // Test each model in rooModels - const modelIds = Object.keys(rooModels) as Array + it("should handle any model ID since models 
are loaded dynamically", () => { + // Test with various model IDs - they should all work since models are loaded dynamically + const testModelIds = ["xai/grok-code-fast-1", "roo/sonic", "deepseek/deepseek-chat-v3.1"] - for (const modelId of modelIds) { + for (const modelId of testModelIds) { const handlerWithModel = new RooHandler({ apiModelId: modelId }) const modelInfo = handlerWithModel.getModel() expect(modelInfo.id).toBe(modelId) - expect(modelInfo.info).toBe(rooModels[modelId]) + expect(modelInfo.info).toBeDefined() + // Verify the structure has required fields + expect(modelInfo.info.maxTokens).toBeDefined() + expect(modelInfo.info.contextWindow).toBeDefined() } }) }) @@ -440,4 +502,132 @@ describe("RooHandler", () => { expect(handler).toBeInstanceOf(RooHandler) }) }) + + describe("reasoning effort support", () => { + it("should include reasoning with enabled: false when not enabled", async () => { + handler = new RooHandler(mockOptions) + const stream = handler.createMessage(systemPrompt, messages) + for await (const _chunk of stream) { + // Consume stream + } + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + model: mockOptions.apiModelId, + messages: expect.any(Array), + stream: true, + stream_options: { include_usage: true }, + reasoning: { enabled: false }, + }), + undefined, + ) + }) + + it("should include reasoning with enabled: false when explicitly disabled", async () => { + handler = new RooHandler({ + ...mockOptions, + enableReasoningEffort: false, + }) + const stream = handler.createMessage(systemPrompt, messages) + for await (const _chunk of stream) { + // Consume stream + } + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + reasoning: { enabled: false }, + }), + undefined, + ) + }) + + it("should include reasoning with enabled: true and effort: low", async () => { + handler = new RooHandler({ + ...mockOptions, + reasoningEffort: "low", + }) + const stream = handler.createMessage(systemPrompt, 
messages) + for await (const _chunk of stream) { + // Consume stream + } + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + reasoning: { enabled: true, effort: "low" }, + }), + undefined, + ) + }) + + it("should include reasoning with enabled: true and effort: medium", async () => { + handler = new RooHandler({ + ...mockOptions, + reasoningEffort: "medium", + }) + const stream = handler.createMessage(systemPrompt, messages) + for await (const _chunk of stream) { + // Consume stream + } + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + reasoning: { enabled: true, effort: "medium" }, + }), + undefined, + ) + }) + + it("should include reasoning with enabled: true and effort: high", async () => { + handler = new RooHandler({ + ...mockOptions, + reasoningEffort: "high", + }) + const stream = handler.createMessage(systemPrompt, messages) + for await (const _chunk of stream) { + // Consume stream + } + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + reasoning: { enabled: true, effort: "high" }, + }), + undefined, + ) + }) + + it("should not include reasoning for minimal (treated as none)", async () => { + handler = new RooHandler({ + ...mockOptions, + reasoningEffort: "minimal", + }) + const stream = handler.createMessage(systemPrompt, messages) + for await (const _chunk of stream) { + // Consume stream + } + + // minimal should result in no reasoning parameter + const callArgs = mockCreate.mock.calls[0][0] + expect(callArgs.reasoning).toBeUndefined() + }) + + it("should handle enableReasoningEffort: false overriding reasoningEffort setting", async () => { + handler = new RooHandler({ + ...mockOptions, + enableReasoningEffort: false, + reasoningEffort: "high", + }) + const stream = handler.createMessage(systemPrompt, messages) + for await (const _chunk of stream) { + // Consume stream + } + + // When explicitly disabled, should send enabled: false regardless of effort setting + 
expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + reasoning: { enabled: false }, + }), + undefined, + ) + }) + }) }) diff --git a/src/api/providers/__tests__/synthetic.spec.ts b/src/api/providers/__tests__/synthetic.spec.ts index b0ff6ed4c2b..13d7b8e6202 100644 --- a/src/api/providers/__tests__/synthetic.spec.ts +++ b/src/api/providers/__tests__/synthetic.spec.ts @@ -201,7 +201,7 @@ describe("SyntheticHandler", () => { expect(mockCreate).toHaveBeenCalledWith( expect.objectContaining({ model: modelId, - max_tokens: modelInfo.maxTokens, + max_tokens: 0.2 * modelInfo.maxTokens, temperature: 0.5, messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]), stream: true, diff --git a/src/api/providers/__tests__/unbound.spec.ts b/src/api/providers/__tests__/unbound.spec.ts index b81b74626fe..d2716928e58 100644 --- a/src/api/providers/__tests__/unbound.spec.ts +++ b/src/api/providers/__tests__/unbound.spec.ts @@ -21,7 +21,6 @@ vitest.mock("../fetchers/modelCache", () => ({ cacheReadsPrice: 0.3, description: "Claude 3.5 Sonnet", thinking: false, - supportsComputerUse: true, }, "anthropic/claude-3-7-sonnet-20250219": { maxTokens: 8192, @@ -34,7 +33,6 @@ vitest.mock("../fetchers/modelCache", () => ({ cacheReadsPrice: 0.3, description: "Claude 3.7 Sonnet", thinking: false, - supportsComputerUse: true, }, "openai/gpt-4o": { maxTokens: 4096, diff --git a/src/api/providers/__tests__/vercel-ai-gateway.spec.ts b/src/api/providers/__tests__/vercel-ai-gateway.spec.ts index 46007f687cf..e567c6e4e02 100644 --- a/src/api/providers/__tests__/vercel-ai-gateway.spec.ts +++ b/src/api/providers/__tests__/vercel-ai-gateway.spec.ts @@ -26,7 +26,6 @@ vitest.mock("../fetchers/modelCache", () => ({ cacheWritesPrice: 3.75, cacheReadsPrice: 0.3, description: "Claude Sonnet 4", - supportsComputerUse: true, }, "anthropic/claude-3.5-haiku": { maxTokens: 32000, @@ -38,7 +37,6 @@ vitest.mock("../fetchers/modelCache", () => ({ cacheWritesPrice: 1.25, 
cacheReadsPrice: 0.1, description: "Claude 3.5 Haiku", - supportsComputerUse: false, }, "openai/gpt-4o": { maxTokens: 16000, @@ -50,7 +48,6 @@ vitest.mock("../fetchers/modelCache", () => ({ cacheWritesPrice: 3.125, cacheReadsPrice: 0.25, description: "GPT-4o", - supportsComputerUse: true, }, }) }), @@ -117,7 +114,6 @@ describe("VercelAiGatewayHandler", () => { expect(result.info.contextWindow).toBe(200000) expect(result.info.supportsImages).toBe(true) expect(result.info.supportsPromptCache).toBe(true) - expect(result.info.supportsComputerUse).toBe(true) }) it("returns default model info when options are not provided", async () => { diff --git a/src/api/providers/__tests__/zai.spec.ts b/src/api/providers/__tests__/zai.spec.ts index b14dc604ee3..3e377a86545 100644 --- a/src/api/providers/__tests__/zai.spec.ts +++ b/src/api/providers/__tests__/zai.spec.ts @@ -89,7 +89,22 @@ describe("ZAiHandler", () => { const model = handlerWithModel.getModel() expect(model.id).toBe(testModelId) expect(model.info).toEqual(internationalZAiModels[testModelId]) - expect(model.info.contextWindow).toBe(204_800) + expect(model.info.contextWindow).toBe(200_000) + }) + + it("should return GLM-4.5v international model with vision support", () => { + const testModelId: InternationalZAiModelId = "glm-4.5v" + const handlerWithModel = new ZAiHandler({ + apiModelId: testModelId, + zaiApiKey: "test-zai-api-key", + zaiApiLine: "international_coding", + }) + const model = handlerWithModel.getModel() + expect(model.id).toBe(testModelId) + expect(model.info).toEqual(internationalZAiModels[testModelId]) + expect(model.info.supportsImages).toBe(true) + expect(model.info.maxTokens).toBe(16_384) + expect(model.info.contextWindow).toBe(131_072) }) }) @@ -141,6 +156,21 @@ describe("ZAiHandler", () => { expect(model.info).toEqual(mainlandZAiModels[testModelId]) expect(model.info.contextWindow).toBe(204_800) }) + + it("should return GLM-4.5v China model with vision support", () => { + const testModelId: 
MainlandZAiModelId = "glm-4.5v" + const handlerWithModel = new ZAiHandler({ + apiModelId: testModelId, + zaiApiKey: "test-zai-api-key", + zaiApiLine: "china_coding", + }) + const model = handlerWithModel.getModel() + expect(model.id).toBe(testModelId) + expect(model.info).toEqual(mainlandZAiModels[testModelId]) + expect(model.info.supportsImages).toBe(true) + expect(model.info.maxTokens).toBe(16_384) + expect(model.info.contextWindow).toBe(131_072) + }) }) describe("Default behavior", () => { @@ -257,10 +287,13 @@ describe("ZAiHandler", () => { const messageGenerator = handlerWithModel.createMessage(systemPrompt, messages) await messageGenerator.next() + // Centralized 20% cap should apply to OpenAI-compatible providers like Z AI + const expectedMaxTokens = Math.min(modelInfo.maxTokens, Math.ceil(modelInfo.contextWindow * 0.2)) + expect(mockCreate).toHaveBeenCalledWith( expect.objectContaining({ model: modelId, - max_tokens: modelInfo.maxTokens, + max_tokens: expectedMaxTokens, temperature: ZAI_DEFAULT_TEMPERATURE, messages: expect.arrayContaining([{ role: "system", content: systemPrompt }]), stream: true, @@ -269,5 +302,143 @@ describe("ZAiHandler", () => { undefined, ) }) + + describe("Reasoning functionality", () => { + it("should include thinking parameter when enableReasoningEffort is true and model supports reasoning in createMessage", async () => { + const handlerWithReasoning = new ZAiHandler({ + apiModelId: "glm-4.6", // GLM-4.6 has supportsReasoningBinary: true + zaiApiKey: "test-zai-api-key", + zaiApiLine: "international_coding", + enableReasoningEffort: true, + }) + + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + async next() { + return { done: true } + }, + }), + } + }) + + const systemPrompt = "Test system prompt" + const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message" }] + + const messageGenerator = handlerWithReasoning.createMessage(systemPrompt, messages) + await 
messageGenerator.next() + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + thinking: { type: "enabled" }, + }), + undefined, + ) + }) + + it("should not include thinking parameter when enableReasoningEffort is false in createMessage", async () => { + const handlerWithoutReasoning = new ZAiHandler({ + apiModelId: "glm-4.6", // GLM-4.6 has supportsReasoningBinary: true + zaiApiKey: "test-zai-api-key", + zaiApiLine: "international_coding", + enableReasoningEffort: false, + }) + + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + async next() { + return { done: true } + }, + }), + } + }) + + const systemPrompt = "Test system prompt" + const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message" }] + + const messageGenerator = handlerWithoutReasoning.createMessage(systemPrompt, messages) + await messageGenerator.next() + + expect(mockCreate).toHaveBeenCalledWith( + expect.not.objectContaining({ + thinking: expect.anything(), + }), + undefined, + ) + }) + + it("should not include thinking parameter when model does not support reasoning in createMessage", async () => { + const handlerWithNonReasoningModel = new ZAiHandler({ + apiModelId: "glm-4-32b-0414-128k", // This model doesn't have supportsReasoningBinary: true + zaiApiKey: "test-zai-api-key", + zaiApiLine: "international_coding", + enableReasoningEffort: true, + }) + + mockCreate.mockImplementationOnce(() => { + return { + [Symbol.asyncIterator]: () => ({ + async next() { + return { done: true } + }, + }), + } + }) + + const systemPrompt = "Test system prompt" + const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Test message" }] + + const messageGenerator = handlerWithNonReasoningModel.createMessage(systemPrompt, messages) + await messageGenerator.next() + + expect(mockCreate).toHaveBeenCalledWith( + expect.not.objectContaining({ + thinking: expect.anything(), + }), + undefined, + ) + }) + + 
it("should include thinking parameter when enableReasoningEffort is true and model supports reasoning in completePrompt", async () => { + const handlerWithReasoning = new ZAiHandler({ + apiModelId: "glm-4.5", // GLM-4.5 has supportsReasoningBinary: true + zaiApiKey: "test-zai-api-key", + zaiApiLine: "international_coding", + enableReasoningEffort: true, + }) + + const expectedResponse = "This is a test response" + mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: expectedResponse } }] }) + + await handlerWithReasoning.completePrompt("test prompt") + + expect(mockCreate).toHaveBeenCalledWith( + expect.objectContaining({ + thinking: { type: "enabled" }, + }), + ) + }) + + it("should not include thinking parameter when enableReasoningEffort is false in completePrompt", async () => { + const handlerWithoutReasoning = new ZAiHandler({ + apiModelId: "glm-4.5", // GLM-4.5 has supportsReasoningBinary: true + zaiApiKey: "test-zai-api-key", + zaiApiLine: "international_coding", + enableReasoningEffort: false, + }) + + const expectedResponse = "This is a test response" + mockCreate.mockResolvedValueOnce({ choices: [{ message: { content: expectedResponse } }] }) + + await handlerWithoutReasoning.completePrompt("test prompt") + + expect(mockCreate).toHaveBeenCalledWith( + expect.not.objectContaining({ + thinking: expect.anything(), + }), + ) + }) + }) }) }) diff --git a/src/api/providers/anthropic.ts b/src/api/providers/anthropic.ts index b2e158eca53..0e767ce2379 100644 --- a/src/api/providers/anthropic.ts +++ b/src/api/providers/anthropic.ts @@ -230,17 +230,19 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa } if (inputTokens > 0 || outputTokens > 0 || cacheWriteTokens > 0 || cacheReadTokens > 0) { + const { totalCost } = calculateApiCostAnthropic( + this.getModel().info, + inputTokens, + outputTokens, + cacheWriteTokens, + cacheReadTokens, + ) + yield { type: "usage", inputTokens: 0, outputTokens: 0, - totalCost: 
calculateApiCostAnthropic( - this.getModel().info, - inputTokens, - outputTokens, - cacheWriteTokens, - cacheReadTokens, - ), + totalCost, } } } diff --git a/src/api/providers/base-openai-compatible-provider.ts b/src/api/providers/base-openai-compatible-provider.ts index c62a26227ac..20140e90bec 100644 --- a/src/api/providers/base-openai-compatible-provider.ts +++ b/src/api/providers/base-openai-compatible-provider.ts @@ -6,7 +6,8 @@ import { type ModelInfo, } from "@roo-code/types" -import type { ApiHandlerOptions } from "../../shared/api" +import { type ApiHandlerOptions, getModelMaxOutputTokens } from "../../shared/api" +import { XmlMatcher } from "../../utils/xml-matcher" import { ApiStream } from "../transform/stream" import { convertToOpenAiMessages } from "../transform/openai-format" @@ -81,10 +82,16 @@ export abstract class BaseOpenAiCompatibleProvider metadata?: ApiHandlerCreateMessageMetadata, requestOptions?: OpenAI.RequestOptions, ) { - const { - id: model, - info: { maxTokens: max_tokens }, - } = this.getModel() + const { id: model, info } = this.getModel() + + // Centralized cap: clamp to 20% of the context window (unless provider-specific exceptions apply) + const max_tokens = + getModelMaxOutputTokens({ + modelId: model, + model: info, + settings: this.options, + format: "openai", + }) ?? undefined const temperature = this.options.modelTemperature ?? this.defaultTemperature @@ -114,6 +121,15 @@ export abstract class BaseOpenAiCompatibleProvider ): ApiStream { const stream = await this.createStream(systemPrompt, messages, metadata) + const matcher = new XmlMatcher( + "think", + (chunk) => + ({ + type: chunk.matched ? 
"reasoning" : "text", + text: chunk.data, + }) as const, + ) + for await (const chunk of stream) { verifyFinishReason(chunk.choices[0]) // kilocode_change const delta = chunk.choices[0]?.delta @@ -121,9 +137,15 @@ export abstract class BaseOpenAiCompatibleProvider yield* processNativeToolCallsFromDelta(delta, getActiveToolUseStyle(this.options)) // kilocode_change if (delta?.content) { - yield { - type: "text", - text: delta.content, + for (const processedChunk of matcher.update(delta.content)) { + yield processedChunk + } + } + + if (delta && "reasoning_content" in delta) { + const reasoning_content = (delta.reasoning_content as string | undefined) || "" + if (reasoning_content?.trim()) { + yield { type: "reasoning", text: reasoning_content } } } @@ -135,6 +157,11 @@ export abstract class BaseOpenAiCompatibleProvider } } } + + // Process any remaining content + for (const processedChunk of matcher.final()) { + yield processedChunk + } } async completePrompt(prompt: string): Promise { diff --git a/src/api/providers/cerebras.ts b/src/api/providers/cerebras.ts index a0421844e81..16dfa282adb 100644 --- a/src/api/providers/cerebras.ts +++ b/src/api/providers/cerebras.ts @@ -331,6 +331,7 @@ export class CerebrasHandler extends BaseProvider implements SingleCompletionHan const { info } = this.getModel() // Use actual token usage from the last request const { inputTokens, outputTokens } = this.lastUsage - return calculateApiCostOpenAI(info, inputTokens, outputTokens) + const { totalCost } = calculateApiCostOpenAI(info, inputTokens, outputTokens) + return totalCost } } diff --git a/src/api/providers/chutes.ts b/src/api/providers/chutes.ts index 11a4ae18faa..e232d8718f1 100644 --- a/src/api/providers/chutes.ts +++ b/src/api/providers/chutes.ts @@ -1,33 +1,33 @@ import { DEEP_SEEK_DEFAULT_TEMPERATURE, - type ChutesModelId, chutesDefaultModelId, - chutesModels, + chutesDefaultModelInfo, getActiveToolUseStyle, // kilocode_change } from "@roo-code/types" import { Anthropic } 
from "@anthropic-ai/sdk" import OpenAI from "openai" import type { ApiHandlerOptions } from "../../shared/api" +import { getModelMaxOutputTokens } from "../../shared/api" import { XmlMatcher } from "../../utils/xml-matcher" import { convertToR1Format } from "../transform/r1-format" import { convertToOpenAiMessages } from "../transform/openai-format" import { ApiStream } from "../transform/stream" +import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index" -import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider" +import { RouterProvider } from "./router-provider" import { addNativeToolCallsToParams, processNativeToolCallsFromDelta } from "./kilocode/nativeToolCallHelpers" -import { ApiHandlerCreateMessageMetadata } from ".." // kilocode_change -export class ChutesHandler extends BaseOpenAiCompatibleProvider { +export class ChutesHandler extends RouterProvider implements SingleCompletionHandler { constructor(options: ApiHandlerOptions) { super({ - ...options, - providerName: "Chutes", + options, + name: "chutes", baseURL: "https://llm.chutes.ai/v1", apiKey: options.chutesApiKey, - defaultProviderModelId: chutesDefaultModelId, - providerModels: chutesModels, - defaultTemperature: 0.5, + modelId: options.apiModelId, + defaultModelId: chutesDefaultModelId, + defaultModelInfo: chutesDefaultModelInfo, }) } @@ -36,35 +36,41 @@ export class ChutesHandler extends BaseOpenAiCompatibleProvider { messages: Anthropic.Messages.MessageParam[], metadata?: ApiHandlerCreateMessageMetadata, // kilocode_change ): OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming { - const { - id: model, - info: { maxTokens: max_tokens }, - } = this.getModel() + const { id: model, info } = this.getModel() - const temperature = this.options.modelTemperature ?? 
this.getModel().info.temperature + // Centralized cap: clamp to 20% of the context window (unless provider-specific exceptions apply) + const max_tokens = + getModelMaxOutputTokens({ + modelId: model, + model: info, + settings: this.options, + format: "openai", + }) ?? undefined - // kilocode_change start: addNativeToolCallsToParams - return addNativeToolCallsToParams( - { - model, - max_tokens, - temperature, - messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)], - stream: true, - stream_options: { include_usage: true }, - }, - this.options, - metadata, - ) - // kilocode_change end + const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = { + model, + max_tokens, + messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)], + stream: true, + stream_options: { include_usage: true }, + } + + // Only add temperature if model supports it + if (this.supportsTemperature(model)) { + params.temperature = this.options.modelTemperature ?? 
info.temperature + } + + addNativeToolCallsToParams(params, this.options, metadata) // kilocode_change + + return params } override async *createMessage( systemPrompt: string, messages: Anthropic.Messages.MessageParam[], - metadata?: ApiHandlerCreateMessageMetadata, // kilocode_change + metadata?: ApiHandlerCreateMessageMetadata, ): ApiStream { - const model = this.getModel() + const model = await this.fetchModel() if (model.id.includes("DeepSeek-R1")) { const stream = await this.client.chat.completions.create({ @@ -110,11 +116,73 @@ export class ChutesHandler extends BaseOpenAiCompatibleProvider { yield processedChunk } } else { - yield* super.createMessage( - systemPrompt, - messages, - metadata, // kilocode_change + // For non-DeepSeek-R1 models, use standard OpenAI streaming + const stream = await this.client.chat.completions.create( + this.getCompletionParams( + systemPrompt, + messages, + metadata, // kilocode_change + ), ) + + for await (const chunk of stream) { + const delta = chunk.choices[0]?.delta + + yield* processNativeToolCallsFromDelta(delta, getActiveToolUseStyle(this.options)) // kilocode_change + + if (delta?.content) { + yield { type: "text", text: delta.content } + } + + if (delta && "reasoning_content" in delta && delta.reasoning_content) { + yield { type: "reasoning", text: (delta.reasoning_content as string | undefined) || "" } + } + + if (chunk.usage) { + yield { + type: "usage", + inputTokens: chunk.usage.prompt_tokens || 0, + outputTokens: chunk.usage.completion_tokens || 0, + } + } + } + } + } + + async completePrompt(prompt: string): Promise { + const model = await this.fetchModel() + const { id: modelId, info } = model + + try { + // Centralized cap: clamp to 20% of the context window (unless provider-specific exceptions apply) + const max_tokens = + getModelMaxOutputTokens({ + modelId, + model: info, + settings: this.options, + format: "openai", + }) ?? 
undefined + + const requestParams: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = { + model: modelId, + messages: [{ role: "user", content: prompt }], + max_tokens, + } + + // Only add temperature if model supports it + if (this.supportsTemperature(modelId)) { + const isDeepSeekR1 = modelId.includes("DeepSeek-R1") + const defaultTemperature = isDeepSeekR1 ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0.5 + requestParams.temperature = this.options.modelTemperature ?? defaultTemperature + } + + const response = await this.client.chat.completions.create(requestParams) + return response.choices[0]?.message.content || "" + } catch (error) { + if (error instanceof Error) { + throw new Error(`Chutes completion error: ${error.message}`) + } + throw error } } @@ -125,7 +193,7 @@ export class ChutesHandler extends BaseOpenAiCompatibleProvider { ...model, info: { ...model.info, - temperature: isDeepSeekR1 ? DEEP_SEEK_DEFAULT_TEMPERATURE : this.defaultTemperature, + temperature: isDeepSeekR1 ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0.5, }, } } diff --git a/src/api/providers/deepinfra.ts b/src/api/providers/deepinfra.ts index 0dce01e3dfd..f7e5b7f0c42 100644 --- a/src/api/providers/deepinfra.ts +++ b/src/api/providers/deepinfra.ts @@ -142,9 +142,9 @@ export class DeepInfraHandler extends RouterProvider implements SingleCompletion const cacheWriteTokens = usage?.prompt_tokens_details?.cache_write_tokens || 0 const cacheReadTokens = usage?.prompt_tokens_details?.cached_tokens || 0 - const totalCost = modelInfo + const { totalCost } = modelInfo ? 
calculateApiCostOpenAI(modelInfo, inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens) - : 0 + : { totalCost: 0 } return { type: "usage", diff --git a/src/api/providers/fetchers/__tests__/chutes.test.ts b/src/api/providers/fetchers/__tests__/chutes.test.ts deleted file mode 100644 index 367907aa5f1..00000000000 --- a/src/api/providers/fetchers/__tests__/chutes.test.ts +++ /dev/null @@ -1,391 +0,0 @@ -// kilocode_change - file added -// npx vitest run api/providers/fetchers/__tests__/chutes.test.ts - -import { describe, it, expect, vi, beforeEach, afterEach } from "vitest" -import axios from "axios" -import { getChutesModels } from "../chutes" -import type { ChutesModel } from "../chutes" - -// Mock axios -vi.mock("axios") -const mockedAxios = axios as any - -describe("getChutesModels", () => { - let consoleErrorSpy: any - - beforeEach(() => { - consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {}) - }) - - afterEach(() => { - vi.restoreAllMocks() - vi.clearAllMocks() - }) - - it("should successfully fetch and parse models from Chutes.AI API", async () => { - const mockChutesResponse = { - object: "list", - data: [ - { - id: "deepseek-ai/DeepSeek-R1-0528", - root: "deepseek-ai/DeepSeek-R1-0528", - price: { - input: { tao: 1.0, usd: 0.55 }, - output: { tao: 2.0, usd: 2.19 }, - }, - object: "model", - created: 1704067200, - pricing: { prompt: 0.55, completion: 2.19 }, - owned_by: "deepseek", - quantization: "fp8", - max_model_len: 163840, - context_length: 163840, - input_modalities: ["text"], - max_output_length: 163840, - output_modalities: ["text"], - supported_features: ["json_mode", "structured_outputs", "reasoning"], - supported_sampling_parameters: [ - "temperature", - "top_p", - "top_k", - "repetition_penalty", - "frequency_penalty", - "presence_penalty", - "stop", - "seed", - ], - }, - { - id: "zai-org/GLM-4.5V", - root: "zai-org/GLM-4.5V", - price: { - input: { tao: 0.5, usd: 0.08 }, - output: { tao: 1.5, usd: 0.33 }, - }, - 
object: "model", - created: 1704067200, - pricing: { prompt: 0.08, completion: 0.33 }, - owned_by: "zai-org", - quantization: null, - max_model_len: 131072, - context_length: 131072, - input_modalities: ["text", "image"], - max_output_length: 131072, - output_modalities: ["text"], - supported_features: ["json_mode", "structured_outputs", "vision"], - supported_sampling_parameters: [ - "temperature", - "top_p", - "top_k", - "repetition_penalty", - "frequency_penalty", - "presence_penalty", - "stop", - "seed", - ], - }, - ], - } - - mockedAxios.get.mockResolvedValueOnce({ data: mockChutesResponse }) - - const result = await getChutesModels("test-api-key") - - expect(mockedAxios.get).toHaveBeenCalledWith("https://llm.chutes.ai/v1/models", { - headers: { - "Content-Type": "application/json", - Authorization: "Bearer test-api-key", - }, - timeout: 10000, - }) - - expect(Object.keys(result)).toHaveLength(2) - - // Test DeepSeek R1 model (reasoning model) - const deepseekModel = result["deepseek-ai/DeepSeek-R1-0528"] - expect(deepseekModel).toEqual({ - maxTokens: 32768, - contextWindow: 163840, - supportsImages: false, - supportsPromptCache: true, - supportsComputerUse: false, - inputPrice: 0.55, // $0.55 per 1M tokens - outputPrice: 2.19, // $2.19 per 1M tokens - description: "deepseek-ai/DeepSeek-R1-0528 via Chutes.AI", - supportsReasoningEffort: false, - supportsReasoningBudget: true, - supportedParameters: [], - }) - - // Test GLM vision model - const glmModel = result["zai-org/GLM-4.5V"] - expect(glmModel).toEqual({ - maxTokens: 26215, // 20% of 131072 = 26214.4, rounded up - contextWindow: 131072, - supportsImages: true, - supportsPromptCache: true, - supportsComputerUse: false, - inputPrice: 0.08, // $0.08 per 1M tokens - outputPrice: 0.33, // $0.33 per 1M tokens - description: "zai-org/GLM-4.5V via Chutes.AI", - supportsReasoningEffort: false, - supportsReasoningBudget: false, - supportedParameters: [], - }) - }) - - it("should handle models without USD pricing", 
async () => { - const mockChutesResponse = { - object: "list", - data: [ - { - id: "test/free-model", - root: "test/free-model", - price: null, // No pricing available - object: "model", - created: 1704067200, - pricing: null, - owned_by: "test", - quantization: "fp8", - max_model_len: 8192, - context_length: 8192, - input_modalities: ["text"], - max_output_length: 8192, - output_modalities: ["text"], - supported_features: ["text"], - supported_sampling_parameters: ["temperature", "top_p", "seed", "stop"], - }, - ], - } - - mockedAxios.get.mockResolvedValueOnce({ data: mockChutesResponse }) - - const result = await getChutesModels() - - const model = result["test/free-model"] - expect(model).toEqual({ - maxTokens: 1639, // 20% of 8192, rounded up - contextWindow: 8192, - supportsImages: false, - supportsPromptCache: true, - supportsComputerUse: false, - inputPrice: 0, // Default pricing when none available - outputPrice: 0, - description: "test/free-model via Chutes.AI", - supportsReasoningEffort: false, - supportsReasoningBudget: false, - supportedParameters: [], - }) - }) - - it("should work without API key", async () => { - const mockChutesResponse = { - object: "list", - data: [], // Empty model list - } - - mockedAxios.get.mockResolvedValueOnce({ data: mockChutesResponse }) - - const result = await getChutesModels() - - expect(mockedAxios.get).toHaveBeenCalledWith("https://llm.chutes.ai/v1/models", { - headers: { - "Content-Type": "application/json", - }, - timeout: 10000, - }) - - expect(result).toEqual({}) - }) - - it("should handle axios network error", async () => { - const networkError = new Error("Network error") - mockedAxios.get.mockRejectedValueOnce(networkError) - - await expect(getChutesModels()).rejects.toThrow("Failed to fetch Chutes.AI models: Network error") - }) - - it("should handle axios timeout error", async () => { - const timeoutError = new Error("Request timeout") as any - timeoutError.isAxiosError = true // Make it look like an axios 
error - timeoutError.code = "ECONNABORTED" - mockedAxios.get.mockRejectedValueOnce(timeoutError) - - await expect(getChutesModels()).rejects.toThrow("Failed to fetch Chutes.AI models: Request timeout") - }) - - it("should handle axios response error", async () => { - const responseError = new Error("Request failed") - mockedAxios.get.mockRejectedValueOnce(responseError) - - // Should propagate the network error through our generic error handling - await expect(getChutesModels()).rejects.toThrow("Failed to fetch Chutes.AI models: Request failed") - }) - - it("should handle axios no response error", async () => { - const noResponseError = new Error("No response") as any - noResponseError.isAxiosError = true // Make it look like an axios error - noResponseError.request = {} - mockedAxios.get.mockRejectedValueOnce(noResponseError) - - await expect(getChutesModels()).rejects.toThrow("Failed to fetch Chutes.AI models: No response") - }) - - it("should throw error for invalid response schema", async () => { - const invalidResponse = { - object: "invalid", // Wrong object type - data: "not an array", // Invalid data type - } - - mockedAxios.get.mockResolvedValueOnce({ data: invalidResponse }) - - // Schema validation should fail and throw an error with details - await expect(getChutesModels()).rejects.toThrow( - "Chutes.AI API returned invalid response format. 
This indicates an API contract change.", - ) - - // Error should be logged - expect(consoleErrorSpy).toHaveBeenCalledWith("Chutes.AI models response validation failed:", expect.any(Object)) - }) - - it("should handle models with minimal fields", async () => { - const minimalResponse = { - object: "list", - data: [ - { - // Only required field is id, all others are optional - id: "test/model", - }, - ], - } - - mockedAxios.get.mockResolvedValueOnce({ data: minimalResponse }) - - // With flexible schema, should successfully parse models with minimal fields - const result = await getChutesModels() - expect(result).toHaveProperty("test/model") - expect(result["test/model"]).toHaveProperty("contextWindow", 8192) // Default fallback - expect(result["test/model"]).toHaveProperty("description", "test/model via Chutes.AI") - expect(result["test/model"]).toHaveProperty("maxTokens", 1639) // 20% of 8192 - expect(result["test/model"]).toHaveProperty("inputPrice", 0) // Default when no pricing - expect(result["test/model"]).toHaveProperty("outputPrice", 0) - - // Should not log validation errors with the flexible schema - expect(consoleErrorSpy).not.toHaveBeenCalled() - }) - - it("should cap maxTokens correctly for large context windows", async () => { - const mockChutesResponse = { - object: "list", - data: [ - { - id: "test/large-context", - object: "model", - created: 1704067200, - owned_by: "test", - root: "test/large-context", - parent: null, - pricing: null, - quantization: "fp8", - max_model_len: 512000, - context_length: 512000, - input_modalities: ["text"], - output_modalities: ["text"], - supported_features: { - text: true, - vision: false, - audio: false, - function_calling: false, - json_mode: false, - json_object: false, - seed: true, - temperature: true, - top_p: true, - top_k: false, - repetition_penalty: false, - frequency_penalty: false, - presence_penalty: false, - stop: true, - max_tokens: true, - logit_bias: false, - }, - sampling_params: { - temperature: { 
min: 0.0, max: 2.0, default: 0.7 }, - top_p: { min: 0.0, max: 1.0, default: 0.95 }, - top_k: { min: 1, max: 100, default: 40 }, - repetition_penalty: { min: 1.0, max: 2.0, default: 1.05 }, - frequency_penalty: { min: -2.0, max: 2.0, default: 0.0 }, - presence_penalty: { min: -2.0, max: 2.0, default: 0.0 }, - }, - }, - ], - } - - mockedAxios.get.mockResolvedValueOnce({ data: mockChutesResponse }) - - const result = await getChutesModels() - - const model = result["test/large-context"] - expect(model.maxTokens).toBe(32768) // Capped at 32k - expect(model.contextWindow).toBe(512000) - }) - - it("should handle zero pricing values correctly", async () => { - const mockChutesResponse = { - object: "list", - data: [ - { - id: "test/zero-price", - object: "model", - created: 1704067200, - owned_by: "test", - root: "test/zero-price", - parent: null, - pricing: { - tao: { input: 0, output: 0 }, - usd: { input: 0, output: 0 }, - }, - quantization: "fp8", - max_model_len: 4096, - context_length: 4096, - input_modalities: ["text"], - output_modalities: ["text"], - supported_features: { - text: true, - vision: false, - audio: false, - function_calling: false, - json_mode: false, - json_object: false, - seed: true, - temperature: true, - top_p: true, - top_k: false, - repetition_penalty: false, - frequency_penalty: false, - presence_penalty: false, - stop: true, - max_tokens: true, - logit_bias: false, - }, - sampling_params: { - temperature: { min: 0.0, max: 2.0, default: 0.7 }, - top_p: { min: 0.0, max: 1.0, default: 0.95 }, - top_k: { min: 1, max: 100, default: 40 }, - repetition_penalty: { min: 1.0, max: 2.0, default: 1.05 }, - frequency_penalty: { min: -2.0, max: 2.0, default: 0.0 }, - presence_penalty: { min: -2.0, max: 2.0, default: 0.0 }, - }, - }, - ], - } - - mockedAxios.get.mockResolvedValueOnce({ data: mockChutesResponse }) - - const result = await getChutesModels() - - const model = result["test/zero-price"] - expect(model.inputPrice).toBe(0) - 
expect(model.outputPrice).toBe(0) - }) -}) diff --git a/src/api/providers/fetchers/__tests__/litellm.spec.ts b/src/api/providers/fetchers/__tests__/litellm.spec.ts index f3a9d9971ec..a93c21ee1b0 100644 --- a/src/api/providers/fetchers/__tests__/litellm.spec.ts +++ b/src/api/providers/fetchers/__tests__/litellm.spec.ts @@ -221,7 +221,6 @@ describe("getLiteLLMModels", () => { maxTokens: 4096, contextWindow: 200000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: false, inputPrice: 3, outputPrice: 15, @@ -231,7 +230,6 @@ describe("getLiteLLMModels", () => { maxTokens: 8192, contextWindow: 128000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 10, outputPrice: 30, @@ -300,7 +298,6 @@ describe("getLiteLLMModels", () => { maxTokens: 4096, contextWindow: 200000, supportsImages: true, - supportsComputerUse: true, supportsPromptCache: false, inputPrice: undefined, outputPrice: undefined, @@ -311,7 +308,6 @@ describe("getLiteLLMModels", () => { maxTokens: 4096, contextWindow: 200000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: undefined, outputPrice: undefined, @@ -446,7 +442,6 @@ describe("getLiteLLMModels", () => { maxTokens: 4096, contextWindow: 200000, supportsImages: true, - supportsComputerUse: true, // Should be true due to fallback supportsPromptCache: false, inputPrice: undefined, outputPrice: undefined, @@ -457,7 +452,6 @@ describe("getLiteLLMModels", () => { maxTokens: 8192, contextWindow: 128000, supportsImages: false, - supportsComputerUse: false, // Should be false as it's not in fallback list supportsPromptCache: false, inputPrice: undefined, outputPrice: undefined, @@ -520,7 +514,6 @@ describe("getLiteLLMModels", () => { maxTokens: 4096, contextWindow: 200000, supportsImages: true, - supportsComputerUse: false, // False because explicitly set to false (fallback ignored) supportsPromptCache: false, inputPrice: undefined, outputPrice: 
undefined, @@ -531,7 +524,6 @@ describe("getLiteLLMModels", () => { maxTokens: 8192, contextWindow: 128000, supportsImages: false, - supportsComputerUse: true, // True because explicitly set to true supportsPromptCache: false, inputPrice: undefined, outputPrice: undefined, @@ -542,7 +534,6 @@ describe("getLiteLLMModels", () => { maxTokens: 8192, contextWindow: 128000, supportsImages: false, - supportsComputerUse: false, // False because explicitly set to false supportsPromptCache: false, inputPrice: undefined, outputPrice: undefined, @@ -597,9 +588,95 @@ describe("getLiteLLMModels", () => { mockedAxios.get.mockResolvedValue(mockResponse) const result = await getLiteLLMModels("test-api-key", "http://localhost:4000") + }) - expect(result["vertex-claude"].supportsComputerUse).toBe(true) - expect(result["openrouter-claude"].supportsComputerUse).toBe(true) - expect(result["bedrock-claude"].supportsComputerUse).toBe(true) + it("prefers max_output_tokens over max_tokens when both are present", async () => { + const mockResponse = { + data: { + data: [ + { + model_name: "claude-3-5-sonnet-4-5", + model_info: { + max_tokens: 200000, // This should be ignored + max_output_tokens: 64000, // This should be used + max_input_tokens: 200000, + supports_vision: true, + supports_prompt_caching: false, + supports_computer_use: true, + }, + litellm_params: { + model: "anthropic/claude-3-5-sonnet-4-5", + }, + }, + { + model_name: "model-with-only-max-tokens", + model_info: { + max_tokens: 8192, // This should be used as fallback + // No max_output_tokens + max_input_tokens: 128000, + supports_vision: false, + }, + litellm_params: { + model: "test/model-with-only-max-tokens", + }, + }, + { + model_name: "model-with-only-max-output-tokens", + model_info: { + // No max_tokens + max_output_tokens: 16384, // This should be used + max_input_tokens: 100000, + supports_vision: false, + }, + litellm_params: { + model: "test/model-with-only-max-output-tokens", + }, + }, + ], + }, + } + + 
mockedAxios.get.mockResolvedValue(mockResponse) + + const result = await getLiteLLMModels("test-api-key", "http://localhost:4000") + + // Should use max_output_tokens (64000) instead of max_tokens (200000) + expect(result["claude-3-5-sonnet-4-5"]).toEqual({ + maxTokens: 64000, + contextWindow: 200000, + supportsImages: true, + supportsPromptCache: false, + inputPrice: undefined, + outputPrice: undefined, + cacheWritesPrice: undefined, + cacheReadsPrice: undefined, + description: "claude-3-5-sonnet-4-5 via LiteLLM proxy", + }) + + // Should fall back to max_tokens when max_output_tokens is not present + expect(result["model-with-only-max-tokens"]).toEqual({ + maxTokens: 8192, + contextWindow: 128000, + supportsImages: false, + supportsPromptCache: false, + inputPrice: undefined, + outputPrice: undefined, + cacheWritesPrice: undefined, + cacheReadsPrice: undefined, + description: "model-with-only-max-tokens via LiteLLM proxy", + }) + + // Should use max_output_tokens when max_tokens is not present + expect(result["model-with-only-max-output-tokens"]).toEqual({ + maxTokens: 16384, + contextWindow: 100000, + supportsImages: false, + supportsPromptCache: false, + inputPrice: undefined, + outputPrice: undefined, + cacheWritesPrice: undefined, + cacheReadsPrice: undefined, + description: "model-with-only-max-output-tokens via LiteLLM proxy", + }) }) }) diff --git a/src/api/providers/fetchers/__tests__/lmstudio.test.ts b/src/api/providers/fetchers/__tests__/lmstudio.test.ts index 286ec41d676..a1f06d2e251 100644 --- a/src/api/providers/fetchers/__tests__/lmstudio.test.ts +++ b/src/api/providers/fetchers/__tests__/lmstudio.test.ts @@ -60,7 +60,6 @@ describe("LMStudio Fetcher", () => { contextWindow: rawModel.contextLength, supportsPromptCache: true, supportsImages: rawModel.vision, - supportsComputerUse: false, maxTokens: rawModel.contextLength, inputPrice: 0, outputPrice: 0, diff --git a/src/api/providers/fetchers/__tests__/ollama.test.ts 
b/src/api/providers/fetchers/__tests__/ollama.test.ts index 5f53facef6e..e9b6a967672 100644 --- a/src/api/providers/fetchers/__tests__/ollama.test.ts +++ b/src/api/providers/fetchers/__tests__/ollama.test.ts @@ -21,7 +21,6 @@ describe("Ollama Fetcher", () => { maxTokens: 4096, // kilocode_change contextWindow: 4096, // kilocode_change supportsImages: false, - supportsComputerUse: false, supportsPromptCache: true, inputPrice: 0, outputPrice: 0, @@ -46,7 +45,6 @@ describe("Ollama Fetcher", () => { maxTokens: 4096, // kilocode_change contextWindow: 4096, // kilocode_change supportsImages: false, - supportsComputerUse: false, supportsPromptCache: true, inputPrice: 0, outputPrice: 0, diff --git a/src/api/providers/fetchers/__tests__/openrouter.spec.ts b/src/api/providers/fetchers/__tests__/openrouter.spec.ts index 0ce8caae6d4..88287e07c40 100644 --- a/src/api/providers/fetchers/__tests__/openrouter.spec.ts +++ b/src/api/providers/fetchers/__tests__/openrouter.spec.ts @@ -4,13 +4,6 @@ import * as path from "path" import { back as nockBack } from "nock" -import { - OPEN_ROUTER_PROMPT_CACHING_MODELS, - OPEN_ROUTER_COMPUTER_USE_MODELS, - OPEN_ROUTER_REASONING_BUDGET_MODELS, - OPEN_ROUTER_REQUIRED_REASONING_BUDGET_MODELS, -} from "@roo-code/types" - import { getOpenRouterModelEndpoints, getOpenRouterModels, parseOpenRouterModel } from "../openrouter" nockBack.fixtures = path.join(__dirname, "fixtures") @@ -24,147 +17,6 @@ describe("OpenRouter API", () => { const models = await getOpenRouterModels() - const openRouterSupportedCaching = Object.entries(models) - .filter(([id, _]) => id.startsWith("anthropic/claude") || id.startsWith("google/gemini")) // only these support cache_control breakpoints (https://openrouter.ai/docs/features/prompt-caching) - .filter(([_, model]) => model.supportsPromptCache) - .map(([id, _]) => id) - - // Define models that are intentionally excluded - const excludedModels = new Set([ - "google/gemini-2.5-pro-preview", // Excluded due to lag issue 
(#4487) - "google/gemini-2.5-flash", // OpenRouter doesn't report this as supporting prompt caching - "google/gemini-2.5-flash-lite-preview-06-17", // OpenRouter doesn't report this as supporting prompt caching - "anthropic/claude-opus-4.1", // Not yet available in OpenRouter API - "anthropic/claude-sonnet-4.5", // Not yet available in OpenRouter API - ]) - - const ourCachingModels = Array.from(OPEN_ROUTER_PROMPT_CACHING_MODELS).filter( - (id) => !excludedModels.has(id), - ) - - // Verify all our caching models are actually supported by OpenRouter - for (const modelId of ourCachingModels) { - expect(openRouterSupportedCaching).toContain(modelId) - } - - // Verify we have all supported models except intentionally excluded ones - const expectedCachingModels = openRouterSupportedCaching.filter((id) => !excludedModels.has(id)).sort() - - expect(ourCachingModels.sort()).toEqual(expectedCachingModels) - - const excludedComputerUseModels = new Set([ - "anthropic/claude-opus-4.1", // Not yet available in OpenRouter API - "anthropic/claude-sonnet-4.5", // Not yet available in OpenRouter API - ]) - - const expectedComputerUseModels = Array.from(OPEN_ROUTER_COMPUTER_USE_MODELS) - .filter((id) => !excludedComputerUseModels.has(id)) - .sort() - - expect( - Object.entries(models) - .filter(([_, model]) => model.supportsComputerUse) - .map(([id, _]) => id) - .sort(), - ).toEqual(expectedComputerUseModels) - - expect( - Object.entries(models) - .filter(([_, model]) => model.supportsReasoningEffort) - .map(([id, _]) => id) - .sort(), - ).toEqual([ - "agentica-org/deepcoder-14b-preview:free", - "aion-labs/aion-1.0", - "aion-labs/aion-1.0-mini", - "anthropic/claude-3.7-sonnet:beta", - "anthropic/claude-3.7-sonnet:thinking", - "anthropic/claude-opus-4", - // "anthropic/claude-opus-4.1", // Not yet available in OpenRouter API - "anthropic/claude-sonnet-4", - "arliai/qwq-32b-arliai-rpr-v1:free", - "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", - "deepseek/deepseek-r1", - 
"deepseek/deepseek-r1-distill-llama-70b", - "deepseek/deepseek-r1-distill-llama-70b:free", - "deepseek/deepseek-r1-distill-llama-8b", - "deepseek/deepseek-r1-distill-qwen-1.5b", - "deepseek/deepseek-r1-distill-qwen-14b", - "deepseek/deepseek-r1-distill-qwen-14b:free", - "deepseek/deepseek-r1-distill-qwen-32b", - "deepseek/deepseek-r1-distill-qwen-32b:free", - "deepseek/deepseek-r1-zero:free", - "deepseek/deepseek-r1:free", - "google/gemini-2.5-flash-preview-05-20", - "google/gemini-2.5-flash-preview-05-20:thinking", - "microsoft/mai-ds-r1:free", - "microsoft/phi-4-reasoning-plus", - "microsoft/phi-4-reasoning-plus:free", - "microsoft/phi-4-reasoning:free", - "moonshotai/kimi-vl-a3b-thinking:free", - "nousresearch/deephermes-3-mistral-24b-preview:free", - "open-r1/olympiccoder-32b:free", - "openai/codex-mini", - "openai/o1-pro", - "perplexity/r1-1776", - "perplexity/sonar-deep-research", - "perplexity/sonar-reasoning", - "perplexity/sonar-reasoning-pro", - "qwen/qwen3-14b", - "qwen/qwen3-14b:free", - "qwen/qwen3-235b-a22b", - "qwen/qwen3-235b-a22b:free", - "qwen/qwen3-30b-a3b", - "qwen/qwen3-30b-a3b:free", - "qwen/qwen3-32b", - "qwen/qwen3-32b:free", - "qwen/qwen3-4b:free", - "qwen/qwen3-8b", - "qwen/qwen3-8b:free", - "qwen/qwq-32b", - "qwen/qwq-32b:free", - "rekaai/reka-flash-3:free", - "thudm/glm-z1-32b", - "thudm/glm-z1-32b:free", - "thudm/glm-z1-9b:free", - "thudm/glm-z1-rumination-32b", - "tngtech/deepseek-r1t-chimera:free", - "x-ai/grok-3-mini-beta", - ]) - // OpenRouter is taking a while to update their models, so we exclude some known models - const excludedReasoningBudgetModels = new Set([ - "google/gemini-2.5-flash", - "google/gemini-2.5-flash-lite-preview-06-17", - "google/gemini-2.5-pro", - "anthropic/claude-opus-4.1", // Not yet available in OpenRouter API - "anthropic/claude-sonnet-4.5", // Not yet available in OpenRouter API - "anthropic/claude-haiku-4.5", // Not yet available in OpenRouter API - ]) - - const expectedReasoningBudgetModels = 
Array.from(OPEN_ROUTER_REASONING_BUDGET_MODELS) - .filter((id) => !excludedReasoningBudgetModels.has(id)) - .sort() - - expect( - Object.entries(models) - .filter(([_, model]) => model.supportsReasoningBudget) - .map(([id, _]) => id) - .sort(), - ).toEqual(expectedReasoningBudgetModels) - - const excludedRequiredReasoningBudgetModels = new Set(["google/gemini-2.5-pro"]) - - const expectedRequiredReasoningBudgetModels = Array.from(OPEN_ROUTER_REQUIRED_REASONING_BUDGET_MODELS) - .filter((id) => !excludedRequiredReasoningBudgetModels.has(id)) - .sort() - - expect( - Object.entries(models) - .filter(([_, model]) => model.requiredReasoningBudget) - .map(([id, _]) => id) - .sort(), - ).toEqual(expectedRequiredReasoningBudgetModels) - expect(models["anthropic/claude-3.7-sonnet"]).toEqual({ maxTokens: 8192, contextWindow: 200000, @@ -176,7 +28,6 @@ describe("OpenRouter API", () => { cacheReadsPrice: 0.3, description: expect.any(String), displayName: expect.any(String), // kilocode_change - supportsComputerUse: true, supportsReasoningBudget: false, supportsReasoningEffort: false, supportedParameters: ["max_tokens", "temperature", "reasoning", "include_reasoning"], @@ -193,7 +44,6 @@ describe("OpenRouter API", () => { cacheReadsPrice: 0.3, description: expect.any(String), displayName: expect.any(String), // kilocode_change - supportsComputerUse: true, supportsReasoningBudget: true, requiredReasoningBudget: true, supportsReasoningEffort: true, diff --git a/src/api/providers/fetchers/__tests__/roo.spec.ts b/src/api/providers/fetchers/__tests__/roo.spec.ts new file mode 100644 index 00000000000..dcc79e941fa --- /dev/null +++ b/src/api/providers/fetchers/__tests__/roo.spec.ts @@ -0,0 +1,477 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest" +import { getRooModels } from "../roo" + +// Mock fetch globally +const mockFetch = vi.fn() +global.fetch = mockFetch as any + +describe("getRooModels", () => { + const baseUrl = "https://api.roocode.com/proxy" + 
const apiKey = "test-api-key" + + beforeEach(() => { + vi.clearAllMocks() + vi.useFakeTimers() + }) + + afterEach(() => { + vi.useRealTimers() + }) + + it("should fetch and parse models successfully", async () => { + const mockResponse = { + object: "list", + data: [ + { + id: "xai/grok-code-fast-1", + object: "model", + created: 1234567890, + owned_by: "xai", + name: "Grok Code Fast 1", + description: "Fast coding model", + context_window: 262144, + max_tokens: 16384, + type: "language", + tags: ["vision", "reasoning"], + pricing: { + input: "0.0001", + output: "0.0002", + input_cache_read: "0.00005", + input_cache_write: "0.0001", + }, + deprecated: false, + }, + ], + } + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: async () => mockResponse, + }) + + const models = await getRooModels(baseUrl, apiKey) + + expect(mockFetch).toHaveBeenCalledWith( + "https://api.roocode.com/proxy/v1/models", + expect.objectContaining({ + headers: expect.objectContaining({ + "Content-Type": "application/json", + Authorization: `Bearer ${apiKey}`, + }), + }), + ) + + expect(models).toEqual({ + "xai/grok-code-fast-1": { + maxTokens: 16384, + contextWindow: 262144, + supportsImages: true, + supportsReasoningEffort: true, + requiredReasoningEffort: false, + supportsPromptCache: true, + inputPrice: 100, // 0.0001 * 1_000_000 + outputPrice: 200, // 0.0002 * 1_000_000 + cacheWritesPrice: 100, // 0.0001 * 1_000_000 + cacheReadsPrice: 50, // 0.00005 * 1_000_000 + description: "Fast coding model", + deprecated: false, + isFree: false, + }, + }) + }) + + it("should handle reasoning-required tag", async () => { + const mockResponse = { + object: "list", + data: [ + { + id: "test/reasoning-required-model", + object: "model", + created: 1234567890, + owned_by: "test", + name: "Reasoning Required Model", + description: "Model that requires reasoning", + context_window: 128000, + max_tokens: 8192, + type: "language", + tags: ["reasoning", "reasoning-required"], + pricing: { + input: 
"0.0001", + output: "0.0002", + }, + }, + ], + } + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: async () => mockResponse, + }) + + const models = await getRooModels(baseUrl, apiKey) + + expect(models["test/reasoning-required-model"]).toEqual({ + maxTokens: 8192, + contextWindow: 128000, + supportsImages: false, + supportsReasoningEffort: true, + requiredReasoningEffort: true, + supportsPromptCache: false, + inputPrice: 100, // 0.0001 * 1_000_000 + outputPrice: 200, // 0.0002 * 1_000_000 + cacheWritesPrice: undefined, + cacheReadsPrice: undefined, + description: "Model that requires reasoning", + deprecated: false, + isFree: false, + }) + }) + + it("should handle models without required_reasoning_effort field", async () => { + const mockResponse = { + object: "list", + data: [ + { + id: "test/normal-model", + object: "model", + created: 1234567890, + owned_by: "test", + name: "Normal Model", + description: "Normal model without reasoning", + context_window: 128000, + max_tokens: 8192, + type: "language", + pricing: { + input: "0.0001", + output: "0.0002", + }, + }, + ], + } + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: async () => mockResponse, + }) + + const models = await getRooModels(baseUrl, apiKey) + + expect(models["test/normal-model"]).toEqual({ + maxTokens: 8192, + contextWindow: 128000, + supportsImages: false, + supportsReasoningEffort: false, + requiredReasoningEffort: false, + supportsPromptCache: false, + inputPrice: 100, // 0.0001 * 1_000_000 + outputPrice: 200, // 0.0002 * 1_000_000 + cacheWritesPrice: undefined, + cacheReadsPrice: undefined, + description: "Normal model without reasoning", + deprecated: false, + isFree: false, + }) + }) + + it("should work without API key", async () => { + const mockResponse = { + object: "list", + data: [ + { + id: "test/public-model", + object: "model", + created: 1234567890, + owned_by: "test", + name: "Public Model", + description: "Public model", + context_window: 128000, + max_tokens: 
8192, + type: "language", + pricing: { + input: "0.0001", + output: "0.0002", + }, + }, + ], + } + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: async () => mockResponse, + }) + + const models = await getRooModels(baseUrl) + + expect(mockFetch).toHaveBeenCalledWith( + "https://api.roocode.com/proxy/v1/models", + expect.objectContaining({ + headers: expect.not.objectContaining({ + Authorization: expect.anything(), + }), + }), + ) + + expect(models["test/public-model"]).toBeDefined() + }) + + it("should handle HTTP errors", async () => { + mockFetch.mockResolvedValueOnce({ + ok: false, + status: 401, + statusText: "Unauthorized", + }) + + await expect(getRooModels(baseUrl, apiKey)).rejects.toThrow( + "Failed to fetch Roo Code Cloud models: HTTP 401: Unauthorized", + ) + }) + + it("should handle timeout", async () => { + const abortError = new Error("AbortError") + abortError.name = "AbortError" + + mockFetch.mockRejectedValueOnce(abortError) + + await expect(getRooModels(baseUrl, apiKey)).rejects.toThrow( + "Failed to fetch Roo Code Cloud models: Request timed out", + ) + }) + + it("should handle invalid response format", async () => { + const invalidResponse = { + invalid: "data", + } + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: async () => invalidResponse, + }) + + await expect(getRooModels(baseUrl, apiKey)).rejects.toThrow( + "Failed to fetch Roo Code Cloud models: Unexpected response format", + ) + }) + + it("should normalize base URL correctly", async () => { + const mockResponse = { + object: "list", + data: [], + } + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: async () => mockResponse, + }) + + await getRooModels("https://api.roocode.com/proxy/v1", apiKey) + + expect(mockFetch).toHaveBeenCalledWith("https://api.roocode.com/proxy/v1/models", expect.any(Object)) + }) + + it("should handle deprecated models", async () => { + const mockResponse = { + object: "list", + data: [ + { + id: "test/deprecated-model", + object: "model", 
+ created: 1234567890, + owned_by: "test", + name: "Deprecated Model", + description: "Old model", + context_window: 128000, + max_tokens: 8192, + type: "language", + pricing: { + input: "0.0001", + output: "0.0002", + }, + deprecated: true, + }, + ], + } + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: async () => mockResponse, + }) + + const models = await getRooModels(baseUrl, apiKey) + + expect(models["test/deprecated-model"].deprecated).toBe(true) + }) + + it("should detect vision support from tags", async () => { + const mockResponse = { + object: "list", + data: [ + { + id: "test/vision-model", + object: "model", + created: 1234567890, + owned_by: "test", + name: "Vision Model", + description: "Model with vision", + context_window: 128000, + max_tokens: 8192, + type: "language", + tags: ["vision"], + pricing: { + input: "0.0001", + output: "0.0002", + }, + }, + ], + } + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: async () => mockResponse, + }) + + const models = await getRooModels(baseUrl, apiKey) + + expect(models["test/vision-model"].supportsImages).toBe(true) + }) + + it("should detect reasoning support from tags", async () => { + const mockResponse = { + object: "list", + data: [ + { + id: "test/reasoning-model", + object: "model", + created: 1234567890, + owned_by: "test", + name: "Reasoning Model", + description: "Model with reasoning", + context_window: 128000, + max_tokens: 8192, + type: "language", + tags: ["reasoning"], + pricing: { + input: "0.0001", + output: "0.0002", + }, + }, + ], + } + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: async () => mockResponse, + }) + + const models = await getRooModels(baseUrl, apiKey) + + expect(models["test/reasoning-model"].supportsReasoningEffort).toBe(true) + }) + + it("should handle models with cache pricing", async () => { + const mockResponse = { + object: "list", + data: [ + { + id: "test/cache-model", + object: "model", + created: 1234567890, + owned_by: "test", + name: 
"Cache Model", + description: "Model with cache", + context_window: 128000, + max_tokens: 8192, + type: "language", + pricing: { + input: "0.0001", + output: "0.0002", + input_cache_read: "0.00005", + input_cache_write: "0.0001", + }, + }, + ], + } + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: async () => mockResponse, + }) + + const models = await getRooModels(baseUrl, apiKey) + + expect(models["test/cache-model"].supportsPromptCache).toBe(true) + expect(models["test/cache-model"].cacheReadsPrice).toBe(50) // 0.00005 * 1_000_000 + expect(models["test/cache-model"].cacheWritesPrice).toBe(100) // 0.0001 * 1_000_000 + }) + + it("should skip models without ID", async () => { + const mockResponse = { + object: "list", + data: [ + { + id: "", + object: "model", + created: 1234567890, + owned_by: "test", + name: "Invalid Model", + description: "Model without ID", + context_window: 128000, + max_tokens: 8192, + type: "language", + pricing: { + input: "0.0001", + output: "0.0002", + }, + }, + ], + } + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: async () => mockResponse, + }) + + const models = await getRooModels(baseUrl, apiKey) + + expect(Object.keys(models)).toHaveLength(0) + }) + + it("should use model name as description fallback", async () => { + const mockResponse = { + object: "list", + data: [ + { + id: "test/no-description", + object: "model", + created: 1234567890, + owned_by: "test", + name: "Model Name", + description: "", + context_window: 128000, + max_tokens: 8192, + type: "language", + pricing: { + input: "0.0001", + output: "0.0002", + }, + }, + ], + } + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: async () => mockResponse, + }) + + const models = await getRooModels(baseUrl, apiKey) + + expect(models["test/no-description"].description).toBe("Model Name") + }) + + it("should handle network errors", async () => { + mockFetch.mockRejectedValueOnce(new TypeError("Network error")) + + await expect(getRooModels(baseUrl, 
apiKey)).rejects.toThrow( + "Failed to fetch Roo Code Cloud models: No response from server", + ) + }) +}) diff --git a/src/api/providers/fetchers/__tests__/vercel-ai-gateway.spec.ts b/src/api/providers/fetchers/__tests__/vercel-ai-gateway.spec.ts index 657d335b61b..30ad2f41d5b 100644 --- a/src/api/providers/fetchers/__tests__/vercel-ai-gateway.spec.ts +++ b/src/api/providers/fetchers/__tests__/vercel-ai-gateway.spec.ts @@ -176,7 +176,6 @@ describe("Vercel AI Gateway Fetchers", () => { maxTokens: 8000, contextWindow: 100000, supportsImages: false, - supportsComputerUse: false, supportsPromptCache: false, inputPrice: 2500000, outputPrice: 10000000, @@ -222,7 +221,6 @@ describe("Vercel AI Gateway Fetchers", () => { }) expect(result.supportsImages).toBe(VERCEL_AI_GATEWAY_VISION_ONLY_MODELS.has("anthropic/claude-3.5-haiku")) - expect(result.supportsComputerUse).toBe(false) }) it("detects vision and tools models", () => { @@ -240,9 +238,6 @@ describe("Vercel AI Gateway Fetchers", () => { expect(result.supportsImages).toBe( VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS.has("anthropic/claude-sonnet-4"), ) - expect(result.supportsComputerUse).toBe( - VERCEL_AI_GATEWAY_VISION_AND_TOOLS_MODELS.has("anthropic/claude-sonnet-4"), - ) }) it("handles missing cache pricing", () => { @@ -298,7 +293,6 @@ describe("Vercel AI Gateway Fetchers", () => { model: { ...baseModel, id: modelId }, }) expect(result.supportsImages).toBe(true) - expect(result.supportsComputerUse).toBe(false) } }) @@ -309,7 +303,6 @@ describe("Vercel AI Gateway Fetchers", () => { model: { ...baseModel, id: modelId }, }) expect(result.supportsImages).toBe(true) - expect(result.supportsComputerUse).toBe(true) } }) }) diff --git a/src/api/providers/fetchers/chutes.ts b/src/api/providers/fetchers/chutes.ts index 2c32bcaf7f9..c19172537c2 100644 --- a/src/api/providers/fetchers/chutes.ts +++ b/src/api/providers/fetchers/chutes.ts @@ -1,215 +1,60 @@ -// kilocode_change - file added import axios from "axios" import { z } 
from "zod" -import { type ModelInfo } from "@roo-code/types" +import { type ModelInfo, chutesModels } from "@roo-code/types" -/** - * Chutes.AI individual model schema - flexible to handle API variations - */ -const chutesModelSchema = z.object({ +import { DEFAULT_HEADERS } from "../constants" + +// Chutes models endpoint follows OpenAI /models shape with additional fields +const ChutesModelSchema = z.object({ id: z.string(), - root: z.string().optional(), - price: z - .object({ - input: z.object({ - tao: z.number(), - usd: z.number(), - }), - output: z.object({ - tao: z.number(), - usd: z.number(), - }), - }) - .optional() - .nullable(), - object: z.string().optional(), - created: z.number().optional(), - pricing: z - .object({ - prompt: z.number().optional(), - completion: z.number().optional(), - tao: z - .object({ - input: z.number().optional(), - output: z.number().optional(), - }) - .optional(), - usd: z - .object({ - input: z.number().optional(), - output: z.number().optional(), - }) - .optional(), - }) - .optional() - .nullable(), + object: z.literal("model").optional(), owned_by: z.string().optional(), - quantization: z.string().optional().nullable(), - max_model_len: z.number().optional(), - context_length: z.number().optional(), - input_modalities: z.array(z.string()).optional(), - max_output_length: z.number().optional(), - output_modalities: z.array(z.string()).optional(), - supported_features: z.union([z.array(z.string()), z.record(z.any())]).optional(), - supported_sampling_parameters: z.array(z.string()).optional(), - parent: z.string().optional().nullable(), - permission: z.array(z.any()).optional(), - sampling_params: z.record(z.any()).optional(), -}) - -export type ChutesModel = z.infer - -/** - * Chutes.API response schema - */ -const chutesApiResponseSchema = z.object({ - object: z.literal("list"), - data: z.array(chutesModelSchema), + created: z.number().optional(), + context_length: z.number(), + max_model_len: z.number(), + input_modalities: 
z.array(z.string()), }) -type ChutesApiResponse = z.infer - -/** - * Special model ID patterns that require specific handling - * - * These hardcoded sets are necessary because: - * - DEEPSEEK_R1_MODELS: These reasoning models require special handling (supportsReasoningBudget: true) - * and higher maxTokens (32768) regardless of their context window. The API doesn't provide a "reasoning" - * flag in a consistent location, so we identify them by ID. - * - GLM_MODELS: These vision models need explicit supportsImages: true because the API doesn't always - * include "vision" in supported_features or "image" in input_modalities consistently. - */ -const DEEPSEEK_R1_MODELS = new Set([ - "deepseek-ai/DeepSeek-R1-0528", - "deepseek-ai/DeepSeek-R1", - "deepseek-ai/DeepSeek-R1-Zero", -]) - -const GLM_MODELS = new Set(["zai-org/GLM-4.5V"]) - -/** - * Parse a Chutes.AI model response into our ModelInfo format - */ -function parseChutesModel(model: ChutesModel): ModelInfo { - const { id, context_length, max_model_len, supported_features, price, input_modalities, output_modalities } = model - - // Use context_length for the context window, fallback to max_model_len if needed - const contextWindow = context_length || max_model_len || 8192 - - // Calculate maxTokens with the following logic: - // - Take 20% of context window (ceil) as a reasonable default output limit - // - Cap at 32,768 tokens to avoid excessive output lengths that could impact performance - // - Take the minimum of these two values and the context window itself - // This ensures reasonable output limits while respecting model constraints - const maxTokens = Math.min(contextWindow, Math.ceil(contextWindow * 0.2), 32768) - - // Handle reasoning models - const isReasoning = DEEPSEEK_R1_MODELS.has(id) - - // Parse pricing - Chutes API returns prices already in dollars per million tokens - // (not per token like OpenRouter), so we use the values directly - const inputPrice = price?.input?.usd ?? 
0 - const outputPrice = price?.output?.usd ?? 0 - - // Determine capabilities from modalities and features - // supported_features can be either an array or an object, handle both - const featuresArray = Array.isArray(supported_features) ? supported_features : [] - const supportsImages = featuresArray.includes("vision") || input_modalities?.includes("image") || false - const supportsPromptCache = true // Most models support some form of caching - - // Build model info object - const modelInfo: ModelInfo = { - maxTokens: isReasoning ? 32768 : maxTokens, // Reasoning models need higher token limits - contextWindow, - supportsImages, - supportsPromptCache, - supportsComputerUse: false, // Chutes doesn't expose computer use capability - inputPrice, - outputPrice, - description: `${id} via Chutes.AI`, - supportsReasoningEffort: false, - supportsReasoningBudget: isReasoning, - supportedParameters: [], - } - - // Set special properties for specific model families - if (DEEPSEEK_R1_MODELS.has(id)) { - // DeepSeek R1 models already have special handling in the provider - // Keep default behavior but ensure reasonable token limits - } - - if (GLM_MODELS.has(id)) { - // GLM-4.5V is a vision model - ensure vision support is properly marked - modelInfo.supportsImages = true - } +const ChutesModelsResponseSchema = z.object({ data: z.array(ChutesModelSchema) }) - return modelInfo -} - -/** - * Fetches available models from Chutes.AI - * - * @param apiKey - Optional API key for Chutes.AI - * @returns Promise resolving to a record of model information - */ export async function getChutesModels(apiKey?: string): Promise> { - const models: Record = {} - - try { - const headers: Record = { - "Content-Type": "application/json", - } - - if (apiKey) { - headers.Authorization = `Bearer ${apiKey}` - } + const headers: Record = { ...DEFAULT_HEADERS } + if (apiKey) headers["Authorization"] = `Bearer ${apiKey}` - const response = await axios.get("https://llm.chutes.ai/v1/models", { - headers, 
- timeout: 10_000, // 10 second timeout - }) + const url = "https://llm.chutes.ai/v1/models" - const result = chutesApiResponseSchema.safeParse(response.data) + // Start with hardcoded models as the base + const models: Record = { ...chutesModels } - if (!result.success) { - console.error("Chutes.AI models response validation failed:", result.error.format()) - throw new Error( - `Chutes.AI API returned invalid response format. This indicates an API contract change. Validation errors: ${JSON.stringify(result.error.format())}`, - ) - } + try { + const response = await axios.get(url, { headers }) + const parsed = ChutesModelsResponseSchema.safeParse(response.data) + const data = parsed.success ? parsed.data.data : response.data?.data || [] + + for (const m of data as Array>) { + // Extract from API response (all fields are required) + const contextWindow = m.context_length + const maxTokens = m.max_model_len + const supportsImages = m.input_modalities.includes("image") + + const info: ModelInfo = { + maxTokens, + contextWindow, + supportsImages, + supportsPromptCache: false, + inputPrice: 0, + outputPrice: 0, + description: `Chutes AI model: ${m.id}`, + } - for (const model of result.data.data) { - models[model.id] = parseChutesModel(model) + // Union: dynamic models override hardcoded ones if they have the same ID + models[m.id] = info } - - return models } catch (error) { - console.error("Error fetching Chutes.AI models:", error) - - // Provide more specific error messages for common failure scenarios - if (axios.isAxiosError(error)) { - if (error.code === "ECONNABORTED") { - const timeoutError = new Error("Failed to fetch Chutes.AI models: Request timeout") - ;(timeoutError as any).cause = error - throw timeoutError - } else if (error.response) { - const responseError = new Error( - `Failed to fetch Chutes.AI models: ${error.response.status} ${error.response.statusText}`, - ) - ;(responseError as any).cause = error - throw responseError - } else if (error.request) 
{ - const requestError = new Error("Failed to fetch Chutes.AI models: No response") - ;(requestError as any).cause = error - throw requestError - } - } - - const fetchError = new Error( - `Failed to fetch Chutes.AI models: ${error instanceof Error ? error.message : "Unknown error"}`, - ) - ;(fetchError as any).cause = error - throw fetchError + console.error(`Error fetching Chutes models: ${error instanceof Error ? error.message : String(error)}`) + // On error, still return hardcoded models } + + return models } diff --git a/src/api/providers/fetchers/glama.ts b/src/api/providers/fetchers/glama.ts index 9fd57e2c680..ae36c751fb8 100644 --- a/src/api/providers/fetchers/glama.ts +++ b/src/api/providers/fetchers/glama.ts @@ -16,7 +16,6 @@ export async function getGlamaModels(): Promise> { maxTokens: rawModel.maxTokensOutput, contextWindow: rawModel.maxTokensInput, supportsImages: rawModel.capabilities?.includes("input:image"), - supportsComputerUse: rawModel.capabilities?.includes("computer_use"), supportsPromptCache: rawModel.capabilities?.includes("caching"), inputPrice: parseApiPrice(rawModel.pricePerToken?.input), outputPrice: parseApiPrice(rawModel.pricePerToken?.output), diff --git a/src/api/providers/fetchers/huggingface.ts b/src/api/providers/fetchers/huggingface.ts index c92525a4b95..1a7a995bc6e 100644 --- a/src/api/providers/fetchers/huggingface.ts +++ b/src/api/providers/fetchers/huggingface.ts @@ -95,7 +95,6 @@ function parseHuggingFaceModel(model: HuggingFaceModel, provider?: HuggingFacePr contextWindow: contextLength, supportsImages: false, // HuggingFace API doesn't provide this info yet. 
supportsPromptCache: false, - supportsComputerUse: false, inputPrice: pricing?.input, outputPrice: pricing?.output, description, diff --git a/src/api/providers/fetchers/io-intelligence.ts b/src/api/providers/fetchers/io-intelligence.ts index 3dad769a85b..42d88083b96 100644 --- a/src/api/providers/fetchers/io-intelligence.ts +++ b/src/api/providers/fetchers/io-intelligence.ts @@ -75,7 +75,6 @@ function parseIOIntelligenceModel(model: IOIntelligenceModel): ModelInfo { contextWindow: contextLength, supportsImages, supportsPromptCache: false, - supportsComputerUse: false, description: `${model.id} via IO Intelligence`, } } diff --git a/src/api/providers/fetchers/litellm.ts b/src/api/providers/fetchers/litellm.ts index e4e16c30e50..e6c96f3c2d1 100644 --- a/src/api/providers/fetchers/litellm.ts +++ b/src/api/providers/fetchers/litellm.ts @@ -1,7 +1,5 @@ import axios from "axios" -import { LITELLM_COMPUTER_USE_MODELS } from "@roo-code/types" - import type { ModelRecord } from "../../../shared/api" import { DEFAULT_HEADERS } from "../constants" @@ -33,8 +31,6 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise const response = await axios.get(url, { headers, timeout: 5000 }) const models: ModelRecord = {} - const computerModels = Array.from(LITELLM_COMPUTER_USE_MODELS) - // Process the model info from the response if (response.data && response.data.data && Array.isArray(response.data.data)) { for (const model of response.data.data) { @@ -44,23 +40,10 @@ export async function getLiteLLMModels(apiKey: string, baseUrl: string): Promise if (!modelName || !modelInfo || !litellmModelName) continue - // Use explicit supports_computer_use if available, otherwise fall back to hardcoded list - let supportsComputerUse: boolean - if (modelInfo.supports_computer_use !== undefined) { - supportsComputerUse = Boolean(modelInfo.supports_computer_use) - } else { - // Fallback for older LiteLLM versions that don't have supports_computer_use field - 
supportsComputerUse = computerModels.some((computer_model) => - litellmModelName.endsWith(computer_model), - ) - } - models[modelName] = { - maxTokens: modelInfo.max_tokens || 8192, + maxTokens: modelInfo.max_output_tokens || modelInfo.max_tokens || 8192, contextWindow: modelInfo.max_input_tokens || 200000, supportsImages: Boolean(modelInfo.supports_vision), - // litellm_params.model may have a prefix like openrouter/ - supportsComputerUse, supportsPromptCache: Boolean(modelInfo.supports_prompt_caching), inputPrice: modelInfo.input_cost_per_token ? modelInfo.input_cost_per_token * 1000000 : undefined, outputPrice: modelInfo.output_cost_per_token diff --git a/src/api/providers/fetchers/lmstudio.ts b/src/api/providers/fetchers/lmstudio.ts index ccfc6934e62..de3f804c28a 100644 --- a/src/api/providers/fetchers/lmstudio.ts +++ b/src/api/providers/fetchers/lmstudio.ts @@ -43,7 +43,6 @@ export const parseLMStudioModel = (rawModel: LLMInstanceInfo | LLMInfo): ModelIn contextWindow: contextLength, supportsPromptCache: true, supportsImages: rawModel.vision, - supportsComputerUse: false, maxTokens: contextLength, }) diff --git a/src/api/providers/fetchers/modelCache.ts b/src/api/providers/fetchers/modelCache.ts index 3777a26d878..6332f8b2e79 100644 --- a/src/api/providers/fetchers/modelCache.ts +++ b/src/api/providers/fetchers/modelCache.ts @@ -25,13 +25,14 @@ import { getLMStudioModels } from "./lmstudio" import { getIOIntelligenceModels } from "./io-intelligence" // kilocode_change start import { getOvhCloudAiEndpointsModels } from "./ovhcloud" -import { getChutesModels } from "./chutes" import { getGeminiModels } from "./gemini" import { getInceptionModels } from "./inception" // kilocode_change end import { getDeepInfraModels } from "./deepinfra" import { getHuggingFaceModels } from "./huggingface" +import { getRooModels } from "./roo" +import { getChutesModels } from "./chutes" const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 }) @@ -95,7 +96,7 @@ 
export const getModels = async (options: GetModelsOptions): Promise models = await getLiteLLMModels(options.apiKey, options.baseUrl) break // kilocode_change start - case "kilocode-openrouter": { + case "kilocode": { const backendUrl = options.kilocodeOrganizationId ? `https://api.kilocode.ai/api/organizations/${options.kilocodeOrganizationId}` : "https://api.kilocode.ai/api/openrouter" @@ -106,9 +107,6 @@ export const getModels = async (options: GetModelsOptions): Promise }) break } - case "chutes": - models = await getChutesModels(options.apiKey) - break case "gemini": models = await getGeminiModels({ apiKey: options.apiKey, @@ -142,6 +140,16 @@ export const getModels = async (options: GetModelsOptions): Promise models = await getOvhCloudAiEndpointsModels() break // kilocode_change end + case "roo": { + // Roo Code Cloud provider requires baseUrl and optional apiKey + const rooBaseUrl = + options.baseUrl ?? process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy" + models = await getRooModels(rooBaseUrl, options.apiKey) + break + } + case "chutes": + models = await getChutesModels(options.apiKey) + break default: { // Ensures router is exhaustively checked if RouterName is a strict union. 
const exhaustiveCheck: never = provider diff --git a/src/api/providers/fetchers/ollama.ts b/src/api/providers/fetchers/ollama.ts index c5668d74014..8581b71d7b7 100644 --- a/src/api/providers/fetchers/ollama.ts +++ b/src/api/providers/fetchers/ollama.ts @@ -69,7 +69,6 @@ export const parseOllamaModel = ( contextWindow: contextWindow || ollamaDefaultModelInfo.contextWindow, supportsPromptCache: true, supportsImages: rawModel.capabilities?.includes("vision"), - supportsComputerUse: false, maxTokens: contextWindow || ollamaDefaultModelInfo.contextWindow, }) diff --git a/src/api/providers/fetchers/openrouter.ts b/src/api/providers/fetchers/openrouter.ts index e33730f726f..ad70c5d8d53 100644 --- a/src/api/providers/fetchers/openrouter.ts +++ b/src/api/providers/fetchers/openrouter.ts @@ -4,7 +4,6 @@ import { z } from "zod" import { type ModelInfo, isModelParameter, - OPEN_ROUTER_COMPUTER_USE_MODELS, OPEN_ROUTER_REASONING_BUDGET_MODELS, OPEN_ROUTER_REQUIRED_REASONING_BUDGET_MODELS, anthropicModels, @@ -243,12 +242,6 @@ export const parseOpenRouterModel = ({ // kilocode_change end } - // The OpenRouter model definition doesn't give us any hints about - // computer use, so we need to set that manually. 
- if (OPEN_ROUTER_COMPUTER_USE_MODELS.has(id)) { - modelInfo.supportsComputerUse = true - } - if (OPEN_ROUTER_REASONING_BUDGET_MODELS.has(id)) { modelInfo.supportsReasoningBudget = true } diff --git a/src/api/providers/fetchers/requesty.ts b/src/api/providers/fetchers/requesty.ts index 6db962ccb7c..64c7de66892 100644 --- a/src/api/providers/fetchers/requesty.ts +++ b/src/api/providers/fetchers/requesty.ts @@ -36,7 +36,6 @@ export async function getRequestyModels(baseUrl?: string, apiKey?: string): Prom contextWindow: rawModel.context_window, supportsPromptCache: rawModel.supports_caching, supportsImages: rawModel.supports_vision, - supportsComputerUse: rawModel.supports_computer_use, supportsReasoningBudget: reasoningBudget, supportsReasoningEffort: reasoningEffort, inputPrice: parseApiPrice(rawModel.input_price), diff --git a/src/api/providers/fetchers/roo.ts b/src/api/providers/fetchers/roo.ts new file mode 100644 index 00000000000..17aec4253b3 --- /dev/null +++ b/src/api/providers/fetchers/roo.ts @@ -0,0 +1,129 @@ +import { RooModelsResponseSchema } from "@roo-code/types" + +import type { ModelRecord } from "../../../shared/api" +import { parseApiPrice } from "../../../shared/cost" + +import { DEFAULT_HEADERS } from "../constants" + +/** + * Fetches available models from the Roo Code Cloud provider + * + * @param baseUrl The base URL of the Roo Code Cloud provider + * @param apiKey The API key (session token) for the Roo Code Cloud provider + * @returns A promise that resolves to a record of model IDs to model info + * @throws Will throw an error if the request fails or the response is not as expected. 
+ */ +export async function getRooModels(baseUrl: string, apiKey?: string): Promise { + try { + const headers: Record = { + "Content-Type": "application/json", + ...DEFAULT_HEADERS, + } + + if (apiKey) { + headers["Authorization"] = `Bearer ${apiKey}` + } + + // Construct the models endpoint URL + // Strip trailing /v1 or /v1/ to avoid /v1/v1/models + const normalizedBase = baseUrl.replace(/\/?v1\/?$/, "") + const url = `${normalizedBase}/v1/models` + + // Use fetch with AbortController for better timeout handling + const controller = new AbortController() + const timeoutId = setTimeout(() => controller.abort(), 10000) + + try { + const response = await fetch(url, { + headers, + signal: controller.signal, + }) + + if (!response.ok) { + throw new Error(`HTTP ${response.status}: ${response.statusText}`) + } + + const data = await response.json() + const models: ModelRecord = {} + + // Validate response against schema + const parsed = RooModelsResponseSchema.safeParse(data) + + if (!parsed.success) { + console.error("Error fetching Roo Code Cloud models: Unexpected response format", data) + console.error("Validation errors:", parsed.error.format()) + throw new Error("Failed to fetch Roo Code Cloud models: Unexpected response format.") + } + + // Process the validated model data + for (const model of parsed.data.data) { + const modelId = model.id + + if (!modelId) continue + + // Extract model data from the validated API response + // All required fields are guaranteed by the schema + const contextWindow = model.context_window + const maxTokens = model.max_tokens + const tags = model.tags || [] + const pricing = model.pricing + + // Determine if the model supports images based on tags + const supportsImages = tags.includes("vision") + + // Determine if the model supports reasoning effort based on tags + const supportsReasoningEffort = tags.includes("reasoning") + + // Determine if the model requires reasoning effort based on tags + const requiredReasoningEffort = 
tags.includes("reasoning-required") + + // Parse pricing (API returns strings, convert to numbers) + const inputPrice = parseApiPrice(pricing.input) + const outputPrice = parseApiPrice(pricing.output) + const cacheReadPrice = pricing.input_cache_read ? parseApiPrice(pricing.input_cache_read) : undefined + const cacheWritePrice = pricing.input_cache_write ? parseApiPrice(pricing.input_cache_write) : undefined + + models[modelId] = { + maxTokens, + contextWindow, + supportsImages, + supportsReasoningEffort, + requiredReasoningEffort, + supportsPromptCache: Boolean(cacheReadPrice !== undefined), + inputPrice, + outputPrice, + cacheWritesPrice: cacheWritePrice, + cacheReadsPrice: cacheReadPrice, + description: model.description || model.name, + deprecated: model.deprecated || false, + isFree: tags.includes("free"), + } + } + + return models + } finally { + clearTimeout(timeoutId) + } + } catch (error: any) { + console.error("Error fetching Roo Code Cloud models:", error.message ? error.message : error) + + // Handle abort/timeout + if (error.name === "AbortError") { + throw new Error("Failed to fetch Roo Code Cloud models: Request timed out after 10 seconds.") + } + + // Handle fetch errors + if (error.message?.includes("HTTP")) { + throw new Error(`Failed to fetch Roo Code Cloud models: ${error.message}. Check base URL and API key.`) + } + + // Handle network errors + if (error instanceof TypeError) { + throw new Error( + "Failed to fetch Roo Code Cloud models: No response from server. 
Check Roo Code Cloud server status and base URL.", + ) + } + + throw new Error(`Failed to fetch Roo Code Cloud models: ${error.message || "An unknown error occurred."}`) + } +} diff --git a/src/api/providers/fetchers/unbound.ts b/src/api/providers/fetchers/unbound.ts index 98c0c58fa57..354c0fde58a 100644 --- a/src/api/providers/fetchers/unbound.ts +++ b/src/api/providers/fetchers/unbound.ts @@ -23,7 +23,6 @@ export async function getUnboundModels(apiKey?: string | null): Promise { const cacheWriteTokens = 0 // Calculate cost using OpenAI-compatible cost calculation - const totalCost = calculateApiCostOpenAI(info, inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens) + const { totalCost } = calculateApiCostOpenAI(info, inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens) yield { type: "usage", diff --git a/src/api/providers/human-relay.ts b/src/api/providers/human-relay.ts index c1dc3506e9b..54446bd3625 100644 --- a/src/api/providers/human-relay.ts +++ b/src/api/providers/human-relay.ts @@ -70,7 +70,6 @@ export class HumanRelayHandler implements ApiHandler, SingleCompletionHandler { contextWindow: 100000, supportsImages: true, supportsPromptCache: false, - supportsComputerUse: true, inputPrice: 0, outputPrice: 0, description: "Calling web-side AI model through human relay", diff --git a/src/api/providers/inception.ts b/src/api/providers/inception.ts index 62e93167820..360283c5f80 100644 --- a/src/api/providers/inception.ts +++ b/src/api/providers/inception.ts @@ -133,7 +133,7 @@ export class InceptionLabsHandler extends RouterProvider implements SingleComple const totalCost = modelInfo ? 
calculateApiCostOpenAI(modelInfo, inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens) - : 0 + : undefined return { type: "usage", @@ -141,7 +141,7 @@ export class InceptionLabsHandler extends RouterProvider implements SingleComple outputTokens, cacheWriteTokens: cacheWriteTokens || undefined, cacheReadTokens: cacheReadTokens || undefined, - totalCost, + totalCost: totalCost?.totalCost || undefined, } } } diff --git a/src/api/providers/index.ts b/src/api/providers/index.ts index e91e4e5b6f5..6ef6f2e804a 100644 --- a/src/api/providers/index.ts +++ b/src/api/providers/index.ts @@ -41,3 +41,4 @@ export { RooHandler } from "./roo" export { FeatherlessHandler } from "./featherless" export { VercelAiGatewayHandler } from "./vercel-ai-gateway" export { DeepInfraHandler } from "./deepinfra" +export { MiniMaxHandler } from "./minimax" diff --git a/src/api/providers/kilocode-openrouter.ts b/src/api/providers/kilocode-openrouter.ts index cabe0adf4e3..83f947e0ef4 100644 --- a/src/api/providers/kilocode-openrouter.ts +++ b/src/api/providers/kilocode-openrouter.ts @@ -113,7 +113,7 @@ export class KilocodeOpenrouterHandler extends OpenRouterHandler { const [models, endpoints, defaultModel] = await Promise.all([ getModels({ - provider: "kilocode-openrouter", + provider: "kilocode", kilocodeToken: this.options.kilocodeToken, kilocodeOrganizationId: this.options.kilocodeOrganizationId, }), diff --git a/src/api/providers/lite-llm.ts b/src/api/providers/lite-llm.ts index 6950b1792b8..bf5dcc7b4a0 100644 --- a/src/api/providers/lite-llm.ts +++ b/src/api/providers/lite-llm.ts @@ -166,22 +166,23 @@ export class LiteLLMHandler extends RouterProvider implements SingleCompletionHa (lastUsage as any).prompt_cache_hit_tokens || 0 + const { totalCost } = calculateApiCostOpenAI( + info, + lastUsage.prompt_tokens || 0, + lastUsage.completion_tokens || 0, + cacheWriteTokens, + cacheReadTokens, + ) + const usageData: ApiStreamUsageChunk = { type: "usage", inputTokens: 
lastUsage.prompt_tokens || 0, outputTokens: lastUsage.completion_tokens || 0, cacheWriteTokens: cacheWriteTokens > 0 ? cacheWriteTokens : undefined, cacheReadTokens: cacheReadTokens > 0 ? cacheReadTokens : undefined, + totalCost, } - usageData.totalCost = calculateApiCostOpenAI( - info, - usageData.inputTokens, - usageData.outputTokens, - usageData.cacheWriteTokens || 0, - usageData.cacheReadTokens || 0, - ) - yield usageData } } catch (error) { diff --git a/src/api/providers/minimax.ts b/src/api/providers/minimax.ts new file mode 100644 index 00000000000..8a8e8c14e5b --- /dev/null +++ b/src/api/providers/minimax.ts @@ -0,0 +1,19 @@ +import { type MinimaxModelId, minimaxDefaultModelId, minimaxModels } from "@roo-code/types" + +import type { ApiHandlerOptions } from "../../shared/api" + +import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider" + +export class MiniMaxHandler extends BaseOpenAiCompatibleProvider { + constructor(options: ApiHandlerOptions) { + super({ + ...options, + providerName: "MiniMax", + baseURL: options.minimaxBaseUrl ?? 
"https://api.minimax.io/v1", + apiKey: options.minimaxApiKey, + defaultProviderModelId: minimaxDefaultModelId, + providerModels: minimaxModels, + defaultTemperature: 1.0, + }) + } +} diff --git a/src/api/providers/openai-native.ts b/src/api/providers/openai-native.ts index 34b26ef6f06..3fdbce6d2f2 100644 --- a/src/api/providers/openai-native.ts +++ b/src/api/providers/openai-native.ts @@ -99,8 +99,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio const effectiveInfo = this.applyServiceTierPricing(model.info, effectiveTier) // Pass total input tokens directly to calculateApiCostOpenAI - // The function handles subtracting both cache reads and writes internally (see shared/cost.ts:46) - const totalCost = calculateApiCostOpenAI( + // The function handles subtracting both cache reads and writes internally + const { totalCost } = calculateApiCostOpenAI( effectiveInfo, totalInputTokens, totalOutputTokens, diff --git a/src/api/providers/ovhcloud.ts b/src/api/providers/ovhcloud.ts index 716ef341771..b4d48de6879 100644 --- a/src/api/providers/ovhcloud.ts +++ b/src/api/providers/ovhcloud.ts @@ -69,7 +69,9 @@ export class OVHcloudAIEndpointsHandler extends RouterProvider implements Single type: "usage", inputTokens: usage.prompt_tokens || 0, outputTokens: usage.completion_tokens || 0, - totalCost: calculateApiCostOpenAI(info, usage.prompt_tokens || 0, usage.completion_tokens || 0), + totalCost: + calculateApiCostOpenAI(info, usage.prompt_tokens || 0, usage.completion_tokens || 0) + .totalCost || undefined, } } } diff --git a/src/api/providers/requesty.ts b/src/api/providers/requesty.ts index 16aefae5286..1c0e9ed6407 100644 --- a/src/api/providers/requesty.ts +++ b/src/api/providers/requesty.ts @@ -85,9 +85,9 @@ export class RequestyHandler extends BaseProvider implements SingleCompletionHan const outputTokens = requestyUsage?.completion_tokens || 0 const cacheWriteTokens = requestyUsage?.prompt_tokens_details?.caching_tokens || 0 const 
cacheReadTokens = requestyUsage?.prompt_tokens_details?.cached_tokens || 0 - const totalCost = modelInfo + const { totalCost } = modelInfo ? calculateApiCostOpenAI(modelInfo, inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens) - : 0 + : { totalCost: 0 } return { type: "usage", diff --git a/src/api/providers/roo.ts b/src/api/providers/roo.ts index 6f10157a313..327796a1ffc 100644 --- a/src/api/providers/roo.ts +++ b/src/api/providers/roo.ts @@ -1,24 +1,48 @@ import { Anthropic } from "@anthropic-ai/sdk" import OpenAI from "openai" -import { AuthState, rooDefaultModelId, rooModels, type RooModelId } from "@roo-code/types" +import { rooDefaultModelId } from "@roo-code/types" import { CloudService } from "@roo-code/cloud" -import type { ApiHandlerOptions } from "../../shared/api" +import type { ApiHandlerOptions, ModelRecord } from "../../shared/api" import { ApiStream } from "../transform/stream" +import { getModelParams } from "../transform/model-params" +import { convertToOpenAiMessages } from "../transform/openai-format" +import type { RooReasoningParams } from "../transform/reasoning" +import { getRooReasoning } from "../transform/reasoning" import type { ApiHandlerCreateMessageMetadata } from "../index" -import { DEFAULT_HEADERS } from "./constants" import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider" +import { getModels, getModelsFromCache } from "../providers/fetchers/modelCache" +import { handleOpenAIError } from "./utils/openai-error-handler" -export class RooHandler extends BaseOpenAiCompatibleProvider { - private authStateListener?: (state: { state: AuthState }) => void +// Extend OpenAI's CompletionUsage to include Roo specific fields +interface RooUsage extends OpenAI.CompletionUsage { + cache_creation_input_tokens?: number + cost?: number +} + +// Add custom interface for Roo params to support reasoning +type RooChatCompletionParams = OpenAI.Chat.ChatCompletionCreateParamsStreaming & { + reasoning?: RooReasoningParams 
+} + +function getSessionToken(): string { + const token = CloudService.hasInstance() ? CloudService.instance.authService?.getSessionToken() : undefined + return token ?? "unauthenticated" +} + +export class RooHandler extends BaseOpenAiCompatibleProvider { + private fetcherBaseURL: string constructor(options: ApiHandlerOptions) { - let sessionToken: string | undefined = undefined + const sessionToken = getSessionToken() + + let baseURL = process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy" - if (CloudService.hasInstance()) { - sessionToken = CloudService.instance.authService?.getSessionToken() + // Ensure baseURL ends with /v1 for OpenAI client, but don't duplicate it + if (!baseURL.endsWith("/v1")) { + baseURL = `${baseURL}/v1` } // Always construct the handler, even without a valid token. @@ -26,39 +50,63 @@ export class RooHandler extends BaseOpenAiCompatibleProvider { super({ ...options, providerName: "Roo Code Cloud", - baseURL: process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy/v1", - apiKey: sessionToken || "unauthenticated", // Use a placeholder if no token. + baseURL, // Already has /v1 suffix + apiKey: sessionToken, defaultProviderModelId: rooDefaultModelId, - providerModels: rooModels, + providerModels: {}, defaultTemperature: 0.7, }) - if (CloudService.hasInstance()) { - const cloudService = CloudService.instance - - this.authStateListener = (state: { state: AuthState }) => { - if (state.state === "active-session") { - this.client = new OpenAI({ - baseURL: this.baseURL, - apiKey: cloudService.authService?.getSessionToken() ?? "unauthenticated", - defaultHeaders: DEFAULT_HEADERS, - }) - } else if (state.state === "logged-out") { - this.client = new OpenAI({ - baseURL: this.baseURL, - apiKey: "unauthenticated", - defaultHeaders: DEFAULT_HEADERS, - }) - } - } + // Load dynamic models asynchronously - strip /v1 from baseURL for fetcher + this.fetcherBaseURL = baseURL.endsWith("/v1") ? 
baseURL.slice(0, -3) : baseURL + this.loadDynamicModels(this.fetcherBaseURL, sessionToken).catch((error) => { + console.error("[RooHandler] Failed to load dynamic models:", error) + }) + } + + protected override createStream( + systemPrompt: string, + messages: Anthropic.Messages.MessageParam[], + metadata?: ApiHandlerCreateMessageMetadata, + requestOptions?: OpenAI.RequestOptions, + ) { + const { id: model, info } = this.getModel() + + // Get model parameters including reasoning + const params = getModelParams({ + format: "openai", + modelId: model, + model: info, + settings: this.options, + defaultTemperature: this.defaultTemperature, + }) + + // Get Roo-specific reasoning parameters + const reasoning = getRooReasoning({ + model: info, + reasoningBudget: params.reasoningBudget, + reasoningEffort: params.reasoningEffort, + settings: this.options, + }) - cloudService.on("auth-state-changed", this.authStateListener) + const max_tokens = params.maxTokens ?? undefined + const temperature = params.temperature ?? this.defaultTemperature + + const rooParams: RooChatCompletionParams = { + model, + max_tokens, + temperature, + messages: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)], + stream: true, + stream_options: { include_usage: true }, + ...(reasoning && { reasoning }), } - } - dispose() { - if (this.authStateListener && CloudService.hasInstance()) { - CloudService.instance.off("auth-state-changed", this.authStateListener) + try { + this.client.apiKey = getSessionToken() + return this.client.chat.completions.create(rooParams, requestOptions) + } catch (error) { + throw handleOpenAIError(error, this.providerName) } } @@ -74,50 +122,94 @@ export class RooHandler extends BaseOpenAiCompatibleProvider { metadata?.taskId ? 
{ headers: { "X-Roo-Task-ID": metadata.taskId } } : undefined, ) + let lastUsage: RooUsage | undefined = undefined + for await (const chunk of stream) { const delta = chunk.choices[0]?.delta if (delta) { - if (delta.content) { + // Check for reasoning content (similar to OpenRouter) + if ("reasoning" in delta && delta.reasoning && typeof delta.reasoning === "string") { yield { - type: "text", - text: delta.content, + type: "reasoning", + text: delta.reasoning, } } + // Also check for reasoning_content for backward compatibility if ("reasoning_content" in delta && typeof delta.reasoning_content === "string") { yield { type: "reasoning", text: delta.reasoning_content, } } + + if (delta.content) { + yield { + type: "text", + text: delta.content, + } + } } if (chunk.usage) { - yield { - type: "usage", - inputTokens: chunk.usage.prompt_tokens || 0, - outputTokens: chunk.usage.completion_tokens || 0, - } + lastUsage = chunk.usage as RooUsage + } + } + + if (lastUsage) { + // Check if the current model is marked as free + const model = this.getModel() + const isFreeModel = model.info.isFree ?? false + + yield { + type: "usage", + inputTokens: lastUsage.prompt_tokens || 0, + outputTokens: lastUsage.completion_tokens || 0, + cacheWriteTokens: lastUsage.cache_creation_input_tokens, + cacheReadTokens: lastUsage.prompt_tokens_details?.cached_tokens, + totalCost: isFreeModel ? 0 : (lastUsage.cost ?? 
0), } } } + override async completePrompt(prompt: string): Promise { + // Update API key before making request to ensure we use the latest session token + this.client.apiKey = getSessionToken() + return super.completePrompt(prompt) + } + + private async loadDynamicModels(baseURL: string, apiKey?: string): Promise { + try { + // Fetch models and cache them in the shared cache + await getModels({ + provider: "roo", + baseUrl: baseURL, + apiKey, + }) + } catch (error) { + console.error("[RooHandler] Error loading dynamic models:", error) + } + } override getModel() { const modelId = this.options.apiModelId || rooDefaultModelId - const modelInfo = this.providerModels[modelId as RooModelId] ?? this.providerModels[rooDefaultModelId] + + // Get models from shared cache + const models = getModelsFromCache("roo") || {} + const modelInfo = models[modelId] if (modelInfo) { - return { id: modelId as RooModelId, info: modelInfo } + return { id: modelId, info: modelInfo } } // Return the requested model ID even if not found, with fallback info. 
return { - id: modelId as RooModelId, + id: modelId, info: { maxTokens: 16_384, contextWindow: 262_144, supportsImages: false, + supportsReasoningEffort: false, supportsPromptCache: true, inputPrice: 0, outputPrice: 0, diff --git a/src/api/providers/zai.ts b/src/api/providers/zai.ts index ce5aab9dd9f..cc83945e48a 100644 --- a/src/api/providers/zai.ts +++ b/src/api/providers/zai.ts @@ -5,19 +5,27 @@ import { mainlandZAiDefaultModelId, type InternationalZAiModelId, type MainlandZAiModelId, + type ModelInfo, ZAI_DEFAULT_TEMPERATURE, zaiApiLineConfigs, } from "@roo-code/types" +import { Anthropic } from "@anthropic-ai/sdk" +import OpenAI from "openai" + import type { ApiHandlerOptions } from "../../shared/api" +import { getModelMaxOutputTokens } from "../../shared/api" +import { convertToOpenAiMessages } from "../transform/openai-format" +import type { ApiHandlerCreateMessageMetadata } from "../index" +import { handleOpenAIError } from "./utils/openai-error-handler" import { BaseOpenAiCompatibleProvider } from "./base-openai-compatible-provider" -export class ZAiHandler extends BaseOpenAiCompatibleProvider { +export class ZAiHandler extends BaseOpenAiCompatibleProvider { constructor(options: ApiHandlerOptions) { const isChina = zaiApiLineConfigs[options.zaiApiLine ?? "international_coding"].isChina - const models = isChina ? mainlandZAiModels : internationalZAiModels - const defaultModelId = isChina ? mainlandZAiDefaultModelId : internationalZAiDefaultModelId + const models = (isChina ? mainlandZAiModels : internationalZAiModels) as unknown as Record + const defaultModelId = (isChina ? 
mainlandZAiDefaultModelId : internationalZAiDefaultModelId) as string super({ ...options, @@ -29,4 +37,67 @@ export class ZAiHandler extends BaseOpenAiCompatibleProvider { + const { id: modelId } = this.getModel() + + const params: OpenAI.Chat.Completions.ChatCompletionCreateParams = { + model: modelId, + messages: [{ role: "user", content: prompt }], + } + + // Add thinking parameter if reasoning is enabled and model supports it + const { info: modelInfo } = this.getModel() + if (this.options.enableReasoningEffort && modelInfo.supportsReasoningBinary) { + ;(params as any).thinking = { type: "enabled" } + } + + try { + const response = await this.client.chat.completions.create(params) + return response.choices[0]?.message.content || "" + } catch (error) { + throw handleOpenAIError(error, this.providerName) + } + } } diff --git a/src/api/transform/__tests__/reasoning.spec.ts b/src/api/transform/__tests__/reasoning.spec.ts index fc0983d7416..ae565e9628b 100644 --- a/src/api/transform/__tests__/reasoning.spec.ts +++ b/src/api/transform/__tests__/reasoning.spec.ts @@ -6,10 +6,12 @@ import { getOpenRouterReasoning, getAnthropicReasoning, getOpenAiReasoning, + getRooReasoning, GetModelReasoningOptions, OpenRouterReasoningParams, AnthropicReasoningParams, OpenAiReasoningParams, + RooReasoningParams, } from "../reasoning" describe("reasoning.ts", () => { @@ -761,4 +763,133 @@ describe("reasoning.ts", () => { } }) }) + + describe("getRooReasoning", () => { + it("should return undefined when model does not support reasoning effort", () => { + const options = { ...baseOptions } + const result = getRooReasoning(options) + expect(result).toBeUndefined() + }) + + it("should return enabled: false when enableReasoningEffort is explicitly false", () => { + const modelWithSupported: ModelInfo = { + ...baseModel, + supportsReasoningEffort: true, + } + + const settingsWithDisabled: ProviderSettings = { + enableReasoningEffort: false, + } + + const options = { + ...baseOptions, + 
model: modelWithSupported, + settings: settingsWithDisabled, + } + + const result = getRooReasoning(options) + expect(result).toEqual({ enabled: false }) + }) + + it("should return enabled: true with effort when reasoningEffort is provided", () => { + const modelWithSupported: ModelInfo = { + ...baseModel, + supportsReasoningEffort: true, + } + + const settingsWithEffort: ProviderSettings = { + reasoningEffort: "high", + } + + const options = { + ...baseOptions, + model: modelWithSupported, + settings: settingsWithEffort, + reasoningEffort: "high" as const, + } + + const result = getRooReasoning(options) + expect(result).toEqual({ enabled: true, effort: "high" }) + }) + + it("should return enabled: false when reasoningEffort is undefined (None selected)", () => { + const modelWithSupported: ModelInfo = { + ...baseModel, + supportsReasoningEffort: true, + } + + const options = { + ...baseOptions, + model: modelWithSupported, + settings: {}, + reasoningEffort: undefined, + } + + const result = getRooReasoning(options) + expect(result).toEqual({ enabled: false }) + }) + + it("should not return reasoning params for minimal effort", () => { + const modelWithSupported: ModelInfo = { + ...baseModel, + supportsReasoningEffort: true, + } + + const settingsWithMinimal: ProviderSettings = { + reasoningEffort: "minimal", + } + + const options = { + ...baseOptions, + model: modelWithSupported, + settings: settingsWithMinimal, + reasoningEffort: "minimal" as ReasoningEffortWithMinimal, + } + + const result = getRooReasoning(options) + expect(result).toBeUndefined() + }) + + it("should handle all valid reasoning effort values", () => { + const efforts: Array<"low" | "medium" | "high"> = ["low", "medium", "high"] + + efforts.forEach((effort) => { + const modelWithSupported: ModelInfo = { + ...baseModel, + supportsReasoningEffort: true, + } + + const settingsWithEffort: ProviderSettings = { + reasoningEffort: effort, + } + + const options = { + ...baseOptions, + model: 
modelWithSupported, + settings: settingsWithEffort, + reasoningEffort: effort, + } + + const result = getRooReasoning(options) + expect(result).toEqual({ enabled: true, effort }) + }) + }) + + it("should return enabled: false when model supports reasoning but no effort is provided", () => { + const modelWithSupported: ModelInfo = { + ...baseModel, + supportsReasoningEffort: true, + } + + const options = { + ...baseOptions, + model: modelWithSupported, + settings: {}, + reasoningEffort: undefined, + } + + const result = getRooReasoning(options) + expect(result).toEqual({ enabled: false }) + }) + }) }) diff --git a/src/api/transform/reasoning.ts b/src/api/transform/reasoning.ts index a9b42e4a845..441f2a28e00 100644 --- a/src/api/transform/reasoning.ts +++ b/src/api/transform/reasoning.ts @@ -13,6 +13,11 @@ export type OpenRouterReasoningParams = { enabled?: boolean // kilocode_change } +export type RooReasoningParams = { + enabled?: boolean + effort?: ReasoningEffortWithMinimal +} + export type AnthropicReasoningParams = BetaThinkingConfigParam export type OpenAiReasoningParams = { reasoning_effort: OpenAI.Chat.ChatCompletionCreateParams["reasoning_effort"] } @@ -40,6 +45,36 @@ export const getOpenRouterReasoning = ({ : undefined : undefined +export const getRooReasoning = ({ + model, + reasoningEffort, + settings, +}: GetModelReasoningOptions): RooReasoningParams | undefined => { + // Check if model supports reasoning effort + if (!model.supportsReasoningEffort) { + return undefined + } + + // If enableReasoningEffort is explicitly false, return enabled: false + if (settings.enableReasoningEffort === false) { + return { enabled: false } + } + + // If reasoning effort is provided, return it with enabled: true + if (reasoningEffort && reasoningEffort !== "minimal") { + return { enabled: true, effort: reasoningEffort } + } + + // If reasoningEffort is explicitly undefined (None selected), disable reasoning + // This ensures we explicitly tell the backend not to use 
reasoning + if (reasoningEffort === undefined) { + return { enabled: false } + } + + // Default: no reasoning parameter (reasoning not enabled) + return undefined +} + export const getAnthropicReasoning = ({ model, reasoningBudget, diff --git a/src/core/assistant-message/presentAssistantMessage.ts b/src/core/assistant-message/presentAssistantMessage.ts index 75614d7f302..1640f70a869 100644 --- a/src/core/assistant-message/presentAssistantMessage.ts +++ b/src/core/assistant-message/presentAssistantMessage.ts @@ -15,7 +15,6 @@ import { shouldUseSingleFileRead } from "@roo-code/types" import { writeToFileTool } from "../tools/writeToFileTool" import { applyDiffTool } from "../tools/multiApplyDiffTool" import { insertContentTool } from "../tools/insertContentTool" -import { searchAndReplaceTool } from "../tools/searchAndReplaceTool" import { editFileTool } from "../tools/editFileTool" // kilocode_change: Morph fast apply import { listCodeDefinitionNamesTool } from "../tools/listCodeDefinitionNamesTool" import { searchFilesTool } from "../tools/searchFilesTool" @@ -201,8 +200,6 @@ export async function presentAssistantMessage(cline: Task) { }]` case "insert_content": return `[${block.name} for '${block.params.path}']` - case "search_and_replace": - return `[${block.name} for '${block.params.path}']` // kilocode_change start: Morph fast apply case "edit_file": return `[${block.name} for '${block.params.target_file}']` @@ -504,10 +501,6 @@ export async function presentAssistantMessage(cline: Task) { // await checkpointSaveAndMark(cline) // kilocode_change await insertContentTool(cline, block, askApproval, handleError, pushToolResult, removeClosingTag) break - case "search_and_replace": - // await checkpointSaveAndMark(cline) // kilocode_change - await searchAndReplaceTool(cline, block, askApproval, handleError, pushToolResult, removeClosingTag) - break // kilocode_change start: Morph fast apply case "edit_file": await editFileTool(cline, block, askApproval, handleError, 
pushToolResult, removeClosingTag) diff --git a/src/core/checkpoints/__tests__/checkpoint.test.ts b/src/core/checkpoints/__tests__/checkpoint.test.ts index 6b2a55c5cd9..0b79f89c4f5 100644 --- a/src/core/checkpoints/__tests__/checkpoint.test.ts +++ b/src/core/checkpoints/__tests__/checkpoint.test.ts @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, beforeEach, afterEach } from "vitest" +import { describe, it, expect, vi, beforeEach, afterEach, Mock } from "vitest" import { Task } from "../../task/Task" import { ClineProvider } from "../../webview/ClineProvider" import { checkpointSave, checkpointRestore, checkpointDiff, getCheckpointService } from "../index" @@ -36,6 +36,27 @@ vi.mock("../../../utils/path", () => ({ getWorkspacePath: vi.fn(() => "/test/workspace"), })) +vi.mock("../../../utils/git", () => ({ + checkGitInstalled: vi.fn().mockResolvedValue(true), +})) + +vi.mock("../../../i18n", () => ({ + t: vi.fn((key: string, options?: Record) => { + if (key === "common:errors.wait_checkpoint_long_time") { + return `Checkpoint initialization is taking longer than ${options?.timeout} seconds...` + } + if (key === "common:errors.init_checkpoint_fail_long_time") { + return `Checkpoint initialization failed after ${options?.timeout} seconds` + } + return key + }), +})) + +// Mock p-wait-for to control timeout behavior +vi.mock("p-wait-for", () => ({ + default: vi.fn(), +})) + vi.mock("../../../services/checkpoints") describe("Checkpoint functionality", () => { @@ -284,7 +305,7 @@ describe("Checkpoint functionality", () => { ] }) - it("should show diff for full mode", async () => { + it("should show diff for to-current mode", async () => { const mockChanges = [ { paths: { absolute: "/test/file.ts", relative: "file.ts" }, @@ -296,7 +317,7 @@ describe("Checkpoint functionality", () => { await checkpointDiff(mockTask, { ts: 4, commitHash: "commit2", - mode: "full", + mode: "to-current", }) expect(mockCheckpointService.getDiff).toHaveBeenCalledWith({ @@ -305,7 +326,7 @@ 
describe("Checkpoint functionality", () => { }) expect(vscode.commands.executeCommand).toHaveBeenCalledWith( "vscode.changes", - "Changes since task started", + "common:errors.checkpoint_diff_to_current", expect.any(Array), ) }) @@ -330,7 +351,7 @@ describe("Checkpoint functionality", () => { }) expect(vscode.commands.executeCommand).toHaveBeenCalledWith( "vscode.changes", - "Changes compare with next checkpoint", + "common:errors.checkpoint_diff_with_next", expect.any(Array), ) }) @@ -362,10 +383,10 @@ describe("Checkpoint functionality", () => { await checkpointDiff(mockTask, { ts: 4, commitHash: "commit2", - mode: "full", + mode: "to-current", }) - expect(vscode.window.showInformationMessage).toHaveBeenCalledWith("No changes found.") + expect(vscode.window.showInformationMessage).toHaveBeenCalledWith("common:errors.checkpoint_no_changes") expect(vscode.commands.executeCommand).not.toHaveBeenCalled() }) @@ -375,7 +396,7 @@ describe("Checkpoint functionality", () => { await checkpointDiff(mockTask, { ts: 4, commitHash: "commit2", - mode: "full", + mode: "to-current", }) expect(mockTask.enableCheckpoints).toBe(false) @@ -430,4 +451,142 @@ describe("Checkpoint functionality", () => { expect(mockTask.enableCheckpoints).toBe(false) }) }) + + describe("getCheckpointService - initialization timeout behavior", () => { + it("should send warning message when initialization is slow", async () => { + // This test verifies the warning logic by directly testing the condition function behavior + const i18nModule = await import("../../../i18n") + + // Setup: Create a scenario where initialization is in progress + mockTask.checkpointService = undefined + mockTask.checkpointServiceInitializing = true + mockTask.checkpointTimeout = 15 + + vi.clearAllMocks() + + // Simulate the condition function that runs inside pWaitFor + let warningShown = false + const simulateConditionCheck = (elapsedMs: number) => { + // This simulates what happens inside the pWaitFor condition function (lines 
85-100) + if (!warningShown && elapsedMs >= 5000) { + warningShown = true + // This is what the actual code does at line 91-94 + const provider = mockTask.providerRef.deref() + provider?.postMessageToWebview({ + type: "checkpointInitWarning", + checkpointWarning: i18nModule.t("common:errors.wait_checkpoint_long_time", { timeout: 5 }), + }) + } + + return !!mockTask.checkpointService && !!mockTask.checkpointService.isInitialized + } + + // Test: At 4 seconds, no warning should be sent + expect(simulateConditionCheck(4000)).toBe(false) + expect(mockProvider.postMessageToWebview).not.toHaveBeenCalled() + + // Test: At 5 seconds, warning should be sent + expect(simulateConditionCheck(5000)).toBe(false) + expect(mockProvider.postMessageToWebview).toHaveBeenCalledWith({ + type: "checkpointInitWarning", + checkpointWarning: "Checkpoint initialization is taking longer than 5 seconds...", + }) + + // Test: At 6 seconds, warning should not be sent again (warningShown is true) + vi.clearAllMocks() + expect(simulateConditionCheck(6000)).toBe(false) + expect(mockProvider.postMessageToWebview).not.toHaveBeenCalled() + }) + + it("should send timeout error message when initialization fails", async () => { + const i18nModule = await import("../../../i18n") + + // Setup + mockTask.checkpointService = undefined + mockTask.checkpointTimeout = 10 + mockTask.enableCheckpoints = true + + vi.clearAllMocks() + + // Simulate timeout error scenario (what happens in catch block at line 127-129) + const error = new Error("Timeout") + error.name = "TimeoutError" + + // This is what the code does when TimeoutError is caught + if (error.name === "TimeoutError" && mockTask.enableCheckpoints) { + const provider = mockTask.providerRef.deref() + provider?.postMessageToWebview({ + type: "checkpointInitWarning", + checkpointWarning: i18nModule.t("common:errors.init_checkpoint_fail_long_time", { + timeout: mockTask.checkpointTimeout, + }), + }) + } + + mockTask.enableCheckpoints = false + + // Verify + 
expect(mockProvider.postMessageToWebview).toHaveBeenCalledWith({ + type: "checkpointInitWarning", + checkpointWarning: "Checkpoint initialization failed after 10 seconds", + }) + expect(mockTask.enableCheckpoints).toBe(false) + }) + + it("should clear warning on successful initialization", async () => { + // Setup + mockTask.checkpointService = mockCheckpointService + mockTask.enableCheckpoints = true + + vi.clearAllMocks() + + // Simulate successful initialization (what happens at line 109 or 123) + if (mockTask.enableCheckpoints) { + const provider = mockTask.providerRef.deref() + provider?.postMessageToWebview({ + type: "checkpointInitWarning", + checkpointWarning: "", + }) + } + + // Verify warning was cleared + expect(mockProvider.postMessageToWebview).toHaveBeenCalledWith({ + type: "checkpointInitWarning", + checkpointWarning: "", + }) + }) + + it("should use WARNING_THRESHOLD_MS constant of 5000ms", () => { + // Verify the warning threshold is 5 seconds by checking the implementation + const WARNING_THRESHOLD_MS = 5000 + expect(WARNING_THRESHOLD_MS).toBe(5000) + expect(WARNING_THRESHOLD_MS / 1000).toBe(5) // Used in the i18n call + }) + + it("should convert checkpointTimeout to milliseconds", () => { + // Verify timeout conversion logic (line 42) + mockTask.checkpointTimeout = 15 + const checkpointTimeoutMs = mockTask.checkpointTimeout * 1000 + expect(checkpointTimeoutMs).toBe(15000) + + mockTask.checkpointTimeout = 10 + expect(mockTask.checkpointTimeout * 1000).toBe(10000) + + mockTask.checkpointTimeout = 60 + expect(mockTask.checkpointTimeout * 1000).toBe(60000) + }) + + it("should use correct i18n keys for warning messages", async () => { + const i18nModule = await import("../../../i18n") + vi.clearAllMocks() + + // Test warning message i18n key + const warningMessage = i18nModule.t("common:errors.wait_checkpoint_long_time", { timeout: 5 }) + expect(warningMessage).toBe("Checkpoint initialization is taking longer than 5 seconds...") + + // Test timeout 
error message i18n key + const errorMessage = i18nModule.t("common:errors.init_checkpoint_fail_long_time", { timeout: 30 }) + expect(errorMessage).toBe("Checkpoint initialization failed after 30 seconds") + }) + }) }) diff --git a/src/core/checkpoints/index.ts b/src/core/checkpoints/index.ts index e116e6e6e3f..eb0e6e8612d 100644 --- a/src/core/checkpoints/index.ts +++ b/src/core/checkpoints/index.ts @@ -28,10 +28,16 @@ function reportError(callsite: string, error: unknown) { } // kilocode_change end -export async function getCheckpointService( - task: Task, - { interval = 250, timeout = 15_000 }: { interval?: number; timeout?: number } = {}, -) { +const WARNING_THRESHOLD_MS = 5000 + +function sendCheckpointInitWarn(task: Task, type?: "WAIT_TIMEOUT" | "INIT_TIMEOUT", timeout?: number) { + task.providerRef.deref()?.postMessageToWebview({ + type: "checkpointInitWarning", + checkpointWarning: type && timeout ? { type, timeout } : undefined, + }) +} + +export async function getCheckpointService(task: Task, { interval = 250 }: { interval?: number } = {}) { if (!task.enableCheckpoints) { return undefined } @@ -42,6 +48,9 @@ export async function getCheckpointService( const provider = task.providerRef.deref() + // Get checkpoint timeout from task settings (converted to milliseconds) + const checkpointTimeoutMs = task.checkpointTimeout * 1000 + const log = (message: string) => { console.log(message) @@ -79,16 +88,32 @@ export async function getCheckpointService( } if (task.checkpointServiceInitializing) { + const checkpointInitStartTime = Date.now() + let warningShown = false + await pWaitFor( () => { - console.log("[Task#getCheckpointService] waiting for service to initialize") + const elapsed = Date.now() - checkpointInitStartTime + + // Show warning if we're past the threshold and haven't shown it yet + if (!warningShown && elapsed >= WARNING_THRESHOLD_MS) { + warningShown = true + sendCheckpointInitWarn(task, "WAIT_TIMEOUT", WARNING_THRESHOLD_MS / 1000) + } + + 
console.log( + `[Task#getCheckpointService] waiting for service to initialize (${Math.round(elapsed / 1000)}s)`, + ) return !!task.checkpointService && !!task?.checkpointService?.isInitialized }, - { interval, timeout }, + { interval, timeout: checkpointTimeoutMs }, ) if (!task?.checkpointService) { + sendCheckpointInitWarn(task, "INIT_TIMEOUT", task.checkpointTimeout) task.enableCheckpoints = false return undefined + } else { + sendCheckpointInitWarn(task) } return task.checkpointService } @@ -101,8 +126,14 @@ export async function getCheckpointService( task.checkpointServiceInitializing = true await checkGitInstallation(task, service, log, provider) task.checkpointService = service + if (task.enableCheckpoints) { + sendCheckpointInitWarn(task) + } return service } catch (err) { + if (err.name === "TimeoutError" && task.enableCheckpoints) { + sendCheckpointInitWarn(task, "INIT_TIMEOUT", task.checkpointTimeout) + } log(`[Task#getCheckpointService] ${err.message}`) task.enableCheckpoints = false reportError("Task#getCheckpointService", err) // kilocode_change @@ -146,6 +177,7 @@ async function checkGitInstallation( service.on("checkpoint", ({ fromHash: from, toHash: to, suppressMessage }) => { try { + sendCheckpointInitWarn(task) // Always update the current checkpoint hash in the webview, including the suppress flag provider?.postMessageToWebview({ type: "currentCheckpointUpdated", @@ -289,10 +321,16 @@ export async function checkpointRestore( } export type CheckpointDiffOptions = { - ts: number + ts?: number previousCommitHash?: string commitHash: string - mode: "full" | "checkpoint" + /** + * from-init: Compare from the first checkpoint to the selected checkpoint. + * checkpoint: Compare the selected checkpoint to the next checkpoint. + * to-current: Compare the selected checkpoint to the current workspace. + * full: Compare from the first checkpoint to the current workspace. 
+ */ + mode: "from-init" | "checkpoint" | "to-current" | "full" } export async function checkpointDiff(task: Task, { ts, previousCommitHash, commitHash, mode }: CheckpointDiffOptions) { @@ -304,30 +342,57 @@ export async function checkpointDiff(task: Task, { ts, previousCommitHash, commi TelemetryService.instance.captureCheckpointDiffed(task.taskId) - let prevHash = commitHash - let nextHash: string | undefined = undefined + let fromHash: string | undefined + let toHash: string | undefined + let title: string - if (mode !== "full") { - const checkpoints = task.clineMessages.filter(({ say }) => say === "checkpoint_saved").map(({ text }) => text!) - const idx = checkpoints.indexOf(commitHash) - if (idx !== -1 && idx < checkpoints.length - 1) { - nextHash = checkpoints[idx + 1] - } else { - nextHash = undefined - } + const checkpoints = task.clineMessages.filter(({ say }) => say === "checkpoint_saved").map(({ text }) => text!) + + if (["from-init", "full"].includes(mode) && checkpoints.length < 1) { + vscode.window.showInformationMessage(t("common:errors.checkpoint_no_first")) + return + } + + const idx = checkpoints.indexOf(commitHash) + switch (mode) { + case "checkpoint": + fromHash = commitHash + toHash = idx !== -1 && idx < checkpoints.length - 1 ? 
checkpoints[idx + 1] : undefined + title = t("common:errors.checkpoint_diff_with_next") + break + case "from-init": + fromHash = checkpoints[0] + toHash = commitHash + title = t("common:errors.checkpoint_diff_since_first") + break + case "to-current": + fromHash = commitHash + toHash = undefined + title = t("common:errors.checkpoint_diff_to_current") + break + case "full": + fromHash = checkpoints[0] + toHash = undefined + title = t("common:errors.checkpoint_diff_since_first") + break + } + + if (!fromHash) { + vscode.window.showInformationMessage(t("common:errors.checkpoint_no_previous")) + return } try { - const changes = await service.getDiff({ from: prevHash, to: nextHash }) + const changes = await service.getDiff({ from: fromHash, to: toHash }) if (!changes?.length) { - vscode.window.showInformationMessage("No changes found.") + vscode.window.showInformationMessage(t("common:errors.checkpoint_no_changes")) return } await vscode.commands.executeCommand( "vscode.changes", - mode === "full" ? 
"Changes since task started" : "Changes compare with next checkpoint", + title, changes.map((change) => [ vscode.Uri.file(change.paths.absolute), vscode.Uri.parse(`${DIFF_VIEW_URI_SCHEME}:${change.paths.relative}`).with({ diff --git a/src/core/condense/__tests__/index.spec.ts b/src/core/condense/__tests__/index.spec.ts index fa59b5fba27..e3c0483dffc 100644 --- a/src/core/condense/__tests__/index.spec.ts +++ b/src/core/condense/__tests__/index.spec.ts @@ -102,7 +102,6 @@ describe("summarizeConversation", () => { info: { contextWindow: 8000, supportsImages: true, - supportsComputerUse: true, supportsVision: true, maxTokens: 4000, supportsPromptCache: true, @@ -577,7 +576,6 @@ describe("summarizeConversation with custom settings", () => { info: { contextWindow: 8000, supportsImages: true, - supportsComputerUse: true, supportsVision: true, maxTokens: 4000, supportsPromptCache: true, @@ -601,7 +599,6 @@ describe("summarizeConversation with custom settings", () => { info: { contextWindow: 4000, supportsImages: true, - supportsComputerUse: false, supportsVision: false, maxTokens: 2000, supportsPromptCache: false, diff --git a/src/core/config/ProviderSettingsManager.ts b/src/core/config/ProviderSettingsManager.ts index 3e1408bf685..14e3fde0fa1 100644 --- a/src/core/config/ProviderSettingsManager.ts +++ b/src/core/config/ProviderSettingsManager.ts @@ -11,12 +11,12 @@ import { DEFAULT_CONSECUTIVE_MISTAKE_LIMIT, getModelId, type ProviderName, - type RooModelId, } from "@roo-code/types" import { TelemetryService } from "@roo-code/telemetry" import { Mode, modes } from "../../shared/modes" import { migrateMorphApiKey } from "./kilocode/migrateMorphApiKey" +import { buildApiHandler } from "../../api" // Type-safe model migrations mapping type ModelMigrations = { @@ -25,7 +25,7 @@ type ModelMigrations = { const MODEL_MIGRATIONS: ModelMigrations = { roo: { - "roo/code-supernova": "roo/code-supernova-1-million" as RooModelId, + "roo/code-supernova": "roo/code-supernova-1-million", 
}, } as const satisfies ModelMigrations @@ -539,6 +539,31 @@ export class ProviderSettingsManager { for (const name in configs) { // Avoid leaking properties from other providers. configs[name] = discriminatedProviderSettingsWithIdSchema.parse(configs[name]) + + // If it has no apiProvider, skip filtering + if (!configs[name].apiProvider) { + continue + } + + // Try to build an API handler to get model information + try { + const apiHandler = buildApiHandler(configs[name]) + const modelInfo = apiHandler.getModel().info + + // Check if the model supports reasoning budgets + const supportsReasoningBudget = + modelInfo.supportsReasoningBudget || modelInfo.requiredReasoningBudget + + // If the model doesn't support reasoning budgets, remove the token fields + if (!supportsReasoningBudget) { + delete configs[name].modelMaxTokens + delete configs[name].modelMaxThinkingTokens + } + } catch (error) { + // If we can't build the API handler or get model info, skip filtering + // to avoid accidental data loss from incomplete configurations + console.warn(`Skipping token field filtering for config '${name}': ${error}`) + } } return profiles }) diff --git a/src/core/config/__tests__/importExport.spec.ts b/src/core/config/__tests__/importExport.spec.ts index 1a73136c710..07b27744f4d 100644 --- a/src/core/config/__tests__/importExport.spec.ts +++ b/src/core/config/__tests__/importExport.spec.ts @@ -17,6 +17,11 @@ import { safeWriteJson } from "../../../utils/safeWriteJson" import type { Mock } from "vitest" vi.mock("vscode", () => ({ + workspace: { + getConfiguration: vi.fn().mockReturnValue({ + get: vi.fn(), + }), + }, window: { showOpenDialog: vi.fn(), showSaveDialog: vi.fn(), @@ -58,6 +63,45 @@ vi.mock("os", () => ({ vi.mock("../../../utils/safeWriteJson") +// Mock buildApiHandler to avoid issues with provider instantiation in tests +vi.mock("../../../api", () => ({ + buildApiHandler: vi.fn().mockImplementation((config) => { + // Return different model info based on the 
provider and model + const getModelInfo = () => { + if (config.apiProvider === "claude-code") { + return { + id: config.apiModelId || "claude-sonnet-4-5", + info: { + supportsReasoningBudget: false, + requiredReasoningBudget: false, + }, + } + } + if (config.apiProvider === "anthropic" && config.apiModelId === "claude-3-5-sonnet-20241022") { + return { + id: "claude-3-5-sonnet-20241022", + info: { + supportsReasoningBudget: true, + requiredReasoningBudget: true, + }, + } + } + // Default fallback + return { + id: config.apiModelId || "claude-sonnet-4-5", + info: { + supportsReasoningBudget: false, + requiredReasoningBudget: false, + }, + } + } + + return { + getModel: vi.fn().mockReturnValue(getModelInfo()), + } + }), +})) + describe("importExport", () => { let mockProviderSettingsManager: ReturnType> let mockContextProxy: ReturnType> @@ -436,6 +480,71 @@ describe("importExport", () => { showErrorMessageSpy.mockRestore() }) + + it("should handle import when reasoning budget fields are missing from config", async () => { + // This test verifies that import works correctly when reasoning budget fields are not present + // Using claude-code provider which doesn't support reasoning budgets + + ;(vscode.window.showOpenDialog as Mock).mockResolvedValue([{ fsPath: "/mock/path/settings.json" }]) + + const mockFileContent = JSON.stringify({ + providerProfiles: { + currentApiConfigName: "claude-code-provider", + apiConfigs: { + "claude-code-provider": { + apiProvider: "claude-code" as ProviderName, + apiModelId: "claude-3-5-sonnet-20241022", + id: "claude-code-id", + apiKey: "test-key", + // No modelMaxTokens or modelMaxThinkingTokens fields + }, + }, + }, + globalSettings: { mode: "code", autoApprovalEnabled: true }, + }) + + ;(fs.readFile as Mock).mockResolvedValue(mockFileContent) + + const previousProviderProfiles = { + currentApiConfigName: "default", + apiConfigs: { default: { apiProvider: "anthropic" as ProviderName, id: "default-id" } }, + } + + 
mockProviderSettingsManager.export.mockResolvedValue(previousProviderProfiles) + mockProviderSettingsManager.listConfig.mockResolvedValue([ + { name: "claude-code-provider", id: "claude-code-id", apiProvider: "claude-code" as ProviderName }, + { name: "default", id: "default-id", apiProvider: "anthropic" as ProviderName }, + ]) + + mockContextProxy.export.mockResolvedValue({ mode: "code" }) + + const result = await importSettings({ + providerSettingsManager: mockProviderSettingsManager, + contextProxy: mockContextProxy, + customModesManager: mockCustomModesManager, + }) + + expect(result.success).toBe(true) + expect(fs.readFile).toHaveBeenCalledWith("/mock/path/settings.json", "utf-8") + expect(mockProviderSettingsManager.export).toHaveBeenCalled() + + expect(mockProviderSettingsManager.import).toHaveBeenCalledWith({ + currentApiConfigName: "claude-code-provider", + apiConfigs: { + default: { apiProvider: "anthropic" as ProviderName, id: "default-id" }, + "claude-code-provider": { + apiProvider: "claude-code" as ProviderName, + apiModelId: "claude-3-5-sonnet-20241022", + apiKey: "test-key", + id: "claude-code-id", + }, + }, + modeApiConfigs: {}, + }) + + expect(mockContextProxy.setValues).toHaveBeenCalledWith({ mode: "code", autoApprovalEnabled: true }) + expect(mockContextProxy.setValue).toHaveBeenCalledWith("currentApiConfigName", "claude-code-provider") + }) }) describe("exportSettings", () => { @@ -1610,5 +1719,78 @@ describe("importExport", () => { "https://custom-api.example.com/v1", ) }) + + it.each([ + { + testCase: "supportsReasoningBudget is false", + providerName: "claude-code-provider", + modelId: "claude-sonnet-4-5", + providerId: "claude-code-id", + }, + { + testCase: "requiredReasoningBudget is false", + providerName: "claude-code-provider-2", + modelId: "claude-sonnet-4-5", + providerId: "claude-code-id-2", + }, + { + testCase: "both supportsReasoningBudget and requiredReasoningBudget are false", + providerName: "claude-code-provider-3", + modelId: 
"claude-3-5-haiku-20241022", + providerId: "claude-code-id-3", + }, + ])( + "should exclude modelMaxTokens and modelMaxThinkingTokens when $testCase", + async ({ providerName, modelId, providerId }) => { + // This test verifies that token fields are excluded when model doesn't support reasoning budget + // Using claude-code provider which has supportsReasoningBudget: false and requiredReasoningBudget: false + + ;(vscode.window.showSaveDialog as Mock).mockResolvedValue({ + fsPath: "/mock/path/roo-code-settings.json", + }) + + // Use a real ProviderSettingsManager instance to test the actual filtering logic + const realProviderSettingsManager = new ProviderSettingsManager(mockExtensionContext) + + // Wait for initialization to complete + await realProviderSettingsManager.initialize() + + // Save a claude-code provider config with token fields + await realProviderSettingsManager.saveConfig(providerName, { + apiProvider: "claude-code" as ProviderName, + apiModelId: modelId, + id: providerId, + apiKey: "test-key", + modelMaxTokens: 4096, // This should be removed during export + modelMaxThinkingTokens: 2048, // This should be removed during export + }) + + // Set this as the current provider + await realProviderSettingsManager.activateProfile({ name: providerName }) + + const mockGlobalSettings = { + mode: "code", + autoApprovalEnabled: true, + } + + mockContextProxy.export.mockResolvedValue(mockGlobalSettings) + ;(fs.mkdir as Mock).mockResolvedValue(undefined) + + await exportSettings({ + providerSettingsManager: realProviderSettingsManager, + contextProxy: mockContextProxy, + }) + + // Get the exported data + const exportedData = (safeWriteJson as Mock).mock.calls[0][1] + + // Verify that token fields were excluded because reasoning budget is not supported/required + const provider = exportedData.providerProfiles.apiConfigs[providerName] + expect(provider).toBeDefined() + expect(provider.apiModelId).toBe(modelId) + expect("modelMaxTokens" in provider).toBe(false) // 
Should be excluded + expect("modelMaxThinkingTokens" in provider).toBe(false) // Should be excluded + }, + ) }) }) diff --git a/src/core/diff/strategies/__tests__/multi-search-replace-trailing-newline.spec.ts b/src/core/diff/strategies/__tests__/multi-search-replace-trailing-newline.spec.ts new file mode 100644 index 00000000000..95512193941 --- /dev/null +++ b/src/core/diff/strategies/__tests__/multi-search-replace-trailing-newline.spec.ts @@ -0,0 +1,163 @@ +import { MultiSearchReplaceDiffStrategy } from "../multi-search-replace" + +describe("MultiSearchReplaceDiffStrategy - trailing newline preservation", () => { + let strategy: MultiSearchReplaceDiffStrategy + + beforeEach(() => { + strategy = new MultiSearchReplaceDiffStrategy() + }) + + it("should preserve trailing newlines in SEARCH content with line numbers", async () => { + // This test verifies the fix for issue #8020 + // The regex should not consume trailing newlines, allowing stripLineNumbers to work correctly + const originalContent = `class Example { + constructor() { + this.value = 0; + } +}` + const diffContent = `<<<<<<< SEARCH +1 | class Example { +2 | constructor() { +3 | this.value = 0; +4 | } +5 | } +======= +class Example { + constructor() { + this.value = 1; + } +} +>>>>>>> REPLACE` + + const result = await strategy.applyDiff(originalContent, diffContent) + expect(result.success).toBe(true) + if (result.success) { + expect(result.content).toBe(`class Example { + constructor() { + this.value = 1; + } +}`) + } + }) + + it("should handle Windows line endings with trailing newlines and line numbers", async () => { + const originalContent = "function test() {\r\n return true;\r\n}\r\n" + const diffContent = `<<<<<<< SEARCH +1 | function test() { +2 | return true; +3 | } +======= +function test() { + return false; +} +>>>>>>> REPLACE` + + const result = await strategy.applyDiff(originalContent, diffContent) + expect(result.success).toBe(true) + if (result.success) { + // Should preserve Windows 
line endings + expect(result.content).toBe("function test() {\r\n return false;\r\n}\r\n") + } + }) + + it("should handle multiple search/replace blocks with trailing newlines", async () => { + const originalContent = `function one() { + return 1; +} + +function two() { + return 2; +}` + const diffContent = `<<<<<<< SEARCH +1 | function one() { +2 | return 1; +3 | } +======= +function one() { + return 10; +} +>>>>>>> REPLACE + +<<<<<<< SEARCH +5 | function two() { +6 | return 2; +7 | } +======= +function two() { + return 20; +} +>>>>>>> REPLACE` + + const result = await strategy.applyDiff(originalContent, diffContent) + expect(result.success).toBe(true) + if (result.success) { + expect(result.content).toBe(`function one() { + return 10; +} + +function two() { + return 20; +}`) + } + }) + + it("should handle content with line numbers at the last line", async () => { + // This specifically tests the scenario from the bug report + const originalContent = ` List addressInfoList = new ArrayList<>(CollectionUtils.size(repairInfoList) > 10 ? 10 + : CollectionUtils.size(repairInfoList) + CollectionUtils.size(homeAddressInfoList) + + CollectionUtils.size(idNoAddressInfoList) + CollectionUtils.size(workAddressInfoList) + + CollectionUtils.size(personIdentityInfoList));` + + const diffContent = `<<<<<<< SEARCH +1476 | List addressInfoList = new ArrayList<>(CollectionUtils.size(repairInfoList) > 10 ? 
10 +1477 | : CollectionUtils.size(repairInfoList) + CollectionUtils.size(homeAddressInfoList) +1478 | + CollectionUtils.size(idNoAddressInfoList) + CollectionUtils.size(workAddressInfoList) +1479 | + CollectionUtils.size(personIdentityInfoList)); +======= + + // Filter addresses if optimization is enabled + if (isAddressDisplayOptimizeEnabled()) { + homeAddressInfoList = filterAddressesByThreeYearRule(homeAddressInfoList); + personIdentityInfoList = filterAddressesByThreeYearRule(personIdentityInfoList); + idNoAddressInfoList = filterAddressesByThreeYearRule(idNoAddressInfoList); + workAddressInfoList = filterAddressesByThreeYearRule(workAddressInfoList); + } + + List addressInfoList = new ArrayList<>(CollectionUtils.size(repairInfoList) > 10 ? 10 + : CollectionUtils.size(repairInfoList) + CollectionUtils.size(homeAddressInfoList) + + CollectionUtils.size(idNoAddressInfoList) + CollectionUtils.size(workAddressInfoList) + + CollectionUtils.size(personIdentityInfoList)); +>>>>>>> REPLACE` + + const result = await strategy.applyDiff(originalContent, diffContent) + expect(result.success).toBe(true) + if (result.success) { + expect(result.content).toContain("// Filter addresses if optimization is enabled") + expect(result.content).toContain("if (isAddressDisplayOptimizeEnabled())") + // Verify the last line doesn't have line numbers + expect(result.content).not.toContain("1488 |") + expect(result.content).not.toContain("1479 |") + } + }) + + it("should correctly strip line numbers even when last line has no trailing newline", async () => { + const originalContent = "line 1\nline 2\nline 3" // No trailing newline + const diffContent = `<<<<<<< SEARCH +1 | line 1 +2 | line 2 +3 | line 3 +======= +line 1 +modified line 2 +line 3 +>>>>>>> REPLACE` + + const result = await strategy.applyDiff(originalContent, diffContent) + expect(result.success).toBe(true) + if (result.success) { + expect(result.content).toBe("line 1\nmodified line 2\nline 3") + // Verify no line numbers 
remain + expect(result.content).not.toContain(" | ") + } + }) +}) diff --git a/src/core/environment/getEnvironmentDetails.ts b/src/core/environment/getEnvironmentDetails.ts index f3dbf3d54bb..2313d916c35 100644 --- a/src/core/environment/getEnvironmentDetails.ts +++ b/src/core/environment/getEnvironmentDetails.ts @@ -197,24 +197,32 @@ export async function getEnvironmentDetails(cline: Task, includeFileDetails: boo details += terminalDetails } - // Add current time information with timezone. - const now = new Date() - - const timeZone = Intl.DateTimeFormat().resolvedOptions().timeZone - const timeZoneOffset = -now.getTimezoneOffset() / 60 // Convert to hours and invert sign to match conventional notation - const timeZoneOffsetHours = Math.floor(Math.abs(timeZoneOffset)) - const timeZoneOffsetMinutes = Math.abs(Math.round((Math.abs(timeZoneOffset) - timeZoneOffsetHours) * 60)) - const timeZoneOffsetStr = `${timeZoneOffset >= 0 ? "+" : "-"}${timeZoneOffsetHours}:${timeZoneOffsetMinutes.toString().padStart(2, "0")}` - details += `\n\n# Current Time\nCurrent time in ISO 8601 UTC format: ${now.toISOString()}\nUser time zone: ${timeZone}, UTC${timeZoneOffsetStr}` + // Get settings for time and cost display + const { includeCurrentTime = true, includeCurrentCost = true } = state ?? {} + + // Add current time information with timezone (if enabled). + if (includeCurrentTime) { + const now = new Date() + + const timeZone = Intl.DateTimeFormat().resolvedOptions().timeZone + const timeZoneOffset = -now.getTimezoneOffset() / 60 // Convert to hours and invert sign to match conventional notation + const timeZoneOffsetHours = Math.floor(Math.abs(timeZoneOffset)) + const timeZoneOffsetMinutes = Math.abs(Math.round((Math.abs(timeZoneOffset) - timeZoneOffsetHours) * 60)) + const timeZoneOffsetStr = `${timeZoneOffset >= 0 ? 
"+" : "-"}${timeZoneOffsetHours}:${timeZoneOffsetMinutes.toString().padStart(2, "0")}` + details += `\n\n# Current Time\nCurrent time in ISO 8601 UTC format: ${now.toISOString()}\nUser time zone: ${timeZone}, UTC${timeZoneOffsetStr}` + } - // Add context tokens information. - const { contextTokens, totalCost } = getApiMetrics(cline.clineMessages) + // Add context tokens information (if enabled). + if (includeCurrentCost) { + const { totalCost } = getApiMetrics(cline.clineMessages) + details += `\n\n# Current Cost\n${totalCost !== null ? `$${totalCost.toFixed(2)}` : "(Not available)"}` + } // kilocode_change start // Be sure to fetch the model information before we need it. - if (cline.api instanceof OpenRouterHandler || cline.api instanceof NativeOllamaHandler) { + if (cline.api instanceof OpenRouterHandler || ("fetchModel" in cline.api && cline.api.fetchModel)) { try { - await cline.api.fetchModel() + await (cline.api.fetchModel as () => Promise)() } catch (e) { TelemetryService.instance.captureException(e, { context: "getEnvironmentDetails" }) await cline.say( @@ -226,9 +234,7 @@ export async function getEnvironmentDetails(cline: Task, includeFileDetails: boo } // kilocode_change end - const { id: modelId, info: modelInfo } = cline.api.getModel() - - details += `\n\n# Current Cost\n${totalCost !== null ? `$${totalCost.toFixed(2)}` : "(Not available)"}` + const { id: modelId } = cline.api.getModel() // Add current mode and any mode-specific warnings. 
const { diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/architect-mode-prompt.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/architect-mode-prompt.snap index 96c46a980fb..ac2e21f8930 100644 --- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/architect-mode-prompt.snap +++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/architect-mode-prompt.snap @@ -226,42 +226,6 @@ Example for appending to the end of file: -## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. - -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory /test/path) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. Simple text replacement: - -example.ts -oldText -newText - - -2. Case-insensitive regex pattern: - -example.ts -oldw+ -new$& -true -true - - ## ask_followup_question Description: Ask the user a question to gather additional information needed to complete the task. Use when you need clarification or more details to proceed effectively. 
@@ -465,9 +429,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. 
-- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). +- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. - When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. 
diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/ask-mode-prompt.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/ask-mode-prompt.snap index b53ee31cec6..cb1cbdcdd61 100644 --- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/ask-mode-prompt.snap +++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/ask-mode-prompt.snap @@ -362,9 +362,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. 
Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). +- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. - When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. 
Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-disabled.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-disabled.snap index 6be76d7724f..1940af838e1 100644 --- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-disabled.snap +++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-disabled.snap @@ -225,42 +225,6 @@ Example for appending to the end of file: -## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. - -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory /test/path) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. 
Simple text replacement: - -example.ts -oldText -newText - - -2. Case-insensitive regex pattern: - -example.ts -oldw+ -new$& -true -true - - ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Prefer relative commands and paths that avoid location sensitivity for terminal consistency, e.g: `touch ./testdata/example.file`, `dir ./examples/model1/data/yaml`, or `go test ./cmd/front --config ./cmd/front/config.yml`. If directed by the user, you may open a terminal in a different directory by using the `cwd` parameter. Parameters: @@ -486,9 +450,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. 
Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). +- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. 
Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. - When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-enabled.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-enabled.snap index fa19e0f710b..fe6a5c3d423 100644 --- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-enabled.snap +++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/mcp-server-creation-enabled.snap @@ -226,42 +226,6 @@ Example for appending to the end of file: -## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. 
- -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory /test/path) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. Simple text replacement: - -example.ts -oldText -newText - - -2. Case-insensitive regex pattern: - -example.ts -oldw+ -new$& -true -true - - ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Prefer relative commands and paths that avoid location sensitivity for terminal consistency, e.g: `touch ./testdata/example.file`, `dir ./examples/model1/data/yaml`, or `go test ./cmd/front --config ./cmd/front/config.yml`. If directed by the user, you may open a terminal in a different directory by using the `cwd` parameter. Parameters: @@ -555,9 +519,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. 
You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). 
+- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. - When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. 
diff --git a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/partial-reads-enabled.snap b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/partial-reads-enabled.snap index 441254ace00..4c0e639dab0 100644 --- a/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/partial-reads-enabled.snap +++ b/src/core/prompts/__tests__/__snapshots__/add-custom-instructions/partial-reads-enabled.snap @@ -231,42 +231,6 @@ Example for appending to the end of file: -## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. - -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory /test/path) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. Simple text replacement: - -example.ts -oldText -newText - - -2. Case-insensitive regex pattern: - -example.ts -oldw+ -new$& -true -true - - ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. 
You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Prefer relative commands and paths that avoid location sensitivity for terminal consistency, e.g: `touch ./testdata/example.file`, `dir ./examples/model1/data/yaml`, or `go test ./cmd/front --config ./cmd/front/config.yml`. If directed by the user, you may open a terminal in a different directory by using the `cwd` parameter. Parameters: @@ -492,9 +456,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. 
For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). +- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. 
- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/consistent-system-prompt.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/consistent-system-prompt.snap index a3657083d48..e3e52261f54 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/consistent-system-prompt.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/consistent-system-prompt.snap @@ -226,42 +226,6 @@ Example for appending to the end of file: -## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. 
- -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory /test/path) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. Simple text replacement: - -example.ts -oldText -newText - - -2. Case-insensitive regex pattern: - -example.ts -oldw+ -new$& -true -true - - ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Prefer relative commands and paths that avoid location sensitivity for terminal consistency, e.g: `touch ./testdata/example.file`, `dir ./examples/model1/data/yaml`, or `go test ./cmd/front --config ./cmd/front/config.yml`. If directed by the user, you may open a terminal in a different directory by using the `cwd` parameter. Parameters: @@ -487,9 +451,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. 
You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). 
+- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. - When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. 
diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-computer-use-support.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-computer-use-support.snap index a4647a151d9..aca407bd2af 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-computer-use-support.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-computer-use-support.snap @@ -226,42 +226,6 @@ Example for appending to the end of file: -## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. - -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory /test/path) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. Simple text replacement: - -example.ts -oldText -newText - - -2. Case-insensitive regex pattern: - -example.ts -oldw+ -new$& -true -true - - ## browser_action Description: Request to interact with a Puppeteer-controlled browser. Every action, except `close`, will be responded to with a screenshot of the browser's current state, along with any new console logs. 
You may only perform one browser action per message, and wait for the user's response including a screenshot and logs to determine the next action. - The sequence of actions **must always start with** launching the browser at a URL, and **must always end with** closing the browser. If you need to visit a new URL that is not possible to navigate to from the current webpage, you must first close the browser, then launch again at the new URL. @@ -542,9 +506,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. 
- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). +- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. - When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. 
This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-false.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-false.snap index a3657083d48..e3e52261f54 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-false.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-false.snap @@ -226,42 +226,6 @@ Example for appending to the end of file: -## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. 
- -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory /test/path) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. Simple text replacement: - -example.ts -oldText -newText - - -2. Case-insensitive regex pattern: - -example.ts -oldw+ -new$& -true -true - - ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Prefer relative commands and paths that avoid location sensitivity for terminal consistency, e.g: `touch ./testdata/example.file`, `dir ./examples/model1/data/yaml`, or `go test ./cmd/front --config ./cmd/front/config.yml`. If directed by the user, you may open a terminal in a different directory by using the `cwd` parameter. Parameters: @@ -487,9 +451,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. 
You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). 
+- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. - When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. 
diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-true.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-true.snap index e92e02cf590..2975a023e89 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-true.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-true.snap @@ -314,42 +314,6 @@ Example for appending to the end of file: -## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. - -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory /test/path) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. Simple text replacement: - -example.ts -oldText -newText - - -2. Case-insensitive regex pattern: - -example.ts -oldw+ -new$& -true -true - - ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. 
You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Prefer relative commands and paths that avoid location sensitivity for terminal consistency, e.g: `touch ./testdata/example.file`, `dir ./examples/model1/data/yaml`, or `go test ./cmd/front --config ./cmd/front/config.yml`. If directed by the user, you may open a terminal in a different directory by using the `cwd` parameter. Parameters: @@ -575,9 +539,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. 
For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using apply_diff or write_to_file to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: apply_diff (for surgical edits - targeted changes to specific lines or functions), write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). +- For editing files, you have access to these tools: apply_diff (for surgical edits - targeted changes to specific lines or functions), write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. 
- You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. - When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-undefined.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-undefined.snap index a3657083d48..e3e52261f54 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-undefined.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-diff-enabled-undefined.snap @@ -226,42 +226,6 @@ Example for appending to the end of file: -## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. 
- -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory /test/path) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. Simple text replacement: - -example.ts -oldText -newText - - -2. Case-insensitive regex pattern: - -example.ts -oldw+ -new$& -true -true - - ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Prefer relative commands and paths that avoid location sensitivity for terminal consistency, e.g: `touch ./testdata/example.file`, `dir ./examples/model1/data/yaml`, or `go test ./cmd/front --config ./cmd/front/config.yml`. If directed by the user, you may open a terminal in a different directory by using the `cwd` parameter. Parameters: @@ -487,9 +451,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. 
You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). 
+- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. - When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. 
diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap index 4618f522417..e3e52261f54 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-different-viewport-size.snap @@ -27,7 +27,7 @@ Always use the actual tool name as the XML tag name for proper parsing and execu # Tools ## read_file -Description: Request to read the contents of one or more files. The tool outputs line-numbered content (e.g. "1 | const x = 1") for easy reference when creating diffs or discussing code. Supports text extraction from .pdf and .docx and .ipynb and .xlsx and .png and .jpg and .jpeg and .gif and .webp and .svg and .bmp and .ico and .tiff and .tif and .avif files, but may not handle other binary files properly. +Description: Request to read the contents of one or more files. The tool outputs line-numbered content (e.g. "1 | const x = 1") for easy reference when creating diffs or discussing code. Supports text extraction from .pdf and .docx and .ipynb and .xlsx files, but may not handle other binary files properly. **IMPORTANT: You can read a maximum of 5 files in a single request.** If you need to read more files, use multiple sequential read_file requests. @@ -226,95 +226,6 @@ Example for appending to the end of file: -## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. 
- -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory /test/path) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. Simple text replacement: - -example.ts -oldText -newText - - -2. Case-insensitive regex pattern: - -example.ts -oldw+ -new$& -true -true - - -## browser_action -Description: Request to interact with a Puppeteer-controlled browser. Every action, except `close`, will be responded to with a screenshot of the browser's current state, along with any new console logs. You may only perform one browser action per message, and wait for the user's response including a screenshot and logs to determine the next action. -- The sequence of actions **must always start with** launching the browser at a URL, and **must always end with** closing the browser. If you need to visit a new URL that is not possible to navigate to from the current webpage, you must first close the browser, then launch again at the new URL. -- While the browser is active, only the `browser_action` tool can be used. No other tools should be called during this time. You may proceed to use other tools only after closing the browser. For example if you run into an error and need to fix a file, you must close the browser, then use other tools to make the necessary changes, then re-launch the browser to verify the result. -- The browser window has a resolution of **900x600** pixels. 
When performing any click actions, ensure the coordinates are within this resolution range. -- Before clicking on any elements such as icons, links, or buttons, you must consult the provided screenshot of the page to determine the coordinates of the element. The click should be targeted at the **center of the element**, not on its edges. -Parameters: -- action: (required) The action to perform. The available actions are: - * launch: Launch a new Puppeteer-controlled browser instance at the specified URL. This **must always be the first action**. - - Use with the `url` parameter to provide the URL. - - Ensure the URL is valid and includes the appropriate protocol (e.g. http://localhost:3000/page, file:///path/to/file.html, etc.) - * hover: Move the cursor to a specific x,y coordinate. - - Use with the `coordinate` parameter to specify the location. - - Always move to the center of an element (icon, button, link, etc.) based on coordinates derived from a screenshot. - * click: Click at a specific x,y coordinate. - - Use with the `coordinate` parameter to specify the location. - - Always click in the center of an element (icon, button, link, etc.) based on coordinates derived from a screenshot. - * type: Type a string of text on the keyboard. You might use this after clicking on a text field to input text. - - Use with the `text` parameter to provide the string to type. - * resize: Resize the viewport to a specific w,h size. - - Use with the `size` parameter to specify the new size. - * scroll_down: Scroll down the page by one page height. - * scroll_up: Scroll up the page by one page height. - * close: Close the Puppeteer-controlled browser instance. This **must always be the final browser action**. - - Example: `close` -- url: (optional) Use this for providing the URL for the `launch` action. - * Example: https://example.com -- coordinate: (optional) The X and Y coordinates for the `click` and `hover` actions. Coordinates should be within the **900x600** resolution. 
- * Example: 450,300 -- size: (optional) The width and height for the `resize` action. - * Example: 1280,720 -- text: (optional) Use this for providing the text for the `type` action. - * Example: Hello, world! -Usage: - -Action to perform (e.g., launch, click, type, scroll_down, scroll_up, close) -URL to launch the browser at (optional) -x,y coordinates (optional) -Text to type (optional) - - -Example: Requesting to launch a browser at https://example.com - -launch -https://example.com - - -Example: Requesting to click on the element at coordinates 450,300 - -click -450,300 - - ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Prefer relative commands and paths that avoid location sensitivity for terminal consistency, e.g: `touch ./testdata/example.file`, `dir ./examples/model1/data/yaml`, or `go test ./cmd/front --config ./cmd/front/config.yml`. If directed by the user, you may open a terminal in a different directory by using the `cwd` parameter. Parameters: @@ -516,14 +427,12 @@ By waiting for and carefully considering the user's response after each tool use CAPABILITIES -- You have access to tools that let you execute CLI commands on the user's computer, list files, view source code definitions, regex search, use the browser, read and write files, and ask follow-up questions. These tools help you effectively accomplish a wide range of tasks, such as writing code, making edits or improvements to existing files, understanding the current state of a project, performing system operations, and much more. 
+- You have access to tools that let you execute CLI commands on the user's computer, list files, view source code definitions, regex search, read and write files, and ask follow-up questions. These tools help you effectively accomplish a wide range of tasks, such as writing code, making edits or improvements to existing files, understanding the current state of a project, performing system operations, and much more. - When the user initially gives you a task, a recursive list of all filepaths in the current workspace directory ('/test/path') will be included in environment_details. This provides an overview of the project's file structure, offering key insights into the project from directory/file names (how developers conceptualize and organize their code) and file extensions (the language used). This can also guide decision-making on which files to explore further. If you need to further explore directories such as outside the current workspace directory, you can use the list_files tool. If you pass 'true' for the recursive parameter, it will list files recursively. Otherwise, it will list files at the top level, which is better suited for generic directories where you don't necessarily need the nested structure, like the Desktop. - You can use search_files to perform regex searches across files in a specified directory, outputting context-rich results that include surrounding lines. This is particularly useful for understanding code patterns, finding specific implementations, or identifying areas that need refactoring. - You can use the list_code_definition_names tool to get an overview of source code definitions for all files at the top level of a specified directory. This can be particularly useful when you need to understand the broader context and relationships between certain parts of the code. You may need to call this tool multiple times to understand various parts of the codebase related to the task. 
- For example, when asked to make edits or improvements you might analyze the file structure in the initial environment_details to get an overview of the project, then use list_code_definition_names to get further insight using source code definitions for files located in relevant directories, then read_file to examine the contents of relevant files, analyze the code and suggest improvements or make necessary edits, then use the write_to_file tool to apply the changes. If you refactored code that could affect other parts of the codebase, you could use search_files to ensure you update other files as needed. - You can use the execute_command tool to run commands on the user's computer whenever you feel it can help accomplish the user's task. When you need to execute a CLI command, you must provide a clear explanation of what the command does. Prefer to execute complex CLI commands over creating executable scripts, since they are more flexible and easier to run. Interactive and long-running commands are allowed, since the commands are run in the user's VSCode terminal. The user may keep commands running in the background and you will be kept updated on their status along the way. Each command you execute is run in a new terminal instance. -- You can use the browser_action tool to interact with websites (including html files and locally running development servers) through a Puppeteer-controlled browser when you feel it is necessary in accomplishing the user's task. This tool is particularly useful for web development tasks as it allows you to launch a browser, navigate to pages, interact with elements through clicks and keyboard input, and capture the results through screenshots and console logs. This tool may be useful at key stages of web development tasks-such as after implementing new features, making substantial changes, when troubleshooting issues, or to verify the result of your work. 
You can analyze the provided screenshots to ensure correct rendering or identify errors, and review console logs for runtime issues. - - For example, if asked to add a component to a react website, you might create the necessary files, use execute_command to run the site locally, then use browser_action to launch the browser, navigate to the local server, and verify the component renders & functions correctly before closing the browser. ==== @@ -542,9 +451,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. 
- When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). +- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. - When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. 
This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. @@ -556,14 +464,13 @@ RULES - When executing commands, if you don't see the expected output, assume the terminal executed the command successfully and proceed with the task. The user's terminal may be unable to stream the output back properly. If you absolutely need to see the actual terminal output, use the ask_followup_question tool to request the user to copy and paste it back to you. - The user may provide a file's contents directly in their message, in which case you shouldn't use the read_file tool to get the file contents again since you already have it. - Your goal is to try to accomplish the user's task, NOT engage in a back and forth conversation. -- The user may ask generic non-development tasks, such as "what's the latest news" or "look up the weather in San Diego", in which case you might use the browser_action tool to complete the task if it makes sense to do so, rather than trying to create a website or using curl to answer the question. However, if an available MCP server tool or resource can be used instead, you should prefer to use it over browser_action. - NEVER end attempt_completion result with a question or request to engage in further conversation! Formulate the end of your result in a way that is final and does not require further input from the user. - You are STRICTLY FORBIDDEN from starting your messages with "Great", "Certainly", "Okay", "Sure". 
You should NOT be conversational in your responses, but rather direct and to the point. For example you should NOT say "Great, I've updated the CSS" but instead something like "I've updated the CSS". It is important you be clear and technical in your messages. - When presented with images, utilize your vision capabilities to thoroughly examine them and extract meaningful information. Incorporate these insights into your thought process as you accomplish the user's task. - At the end of each user message, you will automatically receive environment_details. This information is not written by the user themselves, but is auto-generated to provide potentially relevant context about the project structure and environment. While this information can be valuable for understanding the project context, do not treat it as a direct part of the user's request or response. Use it to inform your actions and decisions, but don't assume the user is explicitly asking about or referring to this information unless they clearly do so in their message. When using environment_details, explain your actions clearly to ensure the user understands, as they may not be aware of these details. - Before executing commands, check the "Actively Running Terminals" section in environment_details. If present, consider how these active processes might impact your task. For example, if a local development server is already running, you wouldn't need to start it again. If no active terminals are listed, proceed with command execution as normal. - MCP operations should be used one at a time, similar to other tool usage. Wait for confirmation of success before proceeding with additional operations. -- It is critical you wait for the user's response after each tool use, in order to confirm the success of the tool use. 
For example, if asked to make a todo app, you would create a file, wait for the user's response it was created successfully, then create another file if needed, wait for the user's response it was created successfully, etc. Then if you want to test your work, you might use browser_action to launch the site, wait for the user's response confirming the site was launched along with a screenshot, then perhaps e.g., click a button to test functionality if needed, wait for the user's response confirming the button was clicked along with a screenshot of the new state, before finally closing the browser. +- It is critical you wait for the user's response after each tool use, in order to confirm the success of the tool use. For example, if asked to make a todo app, you would create a file, wait for the user's response it was created successfully, then create another file if needed, wait for the user's response it was created successfully, etc. ==== diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-mcp-hub-provided.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-mcp-hub-provided.snap index fa19e0f710b..fe6a5c3d423 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-mcp-hub-provided.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-mcp-hub-provided.snap @@ -226,42 +226,6 @@ Example for appending to the end of file: -## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. 
- -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory /test/path) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. Simple text replacement: - -example.ts -oldText -newText - - -2. Case-insensitive regex pattern: - -example.ts -oldw+ -new$& -true -true - - ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Prefer relative commands and paths that avoid location sensitivity for terminal consistency, e.g: `touch ./testdata/example.file`, `dir ./examples/model1/data/yaml`, or `go test ./cmd/front --config ./cmd/front/config.yml`. If directed by the user, you may open a terminal in a different directory by using the `cwd` parameter. Parameters: @@ -555,9 +519,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. 
You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). 
+- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. - When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. 
diff --git a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-undefined-mcp-hub.snap b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-undefined-mcp-hub.snap index a3657083d48..e3e52261f54 100644 --- a/src/core/prompts/__tests__/__snapshots__/system-prompt/with-undefined-mcp-hub.snap +++ b/src/core/prompts/__tests__/__snapshots__/system-prompt/with-undefined-mcp-hub.snap @@ -226,42 +226,6 @@ Example for appending to the end of file: -## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. - -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory /test/path) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. Simple text replacement: - -example.ts -oldText -newText - - -2. Case-insensitive regex pattern: - -example.ts -oldw+ -new$& -true -true - - ## execute_command Description: Request to execute a CLI command on the system. Use this when you need to perform system operations or run specific commands to accomplish any step in the user's task. 
You must tailor your command to the user's system and provide a clear explanation of what the command does. For command chaining, use the appropriate chaining syntax for the user's shell. Prefer to execute complex CLI commands over creating executable scripts, as they are more flexible and easier to run. Prefer relative commands and paths that avoid location sensitivity for terminal consistency, e.g: `touch ./testdata/example.file`, `dir ./examples/model1/data/yaml`, or `go test ./cmd/front --config ./cmd/front/config.yml`. If directed by the user, you may open a terminal in a different directory by using the `cwd` parameter. Parameters: @@ -487,9 +451,8 @@ RULES - Before using the execute_command tool, you must first think about the SYSTEM INFORMATION context provided to understand the user's environment and tailor your commands to ensure they are compatible with their system. You must also consider if the command you need to run should be executed in a specific directory outside of the current working directory '/test/path', and if so prepend with `cd`'ing into that directory && then executing the command (as one command since you are stuck operating from '/test/path'). For example, if you needed to run `npm install` in a project outside of '/test/path', you would need to prepend with a `cd` i.e. pseudocode for this would be `cd (path to project) && (command, in this case npm install)`. - When using the search_files tool, craft your regex patterns carefully to balance specificity and flexibility. Based on the user's task you may use it to find code patterns, TODO comments, function definitions, or any text-based information across the project. The results include context, so analyze the surrounding code to better understand the matches. Leverage the search_files tool in combination with other tools for more comprehensive analysis. 
For example, use it to find specific code patterns, then use read_file to examine the full context of interesting matches before using write_to_file to make informed changes. - When creating a new project (such as an app, website, or any software project), organize all new files within a dedicated project directory unless the user specifies otherwise. Use appropriate file paths when writing files, as the write_to_file tool will automatically create any necessary directories. Structure the project logically, adhering to best practices for the specific type of project being created. Unless otherwise specified, new projects should be easily run without additional setup, for example most projects can be built in HTML, CSS, and JavaScript - which you can open in a browser. -- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files), search_and_replace (for finding and replacing individual pieces of text). +- For editing files, you have access to these tools: write_to_file (for creating new files or complete file rewrites), insert_content (for adding lines to files). - The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line. -- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. It can support multiple operations at once. - You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files. 
- When using the write_to_file tool to modify a file, use the tool directly with the desired content. You do not need to display the content before using the tool. ALWAYS provide the COMPLETE file content in your response. This is NON-NEGOTIABLE. Partial updates or placeholders like '// rest of code unchanged' are STRICTLY FORBIDDEN. You MUST include ALL parts of the file, even if they haven't been modified. Failure to do so will result in incomplete or broken code, severely impacting the user's project. - Some modes have restrictions on which files they can edit. If you attempt to edit a restricted file, the operation will be rejected with a FileRestrictionError that will specify which file patterns are allowed for the current mode. diff --git a/src/core/prompts/__tests__/add-custom-instructions.spec.ts b/src/core/prompts/__tests__/add-custom-instructions.spec.ts index 5097685e3bc..c49baf5dea4 100644 --- a/src/core/prompts/__tests__/add-custom-instructions.spec.ts +++ b/src/core/prompts/__tests__/add-custom-instructions.spec.ts @@ -193,7 +193,7 @@ describe("addCustomInstructions", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, // supportsImages undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -216,7 +216,7 @@ describe("addCustomInstructions", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, // supportsImages undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -241,7 +241,7 @@ describe("addCustomInstructions", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, // supportsImages mockMcpHub, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -267,7 +267,7 @@ describe("addCustomInstructions", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // 
supportsComputerUse + false, // supportsImages mockMcpHub, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -291,7 +291,7 @@ describe("addCustomInstructions", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, // supportsImages undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize diff --git a/src/core/prompts/__tests__/custom-system-prompt.spec.ts b/src/core/prompts/__tests__/custom-system-prompt.spec.ts index 2b5f5907f08..eeaa9232b6b 100644 --- a/src/core/prompts/__tests__/custom-system-prompt.spec.ts +++ b/src/core/prompts/__tests__/custom-system-prompt.spec.ts @@ -96,7 +96,7 @@ describe("File-Based Custom System Prompt", () => { const prompt = await SYSTEM_PROMPT( mockContext, "test/path", // Using a relative path without leading slash - false, // supportsComputerUse + false, // supportsImages undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -135,7 +135,7 @@ describe("File-Based Custom System Prompt", () => { const prompt = await SYSTEM_PROMPT( mockContext, "test/path", // Using a relative path without leading slash - false, // supportsComputerUse + false, // supportsImages undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -183,7 +183,7 @@ describe("File-Based Custom System Prompt", () => { const prompt = await SYSTEM_PROMPT( mockContext, "test/path", // Using a relative path without leading slash - false, // supportsComputerUse + false, // supportsImages undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize diff --git a/src/core/prompts/__tests__/kilocode/system-prompt-morph.spec.ts b/src/core/prompts/__tests__/kilocode/system-prompt-morph.spec.ts index b44c12c44ee..7e9028e5225 100644 --- a/src/core/prompts/__tests__/kilocode/system-prompt-morph.spec.ts +++ b/src/core/prompts/__tests__/kilocode/system-prompt-morph.spec.ts @@ -216,13 +216,9 @@ 
describe("SYSTEM_PROMPT", () => { expect(prompt).not.toContain("## apply_diff") expect(prompt).not.toContain("## write_to_file") expect(prompt).not.toContain("## insert_content") - expect(prompt).not.toContain("## search_and_replace") // Should contain Fast Apply-specific instructions expect(prompt).toContain("FastApply is enabled") - expect(prompt).toContain( - "Traditional editing tools (apply_diff, write_to_file, insert_content, search_and_replace) are disabled", - ) expect(prompt).toContain("ONLY use the edit_file tool for file modifications") }) @@ -257,13 +253,9 @@ describe("SYSTEM_PROMPT", () => { expect(prompt).toContain("## apply_diff") expect(prompt).toContain("## write_to_file") expect(prompt).toContain("## insert_content") - expect(prompt).toContain("## search_and_replace") // Should NOT contain Fast Apply-specific instructions expect(prompt).not.toContain("FastApply is enabled") - expect(prompt).not.toContain( - "Traditional editing tools (apply_diff, write_to_file, insert_content, search_and_replace) are disabled", - ) // Should contain traditional editing instructions expect(prompt).toContain("For editing files, you have access to these tools:") diff --git a/src/core/prompts/__tests__/system-prompt.spec.ts b/src/core/prompts/__tests__/system-prompt.spec.ts index f2d5469da4d..477a68261d2 100644 --- a/src/core/prompts/__tests__/system-prompt.spec.ts +++ b/src/core/prompts/__tests__/system-prompt.spec.ts @@ -207,7 +207,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, // supportsImages undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -226,11 +226,11 @@ describe("SYSTEM_PROMPT", () => { expect(prompt).toMatchFileSnapshot("./__snapshots__/system-prompt/consistent-system-prompt.snap") }) - it("should include browser actions when supportsComputerUse is true", async () => { + it("should include browser actions when supportsImages 
is true", async () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - true, // supportsComputerUse + true, // supportsImages undefined, // mcpHub undefined, // diffStrategy "1280x800", // browserViewportSize @@ -255,7 +255,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, mockMcpHub, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -278,7 +278,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, undefined, // explicitly undefined mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -301,7 +301,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - true, // supportsComputerUse + false, undefined, // mcpHub undefined, // diffStrategy "900x600", // different viewport size @@ -324,7 +324,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, undefined, // mcpHub new MultiSearchReplaceDiffStrategy(), // Use actual diff strategy from the codebase undefined, // browserViewportSize @@ -348,7 +348,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, // supportsImages undefined, // mcpHub new MultiSearchReplaceDiffStrategy(), // Use actual diff strategy from the codebase undefined, // browserViewportSize @@ -372,7 +372,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, undefined, // mcpHub new MultiSearchReplaceDiffStrategy(), // Use actual diff strategy from the codebase undefined, // browserViewportSize @@ -423,7 +423,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - 
false, // supportsComputerUse + false, undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -484,7 +484,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -522,7 +522,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -555,7 +555,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -586,7 +586,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -619,7 +619,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize @@ -651,7 +651,7 @@ describe("SYSTEM_PROMPT", () => { const prompt = await SYSTEM_PROMPT( mockContext, "/test/path", - false, // supportsComputerUse + false, undefined, // mcpHub undefined, // diffStrategy undefined, // browserViewportSize diff --git a/src/core/prompts/instructions/create-mode.ts b/src/core/prompts/instructions/create-mode.ts index ea2112a582d..758f532b98e 100644 --- a/src/core/prompts/instructions/create-mode.ts +++ b/src/core/prompts/instructions/create-mode.ts @@ -1,11 +1,12 @@ import * as path from "path" import * as vscode from "vscode" import { GlobalFileNames } from "../../../shared/globalFileNames" +import { getSettingsDirectoryPath } from 
"../../../utils/storage" export async function createModeInstructions(context: vscode.ExtensionContext | undefined): Promise { if (!context) throw new Error("Missing VSCode Extension Context") - const settingsDir = path.join(context.globalStorageUri.fsPath, "settings") + const settingsDir = await getSettingsDirectoryPath(context.globalStorageUri.fsPath) const customModesPath = path.join(settingsDir, GlobalFileNames.customModes) return ` diff --git a/src/core/prompts/responses.ts b/src/core/prompts/responses.ts index e2a9096db9c..722a5f4c7d7 100644 --- a/src/core/prompts/responses.ts +++ b/src/core/prompts/responses.ts @@ -71,8 +71,7 @@ Otherwise, if you have not completed the task and do not need additional informa } existingFileApproaches.push( - `${diffStrategyEnabled ? "3" : "2"}. Or use search_and_replace for specific text replacements`, - `${diffStrategyEnabled ? "4" : "3"}. Or use insert_content to add specific content at particular lines`, + `${diffStrategyEnabled ? "3" : "2"}. Or use insert_content to add specific content at particular lines`, ) const existingFileGuidance = diff --git a/src/core/prompts/sections/modes.ts b/src/core/prompts/sections/modes.ts index b553ea47c65..56cf8644f7e 100644 --- a/src/core/prompts/sections/modes.ts +++ b/src/core/prompts/sections/modes.ts @@ -1,18 +1,17 @@ -import * as path from "path" import * as vscode from "vscode" -import { promises as fs } from "fs" import type { ModeConfig } from "@roo-code/types" import type { ToolUseStyle } from "@roo-code/types" // kilocode_change import { getAllModesWithPrompts } from "../../../shared/modes" +import { ensureSettingsDirectoryExists } from "../../../utils/globalContext" export async function getModesSection( context: vscode.ExtensionContext, toolUseStyle?: ToolUseStyle, // kilocode_change ): Promise { - const settingsDir = path.join(context.globalStorageUri.fsPath, "settings") - await fs.mkdir(settingsDir, { recursive: true }) + // Make sure path gets created + await 
ensureSettingsDirectoryExists(context) // Get all modes with their overrides from extension state const allModes = await getAllModesWithPrompts(context) diff --git a/src/core/prompts/sections/rules.ts b/src/core/prompts/sections/rules.ts index c3125490bc2..df6f5a02a88 100644 --- a/src/core/prompts/sections/rules.ts +++ b/src/core/prompts/sections/rules.ts @@ -23,7 +23,6 @@ function getEditingInstructions(diffStrategy?: DiffStrategy): string { } availableTools.push("insert_content (for adding lines to files)") - availableTools.push("search_and_replace (for finding and replacing individual pieces of text)") // Base editing instruction mentioning all available tools if (availableTools.length > 1) { @@ -35,10 +34,6 @@ function getEditingInstructions(diffStrategy?: DiffStrategy): string { "- The insert_content tool adds lines of text to files at a specific line number, such as adding a new function to a JavaScript file or inserting a new route in a Python file. Use line number 0 to append at the end of the file, or any positive number to insert before that line.", ) - instructions.push( - "- The search_and_replace tool finds and replaces text or regex in files. This tool allows you to search for a specific regex pattern or text and replace it with another value. Be cautious when using this tool to ensure you are replacing the correct text. 
It can support multiple operations at once.", - ) - if (availableTools.length > 1) { instructions.push( "- You should always prefer using other editing tools over write_to_file when making changes to existing files since write_to_file is much slower and cannot handle large files.", diff --git a/src/core/prompts/tools/edit-file.ts b/src/core/prompts/tools/edit-file.ts index a2039d26edf..9f5b51e4e39 100644 --- a/src/core/prompts/tools/edit-file.ts +++ b/src/core/prompts/tools/edit-file.ts @@ -2,7 +2,7 @@ export function getFastApplyEditingInstructions(modelType: "Morph" | "Relace"): string { return `- **${modelType} FastApply is enabled.** You have access to the \`edit_file\` tool which uses a specialized model optimized for intelligent code understanding and modification. -- **ONLY use the edit_file tool for file modifications.** Traditional editing tools (apply_diff, write_to_file, insert_content, search_and_replace) are disabled in ${modelType} mode. +- **ONLY use the edit_file tool for file modifications.** - **Focus on clear instructions and precise code edits** using the edit_file format with \`// ... existing code ...\` placeholders to represent unchanged sections. 
- **The edit_file tool requires three parameters:** - \`target_file\`: Full path to the file to modify diff --git a/src/core/prompts/tools/index.ts b/src/core/prompts/tools/index.ts index acc1b15500e..200c3968916 100644 --- a/src/core/prompts/tools/index.ts +++ b/src/core/prompts/tools/index.ts @@ -18,7 +18,6 @@ import { getWriteToFileDescription } from "./write-to-file" import { getSearchFilesDescription } from "./search-files" import { getListFilesDescription } from "./list-files" import { getInsertContentDescription } from "./insert-content" -import { getSearchAndReplaceDescription } from "./search-and-replace" import { getListCodeDefinitionNamesDescription } from "./list-code-definition-names" import { getBrowserActionDescription } from "./browser-action" import { getAskFollowupQuestionDescription } from "./ask-followup-question" @@ -64,7 +63,6 @@ const toolDescriptionMap: Record string | undefined> switch_mode: () => getSwitchModeDescription(), new_task: (args) => getNewTaskDescription(args), insert_content: (args) => getInsertContentDescription(args), - search_and_replace: (args) => getSearchAndReplaceDescription(args), edit_file: () => getEditFileDescription(), // kilocode_change: Morph fast apply apply_diff: (args) => args.diffStrategy ? 
args.diffStrategy.getToolDescription({ cwd: args.cwd, toolOptions: args.toolOptions }) : "", @@ -143,7 +141,7 @@ export function getToolDescriptionsForMode( // kilocode_change start: Morph fast apply if (isFastApplyAvailable(clineProviderState)) { // When Morph is enabled, disable traditional editing tools - const traditionalEditingTools = ["apply_diff", "write_to_file", "insert_content", "search_and_replace"] + const traditionalEditingTools = ["apply_diff", "write_to_file", "insert_content"] traditionalEditingTools.forEach((tool) => tools.delete(tool)) } else { tools.delete("edit_file") @@ -198,7 +196,6 @@ export { getAccessMcpResourceDescription, getSwitchModeDescription, getInsertContentDescription, - getSearchAndReplaceDescription, getEditFileDescription, // kilocode_change: Morph fast apply getCodebaseSearchDescription, getRunSlashCommandDescription, diff --git a/src/core/prompts/tools/native-tools/getAllowedJSONToolsForMode.ts b/src/core/prompts/tools/native-tools/getAllowedJSONToolsForMode.ts index d86272fa9f8..ed0de64ddf0 100644 --- a/src/core/prompts/tools/native-tools/getAllowedJSONToolsForMode.ts +++ b/src/core/prompts/tools/native-tools/getAllowedJSONToolsForMode.ts @@ -95,7 +95,7 @@ export async function getAllowedJSONToolsForMode( if (isFastApplyAvailable(providerState)) { // When Fast Apply is enabled, disable traditional editing tools - const traditionalEditingTools = ["apply_diff", "write_to_file", "insert_content", "search_and_replace"] + const traditionalEditingTools = ["apply_diff", "write_to_file", "insert_content"] traditionalEditingTools.forEach((tool) => tools.delete(tool)) } else { tools.delete("edit_file") diff --git a/src/core/prompts/tools/native-tools/index.ts b/src/core/prompts/tools/native-tools/index.ts index cc6a1a46342..a31b48fee00 100644 --- a/src/core/prompts/tools/native-tools/index.ts +++ b/src/core/prompts/tools/native-tools/index.ts @@ -13,7 +13,6 @@ import listFiles from "./list_files" import newTask from "./new_task" import 
{ read_file_single, read_file_multi } from "./read_file" import runSlashCommand from "./run_slash_command" -import searchAndReplace from "./search_and_replace" import searchFiles from "./search_files" import switchMode from "./switch_mode" import updateTodoList from "./update_todo_list" @@ -38,7 +37,6 @@ export const nativeTools = [ read_file_single, read_file_multi, runSlashCommand, - searchAndReplace, searchFiles, switchMode, updateTodoList, diff --git a/src/core/prompts/tools/native-tools/search_and_replace.ts b/src/core/prompts/tools/native-tools/search_and_replace.ts deleted file mode 100644 index 730cebc897a..00000000000 --- a/src/core/prompts/tools/native-tools/search_and_replace.ts +++ /dev/null @@ -1,46 +0,0 @@ -import type OpenAI from "openai" - -export default { - type: "function", - function: { - name: "search_and_replace", - description: - "Find and replace text within a file using literal strings or regular expressions. Supports optional line ranges, regex mode, and case-insensitive matching, and shows a diff preview before applying changes.", - strict: true, - parameters: { - type: "object", - properties: { - path: { - type: "string", - description: "File path to modify, relative to the workspace", - }, - search: { - type: "string", - description: "Text or pattern to search for", - }, - replace: { - type: "string", - description: "Replacement text to insert for each match", - }, - start_line: { - type: ["integer", "null"], - description: "Optional starting line (1-based) to limit replacements", - }, - end_line: { - type: ["integer", "null"], - description: "Optional ending line (1-based) to limit replacements", - }, - use_regex: { - type: ["boolean", "null"], - description: "Set true to treat the search parameter as a regular expression", - }, - ignore_case: { - type: ["boolean", "null"], - description: "Set true to ignore case when matching", - }, - }, - required: ["path", "search", "replace", "start_line", "end_line", "use_regex", "ignore_case"], - 
additionalProperties: false, - }, - }, -} satisfies OpenAI.Chat.ChatCompletionTool diff --git a/src/core/prompts/tools/search-and-replace.ts b/src/core/prompts/tools/search-and-replace.ts deleted file mode 100644 index 357a7058323..00000000000 --- a/src/core/prompts/tools/search-and-replace.ts +++ /dev/null @@ -1,39 +0,0 @@ -import { ToolArgs } from "./types" - -export function getSearchAndReplaceDescription(args: ToolArgs): string { - return `## search_and_replace -Description: Use this tool to find and replace specific text strings or patterns (using regex) within a file. It's suitable for targeted replacements across multiple locations within the file. Supports literal text and regex patterns, case sensitivity options, and optional line ranges. Shows a diff preview before applying changes. - -Required Parameters: -- path: The path of the file to modify (relative to the current workspace directory ${args.cwd.toPosix()}) -- search: The text or pattern to search for -- replace: The text to replace matches with - -Optional Parameters: -- start_line: Starting line number for restricted replacement (1-based) -- end_line: Ending line number for restricted replacement (1-based) -- use_regex: Set to "true" to treat search as a regex pattern (default: false) -- ignore_case: Set to "true" to ignore case when matching (default: false) - -Notes: -- When use_regex is true, the search parameter is treated as a regular expression pattern -- When ignore_case is true, the search is case-insensitive regardless of regex mode - -Examples: - -1. Simple text replacement: - -example.ts -oldText -newText - - -2. 
Case-insensitive regex pattern: - -example.ts -old\w+ -new$& -true -true -` -} diff --git a/src/core/task-persistence/__tests__/taskMessages.spec.ts b/src/core/task-persistence/__tests__/taskMessages.spec.ts new file mode 100644 index 00000000000..ecd6225692d --- /dev/null +++ b/src/core/task-persistence/__tests__/taskMessages.spec.ts @@ -0,0 +1,71 @@ +import { describe, it, expect, vi, beforeEach } from "vitest" +import * as os from "os" +import * as path from "path" +import * as fs from "fs/promises" + +// Mocks (use hoisted to avoid initialization ordering issues) +const hoisted = vi.hoisted(() => ({ + safeWriteJsonMock: vi.fn().mockResolvedValue(undefined), +})) +vi.mock("../../../utils/safeWriteJson", () => ({ + safeWriteJson: hoisted.safeWriteJsonMock, +})) + +// Import after mocks +import { saveTaskMessages } from "../taskMessages" + +let tmpBaseDir: string + +beforeEach(async () => { + hoisted.safeWriteJsonMock.mockClear() + // Create a unique, writable temp directory to act as globalStoragePath + tmpBaseDir = await fs.mkdtemp(path.join(os.tmpdir(), "roo-test-")) +}) + +describe("taskMessages.saveTaskMessages", () => { + beforeEach(() => { + hoisted.safeWriteJsonMock.mockClear() + }) + + it("persists messages as-is", async () => { + const messages: any[] = [ + { + role: "assistant", + content: "Hello", + metadata: { + gpt5: { + previous_response_id: "resp_123", + }, + other: "keep", + }, + }, + { role: "user", content: "Do thing" }, + ] + + await saveTaskMessages({ + messages, + taskId: "task-1", + globalStoragePath: tmpBaseDir, + }) + + expect(hoisted.safeWriteJsonMock).toHaveBeenCalledTimes(1) + const [, persisted] = hoisted.safeWriteJsonMock.mock.calls[0] + expect(persisted).toEqual(messages) + }) + + it("persists messages without modification when no metadata", async () => { + const messages: any[] = [ + { role: "assistant", content: "Hi" }, + { role: "user", content: "Yo" }, + ] + + await saveTaskMessages({ + messages, + taskId: "task-2", + 
globalStoragePath: tmpBaseDir, + }) + + const [, persisted] = hoisted.safeWriteJsonMock.mock.calls[0] + expect(persisted).toEqual(messages) + }) +}) diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts index d2f9a33f7f1..89945487487 100644 --- a/src/core/task/Task.ts +++ b/src/core/task/Task.ts @@ -36,6 +36,9 @@ import { isResumableAsk, QueuedMessage, getActiveToolUseStyle, // kilocode_change + DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, + MAX_CHECKPOINT_TIMEOUT_SECONDS, + MIN_CHECKPOINT_TIMEOUT_SECONDS, } from "@roo-code/types" import { TelemetryService } from "@roo-code/telemetry" import { CloudService, BridgeOrchestrator } from "@roo-code/cloud" @@ -54,7 +57,7 @@ import { t } from "../../i18n" import { ClineApiReqCancelReason, ClineApiReqInfo } from "../../shared/ExtensionMessage" import { getApiMetrics, hasTokenUsageChanged } from "../../shared/getApiMetrics" import { ClineAskResponse } from "../../shared/WebviewMessage" -import { defaultModeSlug } from "../../shared/modes" +import { defaultModeSlug, getModeBySlug, getGroupName } from "../../shared/modes" import { DiffStrategy } from "../../shared/tools" import { EXPERIMENT_IDS, experiments } from "../../shared/experiments" import { getModelMaxOutputTokens } from "../../shared/api" @@ -73,7 +76,7 @@ import { RooTerminalProcess } from "../../integrations/terminal/types" import { TerminalRegistry } from "../../integrations/terminal/TerminalRegistry" // utils -import { calculateApiCostAnthropic } from "../../shared/cost" +import { calculateApiCostAnthropic, calculateApiCostOpenAI } from "../../shared/cost" import { getWorkspacePath } from "../../utils/path" // prompts @@ -136,6 +139,7 @@ export interface TaskOptions extends CreateTaskOptions { apiConfiguration: ProviderSettings enableDiff?: boolean enableCheckpoints?: boolean + checkpointTimeout?: number enableBridge?: boolean fuzzyMatchThreshold?: number consecutiveMistakeLimit?: number @@ -283,6 +287,7 @@ export class Task extends EventEmitter implements TaskLike 
{ // Checkpoints enableCheckpoints: boolean + checkpointTimeout: number checkpointService?: RepoPerTaskCheckpointService checkpointServiceInitializing = false @@ -324,6 +329,7 @@ export class Task extends EventEmitter implements TaskLike { apiConfiguration, enableDiff = false, enableCheckpoints = true, + checkpointTimeout = DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, enableBridge = false, fuzzyMatchThreshold = 1.0, consecutiveMistakeLimit = DEFAULT_CONSECUTIVE_MISTAKE_LIMIT, @@ -345,6 +351,20 @@ export class Task extends EventEmitter implements TaskLike { throw new Error("Either historyItem or task/images must be provided") } + if ( + !checkpointTimeout || + checkpointTimeout > MAX_CHECKPOINT_TIMEOUT_SECONDS || + checkpointTimeout < MIN_CHECKPOINT_TIMEOUT_SECONDS + ) { + throw new Error( + "checkpointTimeout must be between " + + MIN_CHECKPOINT_TIMEOUT_SECONDS + + " and " + + MAX_CHECKPOINT_TIMEOUT_SECONDS + + " seconds", + ) + } + this.taskId = historyItem ? historyItem.id : crypto.randomUUID() this.taskIsFavorited = historyItem?.isFavorited // kilocode_change this.rootTaskId = historyItem ? 
historyItem.rootTaskId : rootTask?.taskId @@ -392,6 +412,7 @@ export class Task extends EventEmitter implements TaskLike { this.globalStoragePath = provider.context.globalStorageUri.fsPath this.diffViewProvider = new DiffViewProvider(this.cwd, this) this.enableCheckpoints = enableCheckpoints + this.checkpointTimeout = checkpointTimeout this.enableBridge = enableBridge this.parentTask = parentTask @@ -891,6 +912,7 @@ export class Task extends EventEmitter implements TaskLike { const isMessageQueued = !this.messageQueueService.isEmpty() const isStatusMutable = !partial && isBlocking && !isMessageQueued let statusMutationTimeouts: NodeJS.Timeout[] = [] + const statusMutationTimeout = 5_000 if (isStatusMutable) { console.log(`Task#ask will block -> type: ${type}`) @@ -904,7 +926,7 @@ export class Task extends EventEmitter implements TaskLike { this.interactiveAsk = message this.emit(RooCodeEventName.TaskInteractive, this.taskId) } - }, 1_000), + }, statusMutationTimeout), ) } else if (isResumableAsk(type)) { statusMutationTimeouts.push( @@ -915,7 +937,7 @@ export class Task extends EventEmitter implements TaskLike { this.resumableAsk = message this.emit(RooCodeEventName.TaskResumable, this.taskId) } - }, 1_000), + }, statusMutationTimeout), ) } else if (isIdleAsk(type)) { statusMutationTimeouts.push( @@ -926,7 +948,7 @@ export class Task extends EventEmitter implements TaskLike { this.idleAsk = message this.emit(RooCodeEventName.TaskIdle, this.taskId) } - }, 1_000), + }, statusMutationTimeout), ) } } else if (isMessageQueued) { @@ -935,17 +957,19 @@ export class Task extends EventEmitter implements TaskLike { const message = this.messageQueueService.dequeueMessage() if (message) { - // Check if this is a tool approval ask that needs to be handled + // Check if this is a tool approval ask that needs to be handled. 
if ( type === "tool" || type === "command" || type === "browser_action_launch" || type === "use_mcp_server" ) { - // For tool approvals, we need to approve first, then send the message if there's text/images + // For tool approvals, we need to approve first, then send + // the message if there's text/images. this.handleWebviewAskResponse("yesButtonClicked", message.text, message.images) } else { - // For other ask types (like followup), fulfill the ask directly + // For other ask types (like followup), fulfill the ask + // directly. this.setMessageResponse(message.text, message.images) } } @@ -1146,6 +1170,9 @@ export class Task extends EventEmitter implements TaskLike { { isNonInteractive: true } /* options */, contextCondense, ) + + // Process any queued messages after condensing completes + this.processQueuedMessages() } async say( @@ -1839,9 +1866,10 @@ export class Task extends EventEmitter implements TaskLike { interface StackItem { userContent: Anthropic.Messages.ContentBlockParam[] includeFileDetails: boolean + retryAttempt?: number } - const stack: StackItem[] = [{ userContent, includeFileDetails }] + const stack: StackItem[] = [{ userContent, includeFileDetails, retryAttempt: 0 }] while (stack.length > 0) { const currentItem = stack.pop()! @@ -1993,21 +2021,35 @@ export class Task extends EventEmitter implements TaskLike { } const existingData = JSON.parse(this.clineMessages[lastApiReqIndex].text || "{}") + + // Calculate total tokens and cost using provider-aware function + const modelId = getModelId(this.apiConfiguration) + const apiProtocol = getApiProtocol(this.apiConfiguration.apiProvider, modelId) + + const costResult = + apiProtocol === "anthropic" + ? 
calculateApiCostAnthropic( + this.api.getModel().info, + inputTokens, + outputTokens, + cacheWriteTokens, + cacheReadTokens, + ) + : calculateApiCostOpenAI( + this.api.getModel().info, + inputTokens, + outputTokens, + cacheWriteTokens, + cacheReadTokens, + ) + this.clineMessages[lastApiReqIndex].text = JSON.stringify({ ...existingData, - tokensIn: inputTokens, - tokensOut: outputTokens, + tokensIn: costResult.totalInputTokens, + tokensOut: costResult.totalOutputTokens, cacheWrites: cacheWriteTokens, cacheReads: cacheReadTokens, - cost: - totalCost ?? - calculateApiCostAnthropic( - this.api.getModel().info, - inputTokens, - outputTokens, - cacheWriteTokens, - cacheReadTokens, - ), + cost: totalCost ?? costResult.totalCost, // kilocode_change start usageMissing, inferenceProvider, @@ -2197,7 +2239,7 @@ export class Task extends EventEmitter implements TaskLike { const drainStreamInBackgroundToFindAllUsage = async (apiReqIndex: number) => { const timeoutMs = DEFAULT_USAGE_COLLECTION_TIMEOUT_MS - const startTime = Date.now() + const startTime = performance.now() const modelId = getModelId(this.apiConfiguration) // Local variables to accumulate usage data without affecting the main flow @@ -2255,21 +2297,34 @@ export class Task extends EventEmitter implements TaskLike { await this.updateClineMessage(apiReqMessage) } - // Capture telemetry + // Capture telemetry with provider-aware cost calculation + const modelId = getModelId(this.apiConfiguration) + const apiProtocol = getApiProtocol(this.apiConfiguration.apiProvider, modelId) + + // Use the appropriate cost function based on the API protocol + const costResult = + apiProtocol === "anthropic" + ? 
calculateApiCostAnthropic( + this.api.getModel().info, + tokens.input, + tokens.output, + tokens.cacheWrite, + tokens.cacheRead, + ) + : calculateApiCostOpenAI( + this.api.getModel().info, + tokens.input, + tokens.output, + tokens.cacheWrite, + tokens.cacheRead, + ) + TelemetryService.instance.captureLlmCompletion(this.taskId, { - inputTokens: tokens.input, - outputTokens: tokens.output, + inputTokens: costResult.totalInputTokens, + outputTokens: costResult.totalOutputTokens, cacheWriteTokens: tokens.cacheWrite, cacheReadTokens: tokens.cacheRead, - cost: - tokens.total ?? - calculateApiCostAnthropic( - this.api.getModel().info, - tokens.input, - tokens.output, - tokens.cacheWrite, - tokens.cacheRead, - ), + cost: tokens.total ?? costResult.totalCost, // kilocode_change start completionTime: performance.now() - apiRequestStartTime, inferenceProvider, @@ -2286,7 +2341,7 @@ export class Task extends EventEmitter implements TaskLike { // Use the same iterator that the main loop was using while (!item.done) { // Check for timeout - if (Date.now() - startTime > timeoutMs) { + if (performance.now() - startTime > timeoutMs) { console.warn( `[Background Usage Collection] Timed out after ${timeoutMs}ms for model: ${modelId}, processed ${chunkCount} chunks`, ) @@ -2376,28 +2431,58 @@ export class Task extends EventEmitter implements TaskLike { // Cline instance to finish aborting (error is thrown here when // any function in the for loop throws due to this.abort). if (!this.abandoned) { - // If the stream failed, there's various states the task - // could be in (i.e. could have streamed some tools the user - // may have executed), so we just resort to replicating a - // cancel task. - - // Determine cancellation reason BEFORE aborting to ensure correct persistence + // Determine cancellation reason const cancelReason: ClineApiReqCancelReason = this.abort ? "user_cancelled" : "streaming_failed" const streamingFailedMessage = this.abort ? undefined : (error.message ?? 
JSON.stringify(serializeError(error), null, 2)) - // Persist interruption details first to both UI and API histories + // Clean up partial state await abortStream(cancelReason, streamingFailedMessage) - // Record reason for provider to decide rehydration path - this.abortReason = cancelReason + if (this.abort) { + // User cancelled - abort the entire task + this.abortReason = cancelReason + await this.abortTask() + } else { + // Stream failed - log the error and retry with the same content + // The existing rate limiting will prevent rapid retries + console.error( + `[Task#${this.taskId}.${this.instanceId}] Stream failed, will retry: ${streamingFailedMessage}`, + ) + + // Apply exponential backoff similar to first-chunk errors when auto-resubmit is enabled + const stateForBackoff = await this.providerRef.deref()?.getState() + if (stateForBackoff?.autoApprovalEnabled && stateForBackoff?.alwaysApproveResubmit) { + await this.backoffAndAnnounce( + currentItem.retryAttempt ?? 0, + error, + streamingFailedMessage, + ) + + // Check if task was aborted during the backoff + if (this.abort) { + console.log( + `[Task#${this.taskId}.${this.instanceId}] Task aborted during mid-stream retry backoff`, + ) + // Abort the entire task + this.abortReason = "user_cancelled" + await this.abortTask() + break + } + } - // Now abort (emits TaskAborted which provider listens to) - await this.abortTask() + // Push the same content back onto the stack to retry, incrementing the retry attempt counter + stack.push({ + userContent: currentUserContent, + includeFileDetails: false, + retryAttempt: (currentItem.retryAttempt ?? 
0) + 1, + }) - // Do not rehydrate here; provider owns rehydration to avoid duplication races + // Continue to retry the request + continue + } } } finally { this.isStreaming = false @@ -2457,7 +2542,7 @@ export class Task extends EventEmitter implements TaskLike { } } - await this.persistGpt5Metadata(reasoningMessage) + await this.persistGpt5Metadata() await this.saveClineMessages() await this.providerRef.deref()?.postStateToWebview() @@ -2481,10 +2566,17 @@ export class Task extends EventEmitter implements TaskLike { }) } + // Check if we should preserve reasoning in the assistant message + let finalAssistantMessage = assistantMessage + if (reasoningMessage && this.api.getModel().info.preserveReasoning) { + // Prepend reasoning in XML tags to the assistant message so it's included in API history + finalAssistantMessage = `${reasoningMessage}\n${assistantMessage}` + } + // kilocode_change start: also add tool calls to history const assistantMessageContent = new Array() - if (assistantMessage) { - assistantMessageContent.push({ type: "text", text: assistantMessage }) + if (finalAssistantMessage) { + assistantMessageContent.push({ type: "text", text: finalAssistantMessage }) } assistantMessageContent.push(...assistantToolUses) await this.addToApiConversationHistory({ @@ -2701,15 +2793,25 @@ export class Task extends EventEmitter implements TaskLike { throw new Error("Provider not available") } + // Align browser tool enablement with generateSystemPrompt: require model image support, + // mode to include the browser group, and the user setting to be enabled. + const modeConfig = getModeBySlug(mode ?? defaultModeSlug, customModes) + const modeSupportsBrowser = modeConfig?.groups.some((group) => getGroupName(group) === "browser") ?? 
false + + // Check if model supports browser capability (images) + const modelInfo = this.api.getModel().info + const modelSupportsBrowser = (modelInfo as any)?.supportsImages === true + + const canUseBrowserTool = modelSupportsBrowser && modeSupportsBrowser && (browserToolEnabled ?? true) + return SYSTEM_PROMPT( provider.context, this.cwd, - // kilocode_change: supports images => supports browser - (this.api.getModel().info.supportsImages ?? false) && (browserToolEnabled ?? true), + canUseBrowserTool, mcpHub, this.diffStrategy, - browserViewportSize, - mode, + browserViewportSize ?? "900x600", + mode ?? defaultModeSlug, customModePrompts, customModes, customInstructions, @@ -2852,19 +2954,10 @@ export class Task extends EventEmitter implements TaskLike { // Use the shared timestamp so that subtasks respect the same rate-limit // window as their parent tasks. if (Task.lastGlobalApiRequestTime) { - const now = performance.now() // kilocode_change + const now = performance.now() const timeSinceLastRequest = now - Task.lastGlobalApiRequestTime const rateLimit = apiConfiguration?.rateLimitSeconds || 0 - rateLimitDelay = Math.ceil(Math.max(0, rateLimit * 1000 - timeSinceLastRequest) / 1000) - - // kilocode_change start - if (rateLimitDelay > rateLimit) { - console.warn( - `rateLimitDelay ${rateLimitDelay}s is larger than the configured rateLimit ${rateLimit}s; this makes no sense`, - ) - rateLimitDelay = rateLimit - } - // kilocode_change end + rateLimitDelay = Math.ceil(Math.min(rateLimit, Math.max(0, rateLimit * 1000 - timeSinceLastRequest) / 1000)) } // Only show rate limiting message if we're not retrying. If retrying, we'll include the delay there. @@ -2879,7 +2972,7 @@ export class Task extends EventEmitter implements TaskLike { // Update last request time before making the request so that subsequent // requests — even from new subtasks — will honour the provider's rate-limit. 
- Task.lastGlobalApiRequestTime = performance.now() // kilocode_change + Task.lastGlobalApiRequestTime = performance.now() const systemPrompt = await this.getSystemPrompt() this.lastUsedInstructions = systemPrompt @@ -3086,46 +3179,18 @@ export class Task extends EventEmitter implements TaskLike { errorMsg = "Unknown error" } - const baseDelay = requestDelaySeconds || 5 - let exponentialDelay = Math.min( - Math.ceil(baseDelay * Math.pow(2, retryAttempt)), - MAX_EXPONENTIAL_BACKOFF_SECONDS, - ) - - // If the error is a 429, and the error details contain a retry delay, use that delay instead of exponential backoff - if (error.status === 429) { - const geminiRetryDetails = error.errorDetails?.find( - (detail: any) => detail["@type"] === "type.googleapis.com/google.rpc.RetryInfo", - ) - if (geminiRetryDetails) { - const match = geminiRetryDetails?.retryDelay?.match(/^(\d+)s$/) - if (match) { - exponentialDelay = Number(match[1]) + 1 - } - } - } + // Apply shared exponential backoff and countdown UX + await this.backoffAndAnnounce(retryAttempt, error, errorMsg) - // Wait for the greater of the exponential delay or the rate limit delay - const finalDelay = Math.max(exponentialDelay, rateLimitDelay) - - // Show countdown timer with exponential backoff - for (let i = finalDelay; i > 0; i--) { - await this.say( - "api_req_retry_delayed", - `${errorMsg}\n\nRetry attempt ${retryAttempt + 1}\nRetrying in ${i} seconds...`, - undefined, - true, + // CRITICAL: Check if task was aborted during the backoff countdown + // This prevents infinite loops when users cancel during auto-retry + // Without this check, the recursive call below would continue even after abort + if (this.abort) { + throw new Error( + `[Task#attemptApiRequest] task ${this.taskId}.${this.instanceId} aborted during retry`, ) - await delay(1000) } - await this.say( - "api_req_retry_delayed", - `${errorMsg}\n\nRetry attempt ${retryAttempt + 1}\nRetrying now...`, - undefined, - false, - ) - // Delegate generator 
output from the recursive call with // incremented retry count. yield* this.attemptApiRequest(retryAttempt + 1) @@ -3168,6 +3233,79 @@ export class Task extends EventEmitter implements TaskLike { // kilocode_change end } + // Shared exponential backoff for retries (first-chunk and mid-stream) + private async backoffAndAnnounce(retryAttempt: number, error: any, header?: string): Promise { + try { + const state = await this.providerRef.deref()?.getState() + const baseDelay = state?.requestDelaySeconds || 5 + + let exponentialDelay = Math.min( + Math.ceil(baseDelay * Math.pow(2, retryAttempt)), + MAX_EXPONENTIAL_BACKOFF_SECONDS, + ) + + // Respect provider rate limit window + let rateLimitDelay = 0 + const rateLimit = state?.apiConfiguration?.rateLimitSeconds || 0 + if (Task.lastGlobalApiRequestTime && rateLimit > 0) { + const elapsed = performance.now() - Task.lastGlobalApiRequestTime + rateLimitDelay = Math.ceil(Math.min(rateLimit, Math.max(0, rateLimit * 1000 - elapsed) / 1000)) + } + + // Prefer RetryInfo on 429 if present + if (error?.status === 429) { + const retryInfo = error?.errorDetails?.find( + (d: any) => d["@type"] === "type.googleapis.com/google.rpc.RetryInfo", + ) + const match = retryInfo?.retryDelay?.match?.(/^(\d+)s$/) + if (match) { + exponentialDelay = Number(match[1]) + 1 + } + } + + const finalDelay = Math.max(exponentialDelay, rateLimitDelay) + if (finalDelay <= 0) return + + // Build header text; fall back to error message if none provided + let headerText = header + if (!headerText) { + if (error?.error?.metadata?.raw) { + headerText = JSON.stringify(error.error.metadata.raw, null, 2) + } else if (error?.message) { + headerText = error.message + } else { + headerText = "Unknown error" + } + } + headerText = headerText ? 
`${headerText}\n\n` : "" + + // Show countdown timer with exponential backoff + for (let i = finalDelay; i > 0; i--) { + // Check abort flag during countdown to allow early exit + if (this.abort) { + throw new Error(`[Task#${this.taskId}] Aborted during retry countdown`) + } + + await this.say( + "api_req_retry_delayed", + `${headerText}Retry attempt ${retryAttempt + 1}\nRetrying in ${i} seconds...`, + undefined, + true, + ) + await delay(1000) + } + + await this.say( + "api_req_retry_delayed", + `${headerText}Retry attempt ${retryAttempt + 1}\nRetrying now...`, + undefined, + false, + ) + } catch (err) { + console.error("Exponential backoff failed:", err) + } + } + // Checkpoints public async checkpointSave(force: boolean = false, suppressMessage: boolean = false) { @@ -3214,10 +3352,12 @@ export class Task extends EventEmitter implements TaskLike { } /** - * Persist GPT-5 per-turn metadata (previous_response_id, instructions, reasoning_summary) + * Persist GPT-5 per-turn metadata (previous_response_id only) * onto the last complete assistant say("text") message. + * + * Note: We do not persist system instructions or reasoning summaries. */ - private async persistGpt5Metadata(reasoningMessage?: string): Promise { + private async persistGpt5Metadata(): Promise { try { const modelId = this.api.getModel().id if (!modelId || !modelId.startsWith("gpt-5")) return @@ -3236,9 +3376,7 @@ export class Task extends EventEmitter implements TaskLike { } const gpt5Metadata: Gpt5Metadata = { ...(msg.metadata.gpt5 ?? {}), - previous_response_id: lastResponseId, - instructions: this.lastUsedInstructions, - reasoning_summary: (reasoningMessage ?? "").trim() || undefined, + ...(lastResponseId ? 
{ previous_response_id: lastResponseId } : {}), } msg.metadata.gpt5 = gpt5Metadata } diff --git a/src/core/task/__tests__/Task.spec.ts b/src/core/task/__tests__/Task.spec.ts index 9b6e7ecd2c8..70b6df97145 100644 --- a/src/core/task/__tests__/Task.spec.ts +++ b/src/core/task/__tests__/Task.spec.ts @@ -153,6 +153,18 @@ vi.mock("../../environment/getEnvironmentDetails", () => ({ vi.mock("../../ignore/RooIgnoreController") +vi.mock("../../condense", async (importOriginal) => { + const actual = (await importOriginal()) as any + return { + ...actual, + summarizeConversation: vi.fn().mockResolvedValue({ + messages: [{ role: "user", content: [{ type: "text", text: "continued" }], ts: Date.now() }], + summary: "summary", + cost: 0, + newContextTokens: 1, + }), + } +}) // Mock storagePathManager to prevent dynamic import issues. vi.mock("../../../utils/storage", () => ({ getTaskDirectoryPath: vi @@ -530,7 +542,6 @@ describe("Cline", () => { info: { supportsImages: true, supportsPromptCache: true, - supportsComputerUse: true, contextWindow: 200000, maxTokens: 4096, inputPrice: 0.25, @@ -554,7 +565,6 @@ describe("Cline", () => { info: { supportsImages: false, supportsPromptCache: false, - supportsComputerUse: false, contextWindow: 16000, maxTokens: 2048, inputPrice: 0.1, @@ -1118,11 +1128,9 @@ describe("Cline", () => { await parentIterator.next() // Simulate time passing (more than rate limit) - // kilocode_change start: use performance instead of Date const originalPerformanceNow = performance.now const mockTime = performance.now() + (mockApiConfig.rateLimitSeconds + 1) * 1000 performance.now = vi.fn(() => mockTime) - // kilocode_change end // Create a subtask after time has passed const child = new Task({ @@ -1144,9 +1152,8 @@ describe("Cline", () => { // Verify no rate limiting was applied expect(mockDelay).not.toHaveBeenCalled() - // kilocode_change start + // Restore performance.now performance.now = originalPerformanceNow - // kilocode_change end }) it("should share rate 
limiting across multiple subtasks", async () => { @@ -1824,5 +1831,180 @@ describe("Cline", () => { // Restore console.error consoleErrorSpy.mockRestore() }) + describe("Stream Failure Retry", () => { + it("should not abort task on stream failure, only on user cancellation", async () => { + const task = new Task({ + provider: mockProvider, + apiConfiguration: mockApiConfig, + task: "test task", + startTask: false, + context: mockExtensionContext, // kilocode_change + }) + + // Spy on console.error to verify error logging + const consoleErrorSpy = vi.spyOn(console, "error").mockImplementation(() => {}) + + // Spy on abortTask to verify it's NOT called for stream failures + const abortTaskSpy = vi.spyOn(task, "abortTask").mockResolvedValue(undefined) + + // Test Case 1: Stream failure should NOT abort task + task.abort = false + task.abandoned = false + + // Simulate the catch block behavior for stream failure + const streamFailureError = new Error("Stream failed mid-execution") + + // The key assertion: verify that when abort=false, abortTask is NOT called + // This would normally happen in the catch block around line 2184 + const shouldAbort = task.abort + expect(shouldAbort).toBe(false) + + // Verify error would be logged (this is what the new code does) + console.error( + `[Task#${task.taskId}.${task.instanceId}] Stream failed, will retry: ${streamFailureError.message}`, + ) + expect(consoleErrorSpy).toHaveBeenCalledWith(expect.stringContaining("Stream failed, will retry")) + + // Verify abortTask was NOT called + expect(abortTaskSpy).not.toHaveBeenCalled() + + // Test Case 2: User cancellation SHOULD abort task + task.abort = true + + // For user cancellation, abortTask SHOULD be called + if (task.abort) { + await task.abortTask() + } + + expect(abortTaskSpy).toHaveBeenCalled() + + // Restore mocks + consoleErrorSpy.mockRestore() + }) + }) + }) +}) + +describe("Queued message processing after condense", () => { + function createProvider(): any { + const 
storageUri = { fsPath: path.join(os.tmpdir(), "test-storage") } + const ctx = { + globalState: { + get: vi.fn().mockImplementation((_key: keyof GlobalState) => undefined), + update: vi.fn().mockResolvedValue(undefined), + keys: vi.fn().mockReturnValue([]), + }, + globalStorageUri: storageUri, + workspaceState: { + get: vi.fn().mockImplementation((_key) => undefined), + update: vi.fn().mockResolvedValue(undefined), + keys: vi.fn().mockReturnValue([]), + }, + secrets: { + get: vi.fn().mockResolvedValue(undefined), + store: vi.fn().mockResolvedValue(undefined), + delete: vi.fn().mockResolvedValue(undefined), + }, + extensionUri: { fsPath: "/mock/extension/path" }, + extension: { packageJSON: { version: "1.0.0" } }, + } as unknown as vscode.ExtensionContext + + const output = { + appendLine: vi.fn(), + append: vi.fn(), + clear: vi.fn(), + show: vi.fn(), + hide: vi.fn(), + dispose: vi.fn(), + } + + const provider = new ClineProvider(ctx, output as any, "sidebar", new ContextProxy(ctx)) as any + provider.postMessageToWebview = vi.fn().mockResolvedValue(undefined) + provider.postStateToWebview = vi.fn().mockResolvedValue(undefined) + provider.getState = vi.fn().mockResolvedValue({}) + return provider + } + + const apiConfig: ProviderSettings = { + apiProvider: "anthropic", + apiModelId: "claude-3-5-sonnet-20241022", + apiKey: "test-api-key", + } as any + + it("processes queued message after condense completes", async () => { + const provider = createProvider() + const task = new Task({ + provider, + apiConfiguration: apiConfig, + task: "initial task", + startTask: false, + context: provider.context, // kilocode_change + }) + + // Make condense fast + deterministic + vi.spyOn(task as any, "getSystemPrompt").mockResolvedValue("system") + const submitSpy = vi.spyOn(task, "submitUserMessage").mockResolvedValue(undefined) + + // Queue a message during condensing + task.messageQueueService.addMessage("queued text", ["img1.png"]) + + // Use fake timers to capture setTimeout(0) 
in processQueuedMessages + vi.useFakeTimers() + await task.condenseContext() + + // Flush the microtask that submits the queued message + vi.runAllTimers() + vi.useRealTimers() + + expect(submitSpy).toHaveBeenCalledWith("queued text", ["img1.png"]) + expect(task.messageQueueService.isEmpty()).toBe(true) + }) + + it("does not cross-drain queues between separate tasks", async () => { + const providerA = createProvider() + const providerB = createProvider() + + const taskA = new Task({ + provider: providerA, + apiConfiguration: apiConfig, + task: "task A", + startTask: false, + context: providerA.context, // kilocode_change + }) + const taskB = new Task({ + provider: providerB, + apiConfiguration: apiConfig, + task: "task B", + startTask: false, + context: providerB.context, // kilocode_change + }) + + vi.spyOn(taskA as any, "getSystemPrompt").mockResolvedValue("system") + vi.spyOn(taskB as any, "getSystemPrompt").mockResolvedValue("system") + + const spyA = vi.spyOn(taskA, "submitUserMessage").mockResolvedValue(undefined) + const spyB = vi.spyOn(taskB, "submitUserMessage").mockResolvedValue(undefined) + + taskA.messageQueueService.addMessage("A message") + taskB.messageQueueService.addMessage("B message") + + // Condense in task A should only drain A's queue + vi.useFakeTimers() + await taskA.condenseContext() + vi.runAllTimers() + vi.useRealTimers() + + expect(spyA).toHaveBeenCalledWith("A message", undefined) + expect(spyB).not.toHaveBeenCalled() + expect(taskB.messageQueueService.isEmpty()).toBe(false) + + // Now condense in task B should drain B's queue + vi.useFakeTimers() + await taskB.condenseContext() + vi.runAllTimers() + vi.useRealTimers() + + expect(spyB).toHaveBeenCalledWith("B message", undefined) + expect(taskB.messageQueueService.isEmpty()).toBe(true) }) }) diff --git a/src/core/task/__tests__/reasoning-preservation.test.ts b/src/core/task/__tests__/reasoning-preservation.test.ts new file mode 100644 index 00000000000..28d25c9a8e9 --- /dev/null +++ 
b/src/core/task/__tests__/reasoning-preservation.test.ts @@ -0,0 +1,328 @@ +import { describe, it, expect, vi, beforeEach, beforeAll } from "vitest" +import type { ClineProvider } from "../../webview/ClineProvider" +import type { ProviderSettings, ModelInfo } from "@roo-code/types" + +// Mock vscode module before importing Task +vi.mock("vscode", () => ({ + workspace: { + createFileSystemWatcher: vi.fn(() => ({ + onDidCreate: vi.fn(), + onDidChange: vi.fn(), + onDidDelete: vi.fn(), + dispose: vi.fn(), + })), + getConfiguration: vi.fn(() => ({ + get: vi.fn(() => true), + })), + openTextDocument: vi.fn(), + applyEdit: vi.fn(), + }, + RelativePattern: vi.fn((base, pattern) => ({ base, pattern })), + window: { + createOutputChannel: vi.fn(() => ({ + appendLine: vi.fn(), + dispose: vi.fn(), + })), + createTextEditorDecorationType: vi.fn(() => ({ + dispose: vi.fn(), + })), + showTextDocument: vi.fn(), + activeTextEditor: undefined, + }, + Uri: { + file: vi.fn((path) => ({ fsPath: path })), + parse: vi.fn((str) => ({ toString: () => str })), + }, + Range: vi.fn(), + Position: vi.fn(), + WorkspaceEdit: vi.fn(() => ({ + replace: vi.fn(), + insert: vi.fn(), + delete: vi.fn(), + })), + ViewColumn: { + One: 1, + Two: 2, + Three: 3, + }, +})) + +// Mock other dependencies +vi.mock("../../services/mcp/McpServerManager", () => ({ + McpServerManager: { + getInstance: vi.fn().mockResolvedValue(null), + }, +})) + +vi.mock("../../integrations/terminal/TerminalRegistry", () => ({ + TerminalRegistry: { + releaseTerminalsForTask: vi.fn(), + }, +})) + +vi.mock("@roo-code/telemetry", () => ({ + TelemetryService: { + instance: { + captureTaskCreated: vi.fn(), + captureTaskRestarted: vi.fn(), + captureConversationMessage: vi.fn(), + captureLlmCompletion: vi.fn(), + captureConsecutiveMistakeError: vi.fn(), + }, + }, +})) + +describe("Task reasoning preservation", () => { + let mockProvider: Partial + let mockApiConfiguration: ProviderSettings + let Task: any + + beforeAll(async () => { + // 
Import Task after mocks are set up + const taskModule = await import("../Task") + Task = taskModule.Task + }) + + beforeEach(() => { + // Mock provider with necessary methods + mockProvider = { + postStateToWebview: vi.fn().mockResolvedValue(undefined), + getState: vi.fn().mockResolvedValue({ + mode: "code", + experiments: {}, + }), + context: { + globalStorageUri: { fsPath: "/test/storage" }, + extensionPath: "/test/extension", + } as any, + log: vi.fn(), + updateTaskHistory: vi.fn().mockResolvedValue(undefined), + postMessageToWebview: vi.fn().mockResolvedValue(undefined), + } + + mockApiConfiguration = { + apiProvider: "anthropic", + apiKey: "test-key", + } as ProviderSettings + }) + + it("should append reasoning to assistant message when preserveReasoning is true", async () => { + // Create a task instance + const task = new Task({ + provider: mockProvider as ClineProvider, + apiConfiguration: mockApiConfiguration, + task: "Test task", + startTask: false, + }) + + // Mock the API to return a model with preserveReasoning enabled + const mockModelInfo: ModelInfo = { + contextWindow: 16000, + supportsPromptCache: true, + preserveReasoning: true, + } + + task.api = { + getModel: vi.fn().mockReturnValue({ + id: "test-model", + info: mockModelInfo, + }), + } + + // Mock the API conversation history + task.apiConversationHistory = [] + + // Simulate adding an assistant message with reasoning + const assistantMessage = "Here is my response to your question." + const reasoningMessage = "Let me think about this step by step. First, I need to..." 
+ + // Spy on addToApiConversationHistory + const addToApiHistorySpy = vi.spyOn(task as any, "addToApiConversationHistory") + + // Simulate what happens in the streaming loop when preserveReasoning is true + let finalAssistantMessage = assistantMessage + if (reasoningMessage && task.api.getModel().info.preserveReasoning) { + finalAssistantMessage = `${reasoningMessage}\n${assistantMessage}` + } + + await (task as any).addToApiConversationHistory({ + role: "assistant", + content: [{ type: "text", text: finalAssistantMessage }], + }) + + // Verify that reasoning was prepended in tags to the assistant message + expect(addToApiHistorySpy).toHaveBeenCalledWith({ + role: "assistant", + content: [ + { + type: "text", + text: "Let me think about this step by step. First, I need to...\nHere is my response to your question.", + }, + ], + }) + + // Verify the API conversation history contains the message with reasoning + expect(task.apiConversationHistory).toHaveLength(1) + expect(task.apiConversationHistory[0].content[0].text).toContain("") + expect(task.apiConversationHistory[0].content[0].text).toContain("") + expect(task.apiConversationHistory[0].content[0].text).toContain("Here is my response to your question.") + expect(task.apiConversationHistory[0].content[0].text).toContain( + "Let me think about this step by step. 
First, I need to...", + ) + }) + + it("should NOT append reasoning to assistant message when preserveReasoning is false", async () => { + // Create a task instance + const task = new Task({ + provider: mockProvider as ClineProvider, + apiConfiguration: mockApiConfiguration, + task: "Test task", + startTask: false, + }) + + // Mock the API to return a model with preserveReasoning disabled (or undefined) + const mockModelInfo: ModelInfo = { + contextWindow: 16000, + supportsPromptCache: true, + preserveReasoning: false, + } + + task.api = { + getModel: vi.fn().mockReturnValue({ + id: "test-model", + info: mockModelInfo, + }), + } + + // Mock the API conversation history + task.apiConversationHistory = [] + + // Simulate adding an assistant message with reasoning + const assistantMessage = "Here is my response to your question." + const reasoningMessage = "Let me think about this step by step. First, I need to..." + + // Spy on addToApiConversationHistory + const addToApiHistorySpy = vi.spyOn(task as any, "addToApiConversationHistory") + + // Simulate what happens in the streaming loop when preserveReasoning is false + let finalAssistantMessage = assistantMessage + if (reasoningMessage && task.api.getModel().info.preserveReasoning) { + finalAssistantMessage = `${reasoningMessage}\n${assistantMessage}` + } + + await (task as any).addToApiConversationHistory({ + role: "assistant", + content: [{ type: "text", text: finalAssistantMessage }], + }) + + // Verify that reasoning was NOT appended to the assistant message + expect(addToApiHistorySpy).toHaveBeenCalledWith({ + role: "assistant", + content: [{ type: "text", text: "Here is my response to your question." 
}], + }) + + // Verify the API conversation history does NOT contain reasoning + expect(task.apiConversationHistory).toHaveLength(1) + expect(task.apiConversationHistory[0].content[0].text).toBe("Here is my response to your question.") + expect(task.apiConversationHistory[0].content[0].text).not.toContain("") + }) + + it("should handle empty reasoning message gracefully when preserveReasoning is true", async () => { + // Create a task instance + const task = new Task({ + provider: mockProvider as ClineProvider, + apiConfiguration: mockApiConfiguration, + task: "Test task", + startTask: false, + }) + + // Mock the API to return a model with preserveReasoning enabled + const mockModelInfo: ModelInfo = { + contextWindow: 16000, + supportsPromptCache: true, + preserveReasoning: true, + } + + task.api = { + getModel: vi.fn().mockReturnValue({ + id: "test-model", + info: mockModelInfo, + }), + } + + // Mock the API conversation history + task.apiConversationHistory = [] + + const assistantMessage = "Here is my response." + const reasoningMessage = "" // Empty reasoning + + // Spy on addToApiConversationHistory + const addToApiHistorySpy = vi.spyOn(task as any, "addToApiConversationHistory") + + // Simulate what happens in the streaming loop + let finalAssistantMessage = assistantMessage + if (reasoningMessage && task.api.getModel().info.preserveReasoning) { + finalAssistantMessage = `${reasoningMessage}\n${assistantMessage}` + } + + await (task as any).addToApiConversationHistory({ + role: "assistant", + content: [{ type: "text", text: finalAssistantMessage }], + }) + + // Verify that no reasoning tags were added when reasoning is empty + expect(addToApiHistorySpy).toHaveBeenCalledWith({ + role: "assistant", + content: [{ type: "text", text: "Here is my response." 
}], + }) + + // Verify the message doesn't contain reasoning tags + expect(task.apiConversationHistory[0].content[0].text).toBe("Here is my response.") + expect(task.apiConversationHistory[0].content[0].text).not.toContain("") + }) + + it("should handle undefined preserveReasoning (defaults to false)", async () => { + // Create a task instance + const task = new Task({ + provider: mockProvider as ClineProvider, + apiConfiguration: mockApiConfiguration, + task: "Test task", + startTask: false, + }) + + // Mock the API to return a model without preserveReasoning field (undefined) + const mockModelInfo: ModelInfo = { + contextWindow: 16000, + supportsPromptCache: true, + // preserveReasoning is undefined + } + + task.api = { + getModel: vi.fn().mockReturnValue({ + id: "test-model", + info: mockModelInfo, + }), + } + + // Mock the API conversation history + task.apiConversationHistory = [] + + const assistantMessage = "Here is my response." + const reasoningMessage = "Some reasoning here." + + // Simulate what happens in the streaming loop + let finalAssistantMessage = assistantMessage + if (reasoningMessage && task.api.getModel().info.preserveReasoning) { + finalAssistantMessage = `${reasoningMessage}\n${assistantMessage}` + } + + await (task as any).addToApiConversationHistory({ + role: "assistant", + content: [{ type: "text", text: finalAssistantMessage }], + }) + + // Verify reasoning was NOT prepended (undefined defaults to false) + expect(task.apiConversationHistory[0].content[0].text).toBe("Here is my response.") + expect(task.apiConversationHistory[0].content[0].text).not.toContain("") + }) +}) diff --git a/src/core/task/types.ts b/src/core/task/types.ts index 607be51aab3..e3641590a64 100644 --- a/src/core/task/types.ts +++ b/src/core/task/types.ts @@ -12,18 +12,6 @@ export interface Gpt5Metadata { * Used to maintain conversation continuity in subsequent requests */ previous_response_id?: string - - /** - * The system instructions/prompt used for this response 
- * Stored to track what instructions were active when the response was generated - */ - instructions?: string - - /** - * The reasoning summary from GPT-5's reasoning process - * Contains the model's internal reasoning if reasoning mode was enabled - */ - reasoning_summary?: string } /** diff --git a/src/core/tools/__tests__/listCodeDefinitionNamesTool.spec.ts b/src/core/tools/__tests__/listCodeDefinitionNamesTool.spec.ts new file mode 100644 index 00000000000..7a26c2f8eeb --- /dev/null +++ b/src/core/tools/__tests__/listCodeDefinitionNamesTool.spec.ts @@ -0,0 +1,351 @@ +// npx vitest src/core/tools/__tests__/listCodeDefinitionNamesTool.spec.ts + +import { describe, it, expect, vi, beforeEach } from "vitest" +import { listCodeDefinitionNamesTool } from "../listCodeDefinitionNamesTool" +import { Task } from "../../task/Task" +import { ToolUse } from "../../../shared/tools" +import * as treeSitter from "../../../services/tree-sitter" +import fs from "fs/promises" + +// Mock the tree-sitter service +vi.mock("../../../services/tree-sitter", () => ({ + parseSourceCodeDefinitionsForFile: vi.fn(), + parseSourceCodeForDefinitionsTopLevel: vi.fn(), +})) + +// Mock fs module +vi.mock("fs/promises", () => ({ + default: { + stat: vi.fn(), + }, +})) + +describe("listCodeDefinitionNamesTool", () => { + let mockTask: Partial + let mockAskApproval: any + let mockHandleError: any + let mockPushToolResult: any + let mockRemoveClosingTag: any + + beforeEach(() => { + vi.clearAllMocks() + + mockTask = { + cwd: "/test/path", + consecutiveMistakeCount: 0, + recordToolError: vi.fn(), + sayAndCreateMissingParamError: vi.fn(), + ask: vi.fn(), + fileContextTracker: { + trackFileContext: vi.fn(), + }, + providerRef: { + deref: vi.fn(() => ({ + getState: vi.fn(async () => ({ maxReadFileLine: -1 })), + })), + }, + rooIgnoreController: undefined, + } as any + + mockAskApproval = vi.fn(async () => true) + mockHandleError = vi.fn() + mockPushToolResult = vi.fn() + mockRemoveClosingTag = 
vi.fn((tag: string, value: string) => value) + }) + + describe("truncateDefinitionsToLineLimit", () => { + it("should not truncate when maxReadFileLine is -1 (no limit)", async () => { + const mockDefinitions = `# test.ts +10--20 | function foo() { +30--40 | function bar() { +50--60 | function baz() {` + + vi.mocked(treeSitter.parseSourceCodeDefinitionsForFile).mockResolvedValue(mockDefinitions) + + vi.mocked(fs.stat).mockResolvedValue({ + isFile: () => true, + isDirectory: () => false, + } as any) + + mockTask.providerRef = { + deref: vi.fn(() => ({ + getState: vi.fn(async () => ({ maxReadFileLine: -1 })), + })), + } as any + + const block: ToolUse = { + type: "tool_use", + name: "list_code_definition_names", + params: { path: "test.ts" }, + partial: false, + } + + await listCodeDefinitionNamesTool( + mockTask as Task, + block, + mockAskApproval, + mockHandleError, + mockPushToolResult, + mockRemoveClosingTag, + ) + + expect(mockPushToolResult).toHaveBeenCalledWith(mockDefinitions) + }) + + it("should not truncate when maxReadFileLine is 0 (definitions only mode)", async () => { + const mockDefinitions = `# test.ts +10--20 | function foo() { +30--40 | function bar() { +50--60 | function baz() {` + + vi.mocked(treeSitter.parseSourceCodeDefinitionsForFile).mockResolvedValue(mockDefinitions) + + vi.mocked(fs.stat).mockResolvedValue({ + isFile: () => true, + isDirectory: () => false, + } as any) + + mockTask.providerRef = { + deref: vi.fn(() => ({ + getState: vi.fn(async () => ({ maxReadFileLine: 0 })), + })), + } as any + + const block: ToolUse = { + type: "tool_use", + name: "list_code_definition_names", + params: { path: "test.ts" }, + partial: false, + } + + await listCodeDefinitionNamesTool( + mockTask as Task, + block, + mockAskApproval, + mockHandleError, + mockPushToolResult, + mockRemoveClosingTag, + ) + + expect(mockPushToolResult).toHaveBeenCalledWith(mockDefinitions) + }) + + it("should truncate definitions when maxReadFileLine is set", async () => { + 
const mockDefinitions = `# test.ts +10--20 | function foo() { +30--40 | function bar() { +50--60 | function baz() {` + + vi.mocked(treeSitter.parseSourceCodeDefinitionsForFile).mockResolvedValue(mockDefinitions) + + vi.mocked(fs.stat).mockResolvedValue({ + isFile: () => true, + isDirectory: () => false, + } as any) + + mockTask.providerRef = { + deref: vi.fn(() => ({ + getState: vi.fn(async () => ({ maxReadFileLine: 25 })), + })), + } as any + + const block: ToolUse = { + type: "tool_use", + name: "list_code_definition_names", + params: { path: "test.ts" }, + partial: false, + } + + await listCodeDefinitionNamesTool( + mockTask as Task, + block, + mockAskApproval, + mockHandleError, + mockPushToolResult, + mockRemoveClosingTag, + ) + + // Should only include definitions starting at or before line 25 + const expectedResult = `# test.ts +10--20 | function foo() {` + + expect(mockPushToolResult).toHaveBeenCalledWith(expectedResult) + }) + + it("should include definitions that start within limit even if they end beyond it", async () => { + const mockDefinitions = `# test.ts +10--50 | function foo() { +60--80 | function bar() {` + + vi.mocked(treeSitter.parseSourceCodeDefinitionsForFile).mockResolvedValue(mockDefinitions) + + vi.mocked(fs.stat).mockResolvedValue({ + isFile: () => true, + isDirectory: () => false, + } as any) + + mockTask.providerRef = { + deref: vi.fn(() => ({ + getState: vi.fn(async () => ({ maxReadFileLine: 30 })), + })), + } as any + + const block: ToolUse = { + type: "tool_use", + name: "list_code_definition_names", + params: { path: "test.ts" }, + partial: false, + } + + await listCodeDefinitionNamesTool( + mockTask as Task, + block, + mockAskApproval, + mockHandleError, + mockPushToolResult, + mockRemoveClosingTag, + ) + + // Should include foo (starts at 10) but not bar (starts at 60) + const expectedResult = `# test.ts +10--50 | function foo() {` + + expect(mockPushToolResult).toHaveBeenCalledWith(expectedResult) + }) + + it("should handle 
single-line definitions", async () => { + const mockDefinitions = `# test.ts +10 | const foo = 1 +20 | const bar = 2 +30 | const baz = 3` + + vi.mocked(treeSitter.parseSourceCodeDefinitionsForFile).mockResolvedValue(mockDefinitions) + + vi.mocked(fs.stat).mockResolvedValue({ + isFile: () => true, + isDirectory: () => false, + } as any) + + mockTask.providerRef = { + deref: vi.fn(() => ({ + getState: vi.fn(async () => ({ maxReadFileLine: 25 })), + })), + } as any + + const block: ToolUse = { + type: "tool_use", + name: "list_code_definition_names", + params: { path: "test.ts" }, + partial: false, + } + + await listCodeDefinitionNamesTool( + mockTask as Task, + block, + mockAskApproval, + mockHandleError, + mockPushToolResult, + mockRemoveClosingTag, + ) + + // Should include foo and bar but not baz + const expectedResult = `# test.ts +10 | const foo = 1 +20 | const bar = 2` + + expect(mockPushToolResult).toHaveBeenCalledWith(expectedResult) + }) + + it("should preserve header line when truncating", async () => { + const mockDefinitions = `# test.ts +100--200 | function foo() {` + + vi.mocked(treeSitter.parseSourceCodeDefinitionsForFile).mockResolvedValue(mockDefinitions) + + vi.mocked(fs.stat).mockResolvedValue({ + isFile: () => true, + isDirectory: () => false, + } as any) + + mockTask.providerRef = { + deref: vi.fn(() => ({ + getState: vi.fn(async () => ({ maxReadFileLine: 50 })), + })), + } as any + + const block: ToolUse = { + type: "tool_use", + name: "list_code_definition_names", + params: { path: "test.ts" }, + partial: false, + } + + await listCodeDefinitionNamesTool( + mockTask as Task, + block, + mockAskApproval, + mockHandleError, + mockPushToolResult, + mockRemoveClosingTag, + ) + + // Should keep header but exclude all definitions beyond line 50 + const expectedResult = `# test.ts` + + expect(mockPushToolResult).toHaveBeenCalledWith(expectedResult) + }) + }) + + it("should handle missing path parameter", async () => { + const block: ToolUse = { + type: 
"tool_use", + name: "list_code_definition_names", + params: {}, + partial: false, + } + + mockTask.sayAndCreateMissingParamError = vi.fn(async () => "Missing parameter: path") + + await listCodeDefinitionNamesTool( + mockTask as Task, + block, + mockAskApproval, + mockHandleError, + mockPushToolResult, + mockRemoveClosingTag, + ) + + expect(mockTask.consecutiveMistakeCount).toBe(1) + expect(mockTask.recordToolError).toHaveBeenCalledWith("list_code_definition_names") + expect(mockPushToolResult).toHaveBeenCalledWith("Missing parameter: path") + }) + + it("should handle directory path", async () => { + const mockDefinitions = "# Directory definitions" + + vi.mocked(treeSitter.parseSourceCodeForDefinitionsTopLevel).mockResolvedValue(mockDefinitions) + + vi.mocked(fs.stat).mockResolvedValue({ + isFile: () => false, + isDirectory: () => true, + } as any) + + const block: ToolUse = { + type: "tool_use", + name: "list_code_definition_names", + params: { path: "src" }, + partial: false, + } + + await listCodeDefinitionNamesTool( + mockTask as Task, + block, + mockAskApproval, + mockHandleError, + mockPushToolResult, + mockRemoveClosingTag, + ) + + expect(mockPushToolResult).toHaveBeenCalledWith(mockDefinitions) + }) +}) diff --git a/src/core/tools/__tests__/readFileTool.spec.ts b/src/core/tools/__tests__/readFileTool.spec.ts index 026daf55aa3..5c3e38af209 100644 --- a/src/core/tools/__tests__/readFileTool.spec.ts +++ b/src/core/tools/__tests__/readFileTool.spec.ts @@ -201,13 +201,13 @@ function createMockCline(): any { recordToolUsage: vi.fn().mockReturnValue(undefined), recordToolError: vi.fn().mockReturnValue(undefined), didRejectTool: false, + getTokenUsage: vi.fn().mockReturnValue({ + contextTokens: 10000, + }), // CRITICAL: Always ensure image support is enabled api: { getModel: vi.fn().mockReturnValue({ - info: { - supportsImages: true, - contextWindow: 100000, // Add context window for token limit calculations - }, + info: { supportsImages: true, contextWindow: 
200000 }, }), countTokens: vi.fn().mockResolvedValue(100), // Mock countTokens to return a small number }, @@ -419,6 +419,61 @@ describe("read_file tool with maxReadFileLine setting", () => { expect(result).toContain(``) expect(result).toContain("Showing only 3 of 5 total lines") }) + + it("should truncate code definitions when file exceeds maxReadFileLine", async () => { + // Setup - file with 100 lines but we'll only read first 30 + const content = "Line 1\nLine 2\nLine 3" + const numberedContent = "1 | Line 1\n2 | Line 2\n3 | Line 3" + const fullDefinitions = `# file.txt +10--20 | function foo() { +50--60 | function bar() { +80--90 | function baz() {` + const truncatedDefinitions = `# file.txt +10--20 | function foo() {` + + mockedReadLines.mockResolvedValue(content) + mockedParseSourceCodeDefinitionsForFile.mockResolvedValue(fullDefinitions) + addLineNumbersMock.mockReturnValue(numberedContent) + + // Execute with maxReadFileLine = 30 + const result = await executeReadFileTool({}, { maxReadFileLine: 30, totalLines: 100 }) + + // Verify that only definitions within the first 30 lines are included + expect(result).toContain(`${testFilePath}`) + expect(result).toContain(``) + expect(result).toContain(``) + + // Should include foo (starts at line 10) but not bar (starts at line 50) or baz (starts at line 80) + expect(result).toContain("10--20 | function foo()") + expect(result).not.toContain("50--60 | function bar()") + expect(result).not.toContain("80--90 | function baz()") + + expect(result).toContain("Showing only 30 of 100 total lines") + }) + + it("should handle truncation when all definitions are beyond the line limit", async () => { + // Setup - all definitions start after maxReadFileLine + const content = "Line 1\nLine 2\nLine 3" + const numberedContent = "1 | Line 1\n2 | Line 2\n3 | Line 3" + const fullDefinitions = `# file.txt +50--60 | function foo() { +80--90 | function bar() {` + + mockedReadLines.mockResolvedValue(content) + 
mockedParseSourceCodeDefinitionsForFile.mockResolvedValue(fullDefinitions) + addLineNumbersMock.mockReturnValue(numberedContent) + + // Execute with maxReadFileLine = 30 + const result = await executeReadFileTool({}, { maxReadFileLine: 30, totalLines: 100 }) + + // Verify that only the header is included (all definitions filtered out) + expect(result).toContain(`${testFilePath}`) + expect(result).toContain(``) + expect(result).toContain(``) + expect(result).toContain("# file.txt") + expect(result).not.toContain("50--60 | function foo()") + expect(result).not.toContain("80--90 | function bar()") + }) }) describe("when maxReadFileLine equals or exceeds file length", () => { diff --git a/src/core/tools/executeCommandTool.ts b/src/core/tools/executeCommandTool.ts index b32233a0861..b0ae07bf86a 100644 --- a/src/core/tools/executeCommandTool.ts +++ b/src/core/tools/executeCommandTool.ts @@ -67,7 +67,7 @@ export async function executeCommandTool( const { terminalOutputLineLimit = 500, terminalOutputCharacterLimit = DEFAULT_TERMINAL_OUTPUT_CHARACTER_LIMIT, - terminalShellIntegrationDisabled = false, + terminalShellIntegrationDisabled = true, } = providerState ?? 
{} // Get command execution timeout from VSCode configuration (in seconds) @@ -149,7 +149,7 @@ export async function executeCommand( executionId, command, customCwd, - terminalShellIntegrationDisabled = true, // kilocode_change: default + terminalShellIntegrationDisabled = true, terminalOutputLineLimit = 500, terminalOutputCharacterLimit = DEFAULT_TERMINAL_OUTPUT_CHARACTER_LIMIT, commandExecutionTimeout = 0, diff --git a/src/core/tools/helpers/__tests__/fileTokenBudget.spec.ts b/src/core/tools/helpers/__tests__/fileTokenBudget.spec.ts new file mode 100644 index 00000000000..4eea6435a89 --- /dev/null +++ b/src/core/tools/helpers/__tests__/fileTokenBudget.spec.ts @@ -0,0 +1,357 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest" +import { + validateFileTokenBudget, + truncateFileContent, + FILE_SIZE_THRESHOLD, + MAX_FILE_SIZE_FOR_TOKENIZATION, + PREVIEW_SIZE_FOR_LARGE_FILES, +} from "../fileTokenBudget" + +// Mock dependencies +vi.mock("fs/promises", () => ({ + stat: vi.fn(), + readFile: vi.fn(), + open: vi.fn(), +})) + +vi.mock("../../../../utils/countTokens", () => ({ + countTokens: vi.fn(), +})) + +// Import after mocking +const fs = await import("fs/promises") +const { countTokens } = await import("../../../../utils/countTokens") + +const mockStat = vi.mocked(fs.stat) +const mockReadFile = vi.mocked(fs.readFile) +const mockOpen = vi.mocked(fs.open) +const mockCountTokens = vi.mocked(countTokens) + +describe("fileTokenBudget", () => { + beforeEach(() => { + vi.clearAllMocks() + mockOpen.mockReset() + }) + + afterEach(() => { + vi.restoreAllMocks() + }) + + describe("validateFileTokenBudget", () => { + it("should not truncate files smaller than FILE_SIZE_THRESHOLD", async () => { + const filePath = "/test/small-file.txt" + const contextWindow = 200000 + const currentTokens = 10000 + + // Mock file stats - small file (50KB) + mockStat.mockResolvedValue({ + size: 50000, + } as any) + + const result = await validateFileTokenBudget(filePath, 
contextWindow, currentTokens) + + expect(result.shouldTruncate).toBe(false) + expect(mockReadFile).not.toHaveBeenCalled() + expect(mockCountTokens).not.toHaveBeenCalled() + }) + + it("should validate and not truncate large files that fit within budget", async () => { + const filePath = "/test/large-file.txt" + const contextWindow = 200000 + const currentTokens = 10000 + const fileContent = "x".repeat(150000) // 150KB file + + // Mock file stats - large file (150KB) + mockStat.mockResolvedValue({ + size: 150000, + } as any) + + // Mock file read + mockReadFile.mockResolvedValue(fileContent) + + // Mock token counting - file uses 30k tokens (within 60% of 190k remaining = 114k budget) + mockCountTokens.mockResolvedValue(30000) + + const result = await validateFileTokenBudget(filePath, contextWindow, currentTokens) + + expect(result.shouldTruncate).toBe(false) + expect(mockReadFile).toHaveBeenCalledWith(filePath, "utf-8") + expect(mockCountTokens).toHaveBeenCalled() + }) + + it("should truncate large files that exceed token budget", async () => { + const filePath = "/test/huge-file.txt" + const contextWindow = 200000 + const currentTokens = 10000 + const fileContent = "x".repeat(500000) // 500KB file + + // Mock file stats - huge file (500KB) + mockStat.mockResolvedValue({ + size: 500000, + } as any) + + // Mock file read + mockReadFile.mockResolvedValue(fileContent) + + // Mock token counting - file uses 150k tokens (exceeds 60% of 190k remaining = 114k budget) + mockCountTokens.mockResolvedValue(150000) + + const result = await validateFileTokenBudget(filePath, contextWindow, currentTokens) + + expect(result.shouldTruncate).toBe(true) + expect(result.maxChars).toBeDefined() + expect(result.maxChars).toBeGreaterThan(0) + expect(result.reason).toContain("150000 tokens") + expect(result.reason).toContain("114000 tokens available") + }) + + it("should handle case where no budget is available", async () => { + const filePath = "/test/file.txt" + const contextWindow = 
200000 + const currentTokens = 200000 // Context is full + + // Mock file stats - large file + mockStat.mockResolvedValue({ + size: 150000, + } as any) + + const result = await validateFileTokenBudget(filePath, contextWindow, currentTokens) + + expect(result.shouldTruncate).toBe(true) + expect(result.maxChars).toBe(0) + expect(result.reason).toContain("No available context budget") + }) + + it("should handle errors gracefully and not truncate", async () => { + const filePath = "/test/error-file.txt" + const contextWindow = 200000 + const currentTokens = 10000 + + // Mock file stats to throw an error + mockStat.mockRejectedValue(new Error("File not found")) + + const result = await validateFileTokenBudget(filePath, contextWindow, currentTokens) + + expect(result.shouldTruncate).toBe(false) + }) + + it("should calculate correct token budget with 60/40 split", async () => { + const filePath = "/test/file.txt" + const contextWindow = 100000 + const currentTokens = 20000 // 80k remaining + const fileContent = "test content" + + mockStat.mockResolvedValue({ size: 150000 } as any) + mockReadFile.mockResolvedValue(fileContent) + + // Available budget should be: (100000 - 20000) * 0.6 = 48000 + // File uses 50k tokens, should be truncated + mockCountTokens.mockResolvedValue(50000) + + const result = await validateFileTokenBudget(filePath, contextWindow, currentTokens) + + expect(result.shouldTruncate).toBe(true) + // maxChars should be approximately 48000 * 3 = 144000 + expect(result.maxChars).toBe(144000) + }) + + it("should validate files at the FILE_SIZE_THRESHOLD boundary", async () => { + const filePath = "/test/boundary-file.txt" + const contextWindow = 200000 + const currentTokens = 10000 + const fileContent = "x".repeat(1000) + + // Mock file stats - exactly at threshold (should trigger validation) + mockStat.mockResolvedValue({ + size: FILE_SIZE_THRESHOLD, + } as any) + + mockReadFile.mockResolvedValue(fileContent) + mockCountTokens.mockResolvedValue(30000) // 
Within budget + + const result = await validateFileTokenBudget(filePath, contextWindow, currentTokens) + + // At exactly the threshold, it should validate + expect(mockReadFile).toHaveBeenCalled() + expect(mockCountTokens).toHaveBeenCalled() + expect(result.shouldTruncate).toBe(false) + }) + + it("should provide preview for files exceeding MAX_FILE_SIZE_FOR_TOKENIZATION", async () => { + const filePath = "/test/huge-file.txt" + const contextWindow = 200000 + const currentTokens = 10000 + const previewContent = "x".repeat(PREVIEW_SIZE_FOR_LARGE_FILES) + + // Mock file stats - file exceeds max tokenization size (e.g., 10MB when max is 5MB) + mockStat.mockResolvedValue({ + size: MAX_FILE_SIZE_FOR_TOKENIZATION + 1000000, // 1MB over the limit + } as any) + + // Mock file.open and read for preview + const mockRead = vi.fn().mockResolvedValue({ + bytesRead: PREVIEW_SIZE_FOR_LARGE_FILES, + }) + const mockClose = vi.fn().mockResolvedValue(undefined) + mockOpen.mockResolvedValue({ + read: mockRead, + close: mockClose, + } as any) + + // Mock token counting for the preview + mockCountTokens.mockResolvedValue(30000) // Preview fits within budget + + const result = await validateFileTokenBudget(filePath, contextWindow, currentTokens) + + expect(result.shouldTruncate).toBe(true) + expect(result.isPreview).toBe(true) + expect(result.reason).toContain("too large") + expect(result.reason).toContain("preview") + // Should read preview and count tokens + expect(mockOpen).toHaveBeenCalled() + expect(mockCountTokens).toHaveBeenCalled() + }) + + it("should handle files exactly at MAX_FILE_SIZE_FOR_TOKENIZATION boundary", async () => { + const filePath = "/test/boundary-file.txt" + const contextWindow = 200000 + const currentTokens = 10000 + const fileContent = "x".repeat(1000) + + // Mock file stats - exactly at max size + mockStat.mockResolvedValue({ + size: MAX_FILE_SIZE_FOR_TOKENIZATION, + } as any) + + mockReadFile.mockResolvedValue(fileContent) + 
mockCountTokens.mockResolvedValue(30000) // Within budget + + const result = await validateFileTokenBudget(filePath, contextWindow, currentTokens) + + // At exactly the limit, should still attempt to tokenize + expect(mockReadFile).toHaveBeenCalled() + expect(mockCountTokens).toHaveBeenCalled() + }) + + it("should handle tokenizer unreachable errors gracefully", async () => { + const filePath = "/test/problematic-file.txt" + const contextWindow = 200000 + const currentTokens = 10000 + const fileContent = "x".repeat(200000) // Content that might cause issues + + // Mock file stats - within size limits but content causes tokenizer crash + mockStat.mockResolvedValue({ + size: 200000, + } as any) + + mockReadFile.mockResolvedValue(fileContent) + // Simulate tokenizer "unreachable" error + mockCountTokens.mockRejectedValue(new Error("unreachable")) + + const result = await validateFileTokenBudget(filePath, contextWindow, currentTokens) + + // Should fallback with conservative estimation + const remainingTokens = contextWindow - currentTokens + const safeReadBudget = Math.floor(remainingTokens * 0.6) // 114000 + + expect(result.shouldTruncate).toBe(true) + expect(result.isPreview).toBe(true) + expect(result.reason).toContain("tokenizer error") + + // The actual maxChars depends on conservative estimation + // content.length (200000) is used as estimate since tokenizer failed + expect(result.maxChars).toBeDefined() + expect(typeof result.maxChars).toBe("number") + }) + + it("should handle other tokenizer errors conservatively", async () => { + const filePath = "/test/error-file.txt" + const contextWindow = 200000 + const currentTokens = 10000 + const fileContent = "test content" + + mockStat.mockResolvedValue({ size: 150000 } as any) + mockReadFile.mockResolvedValue(fileContent) + // Simulate a different error + mockCountTokens.mockRejectedValue(new Error("Network error")) + + const result = await validateFileTokenBudget(filePath, contextWindow, currentTokens) + + // 
Should return safe fallback (don't truncate, let normal error handling take over) + expect(result.shouldTruncate).toBe(false) + }) + }) + + describe("truncateFileContent", () => { + it("should truncate content to specified character limit", () => { + const content = "a".repeat(1000) + const maxChars = 500 + const totalChars = 1000 + + const result = truncateFileContent(content, maxChars, totalChars, false) + + expect(result.content).toHaveLength(500) + expect(result.content).toBe("a".repeat(500)) + expect(result.notice).toContain("500 of 1000 characters") + expect(result.notice).toContain("context limitations") + }) + + it("should show preview message for large files", () => { + const content = "x".repeat(10000000) // ~10MB (9.54MB in binary) + const maxChars = 100000 // 100KB preview + const totalChars = 10000000 + + const result = truncateFileContent(content, maxChars, totalChars, true) + + expect(result.content).toHaveLength(maxChars) + expect(result.notice).toContain("Preview") + expect(result.notice).toContain("0.1MB") // 100KB = 0.1MB + expect(result.notice).toContain("9.54MB") // Binary MB calculation + expect(result.notice).toContain("line_range") + }) + + it("should include helpful notice about using line_range", () => { + const content = "test content that is very long" + const maxChars = 10 + const totalChars = 31 + + const result = truncateFileContent(content, maxChars, totalChars) + + expect(result.notice).toContain("line_range") + expect(result.notice).toContain("specific sections") + }) + + it("should handle empty content", () => { + const content = "" + const maxChars = 100 + const totalChars = 0 + + const result = truncateFileContent(content, maxChars, totalChars) + + expect(result.content).toBe("") + expect(result.notice).toContain("0 of 0 characters") + }) + + it("should truncate multi-line content correctly", () => { + const content = "line1\nline2\nline3\nline4\nline5" + const maxChars = 15 + const totalChars = content.length + + const result = 
truncateFileContent(content, maxChars, totalChars) + + expect(result.content).toBe("line1\nline2\nlin") + expect(result.content).toHaveLength(15) + }) + + it("should work with unicode characters", () => { + const content = "Hello 😀 World 🌍 Test 🎉" + const maxChars = 10 + const totalChars = content.length + + const result = truncateFileContent(content, maxChars, totalChars) + + expect(result.content).toHaveLength(10) + expect(result.notice).toBeDefined() + }) + }) +}) diff --git a/src/core/tools/helpers/__tests__/truncateDefinitions.spec.ts b/src/core/tools/helpers/__tests__/truncateDefinitions.spec.ts new file mode 100644 index 00000000000..a221b574055 --- /dev/null +++ b/src/core/tools/helpers/__tests__/truncateDefinitions.spec.ts @@ -0,0 +1,160 @@ +import { describe, it, expect } from "vitest" +import { truncateDefinitionsToLineLimit } from "../truncateDefinitions" + +describe("truncateDefinitionsToLineLimit", () => { + it("should not truncate when maxReadFileLine is -1 (no limit)", () => { + const definitions = `# test.ts +10--20 | function foo() { +30--40 | function bar() { +50--60 | function baz() {` + + const result = truncateDefinitionsToLineLimit(definitions, -1) + expect(result).toBe(definitions) + }) + + it("should not truncate when maxReadFileLine is 0 (definitions only mode)", () => { + const definitions = `# test.ts +10--20 | function foo() { +30--40 | function bar() { +50--60 | function baz() {` + + const result = truncateDefinitionsToLineLimit(definitions, 0) + expect(result).toBe(definitions) + }) + + it("should truncate definitions beyond the line limit", () => { + const definitions = `# test.ts +10--20 | function foo() { +30--40 | function bar() { +50--60 | function baz() {` + + const result = truncateDefinitionsToLineLimit(definitions, 25) + const expected = `# test.ts +10--20 | function foo() {` + + expect(result).toBe(expected) + }) + + it("should include definitions that start within limit even if they end beyond it", () => { + const 
definitions = `# test.ts +10--50 | function foo() { +60--80 | function bar() {` + + const result = truncateDefinitionsToLineLimit(definitions, 30) + const expected = `# test.ts +10--50 | function foo() {` + + expect(result).toBe(expected) + }) + + it("should handle single-line definitions", () => { + const definitions = `# test.ts +10 | const foo = 1 +20 | const bar = 2 +30 | const baz = 3` + + const result = truncateDefinitionsToLineLimit(definitions, 25) + const expected = `# test.ts +10 | const foo = 1 +20 | const bar = 2` + + expect(result).toBe(expected) + }) + + it("should preserve header line when all definitions are beyond limit", () => { + const definitions = `# test.ts +100--200 | function foo() {` + + const result = truncateDefinitionsToLineLimit(definitions, 50) + const expected = `# test.ts` + + expect(result).toBe(expected) + }) + + it("should handle empty definitions", () => { + const definitions = `# test.ts` + + const result = truncateDefinitionsToLineLimit(definitions, 50) + expect(result).toBe(definitions) + }) + + it("should handle definitions without header", () => { + const definitions = `10--20 | function foo() { +30--40 | function bar() {` + + const result = truncateDefinitionsToLineLimit(definitions, 25) + const expected = `10--20 | function foo() {` + + expect(result).toBe(expected) + }) + + it("should not preserve empty lines (only definition lines)", () => { + const definitions = `# test.ts +10--20 | function foo() { + +30--40 | function bar() {` + + const result = truncateDefinitionsToLineLimit(definitions, 25) + const expected = `# test.ts +10--20 | function foo() {` + + expect(result).toBe(expected) + }) + + it("should handle mixed single and range definitions", () => { + const definitions = `# test.ts +5 | const x = 1 +10--20 | function foo() { +25 | const y = 2 +30--40 | function bar() {` + + const result = truncateDefinitionsToLineLimit(definitions, 26) + const expected = `# test.ts +5 | const x = 1 +10--20 | function foo() { +25 | 
const y = 2` + + expect(result).toBe(expected) + }) + + it("should handle definitions at exactly the limit", () => { + const definitions = `# test.ts +10--20 | function foo() { +30--40 | function bar() { +50--60 | function baz() {` + + const result = truncateDefinitionsToLineLimit(definitions, 30) + const expected = `# test.ts +10--20 | function foo() { +30--40 | function bar() {` + + expect(result).toBe(expected) + }) + + it("should handle definitions with leading whitespace", () => { + const definitions = `# test.ts + 10--20 | function foo() { + 30--40 | function bar() { + 50--60 | function baz() {` + + const result = truncateDefinitionsToLineLimit(definitions, 25) + const expected = `# test.ts + 10--20 | function foo() {` + + expect(result).toBe(expected) + }) + + it("should handle definitions with mixed whitespace patterns", () => { + const definitions = `# test.ts +10--20 | function foo() { + 30--40 | function bar() { + 50--60 | function baz() {` + + const result = truncateDefinitionsToLineLimit(definitions, 35) + const expected = `# test.ts +10--20 | function foo() { + 30--40 | function bar() {` + + expect(result).toBe(expected) + }) +}) diff --git a/src/core/tools/helpers/fileTokenBudget.ts b/src/core/tools/helpers/fileTokenBudget.ts new file mode 100644 index 00000000000..ad82f8fb410 --- /dev/null +++ b/src/core/tools/helpers/fileTokenBudget.ts @@ -0,0 +1,228 @@ +import * as fs from "fs/promises" +import { countTokens } from "../../../utils/countTokens" +import { Anthropic } from "@anthropic-ai/sdk" +import { countFileLinesAndTokens } from "../../../integrations/misc/line-counter" + +/** + * File size threshold (in bytes) above which token validation is triggered. + * Files smaller than this are read without token counting overhead. + */ +export const FILE_SIZE_THRESHOLD = 100_000 // 100KB + +/** + * Absolute maximum file size (in bytes) that will be read for token validation. + * Files larger than this cannot be tokenized due to tokenizer limitations. 
+ * This prevents WASM "unreachable" errors in tiktoken. + */ +export const MAX_FILE_SIZE_FOR_TOKENIZATION = 5_000_000 // 5MB + +/** + * Size of preview to read from files that exceed MAX_FILE_SIZE_FOR_TOKENIZATION. + * This allows the agent to see the beginning of large files without crashing. + */ +export const PREVIEW_SIZE_FOR_LARGE_FILES = 100_000 // 100KB + +/** + * Percentage of available context to reserve for file reading. + * The remaining percentage is reserved for the model's response and overhead. + */ +export const FILE_READ_BUDGET_PERCENT = 0.6 // 60% for file, 40% for response + +/** + * Result of token budget validation for a file. + */ +export interface TokenBudgetResult { + /** Whether the file content should be truncated */ + shouldTruncate: boolean + /** The maximum number of characters allowed (only relevant if shouldTruncate is true) */ + maxChars?: number + /** Human-readable reason for truncation */ + reason?: string + /** Whether this is a preview of a larger file (only showing beginning) */ + isPreview?: boolean +} + +/** + * Validates whether a file's content fits within the available token budget. + * + * Strategy: + * 1. Files < 100KB: Skip validation (fast path) + * 2. Files >= 100KB: Count tokens and check against budget + * 3. 
Budget = (contextWindow - currentTokens) * 0.6 + * + * @param filePath - Path to the file to validate + * @param contextWindow - Total context window size in tokens + * @param currentTokens - Current token usage + * @returns TokenBudgetResult indicating whether to truncate and at what character limit + */ +export async function validateFileTokenBudget( + filePath: string, + contextWindow: number, + currentTokens: number, +): Promise { + try { + // Check file size first (fast path) + const stats = await fs.stat(filePath) + const fileSizeBytes = stats.size + + // Fast path: small files always pass + if (fileSizeBytes < FILE_SIZE_THRESHOLD) { + return { shouldTruncate: false } + } + + // Calculate available token budget + const remainingTokens = contextWindow - currentTokens + const safeReadBudget = Math.floor(remainingTokens * FILE_READ_BUDGET_PERCENT) + + // If we don't have enough budget, truncate immediately without reading + if (safeReadBudget <= 0) { + return { + shouldTruncate: true, + maxChars: 0, + reason: "No available context budget for file reading", + } + } + + // For files too large to tokenize entirely, read a preview instead + // The tokenizer (tiktoken WASM) crashes with "unreachable" errors on very large files + const isPreviewMode = fileSizeBytes > MAX_FILE_SIZE_FOR_TOKENIZATION + + // Use streaming token counter for normal-sized files to avoid double read + // For previews, still use direct read since we're only reading a portion + let tokenCount = 0 + let streamingSucceeded = false + + if (!isPreviewMode) { + // Try streaming token estimation first (single pass, early exit capability) + try { + const result = await countFileLinesAndTokens(filePath, { + budgetTokens: safeReadBudget, + chunkLines: 256, + }) + tokenCount = result.tokenEstimate + streamingSucceeded = true + + // If streaming indicated we exceeded budget during scan + if (!result.complete) { + // Early exit - we know file exceeds budget without reading it all + const maxChars = 
Math.floor(safeReadBudget * 3) + return { + shouldTruncate: true, + maxChars, + reason: `File requires ${tokenCount}+ tokens but only ${safeReadBudget} tokens available in context budget`, + } + } + } catch (error) { + // Streaming failed - will fallback to full read below + streamingSucceeded = false + } + } + + // Fallback to full read + token count (for preview mode or if streaming failed) + if (!streamingSucceeded) { + let content: string + + if (isPreviewMode) { + // Read only the preview portion to avoid tokenizer crashes + const fileHandle = await fs.open(filePath, "r") + try { + const buffer = Buffer.alloc(PREVIEW_SIZE_FOR_LARGE_FILES) + const { bytesRead } = await fileHandle.read(buffer, 0, PREVIEW_SIZE_FOR_LARGE_FILES, 0) + content = buffer.slice(0, bytesRead).toString("utf-8") + } finally { + await fileHandle.close() + } + } else { + // Read the entire file for normal-sized files + content = await fs.readFile(filePath, "utf-8") + } + + // Count tokens with error handling + try { + const contentBlocks: Anthropic.Messages.ContentBlockParam[] = [{ type: "text", text: content }] + tokenCount = await countTokens(contentBlocks) + } catch (error) { + // Catch tokenizer "unreachable" errors + const errorMessage = error instanceof Error ? error.message : String(error) + if (errorMessage.includes("unreachable")) { + // Use conservative estimation: 2 chars = 1 token + const estimatedTokens = Math.ceil(content.length / 2) + if (estimatedTokens > safeReadBudget) { + return { + shouldTruncate: true, + maxChars: safeReadBudget, + isPreview: true, + reason: `File content caused tokenizer error. Showing truncated preview to fit context budget. Use line_range to read specific sections.`, + } + } + return { + shouldTruncate: true, + maxChars: content.length, + isPreview: true, + reason: `File content caused tokenizer error but fits in context. 
Use line_range for specific sections.`, + } + } + throw error + } + } + + // Check if content exceeds budget + if (tokenCount > safeReadBudget) { + const maxChars = Math.floor(safeReadBudget * 3) + return { + shouldTruncate: true, + maxChars, + isPreview: isPreviewMode, + reason: isPreviewMode + ? `Preview of large file (${(fileSizeBytes / 1024 / 1024).toFixed(2)}MB) truncated to fit context budget. Use line_range to read specific sections.` + : `File requires ${tokenCount} tokens but only ${safeReadBudget} tokens available in context budget`, + } + } + + // Content fits within budget + if (isPreviewMode) { + return { + shouldTruncate: true, + maxChars: PREVIEW_SIZE_FOR_LARGE_FILES, + isPreview: true, + reason: `File is too large (${(fileSizeBytes / 1024 / 1024).toFixed(2)}MB) to read entirely. Showing preview of first ${(PREVIEW_SIZE_FOR_LARGE_FILES / 1024 / 1024).toFixed(1)}MB. Use line_range to read specific sections.`, + } + } + + // File fits within budget + return { shouldTruncate: false } + } catch (error) { + // On error, be conservative and don't truncate + // This allows the existing error handling to take over + console.warn(`[fileTokenBudget] Error validating file ${filePath}:`, error) + return { shouldTruncate: false } + } +} + +/** + * Truncates file content to fit within the specified character limit. + * Adds a notice message at the end to inform the user about truncation. 
+ * + * @param content - The full file content + * @param maxChars - Maximum number of characters to keep + * @param totalChars - Total number of characters in the original file + * @param isPreview - Whether this is a preview of a larger file (not token-budget limited) + * @returns Object containing truncated content and a notice message + */ +export function truncateFileContent( + content: string, + maxChars: number, + totalChars: number, + isPreview: boolean = false, +): { content: string; notice: string } { + const truncatedContent = content.slice(0, maxChars) + + const notice = isPreview + ? `Preview: Showing first ${(maxChars / 1024 / 1024).toFixed(1)}MB of ${(totalChars / 1024 / 1024).toFixed(2)}MB file. Use line_range to read specific sections.` + : `File truncated to ${maxChars} of ${totalChars} characters due to context limitations. Use line_range to read specific sections if needed.` + + return { + content: truncatedContent, + notice, + } +} diff --git a/src/core/tools/helpers/truncateDefinitions.ts b/src/core/tools/helpers/truncateDefinitions.ts new file mode 100644 index 00000000000..7c193ef52a5 --- /dev/null +++ b/src/core/tools/helpers/truncateDefinitions.ts @@ -0,0 +1,44 @@ +/** + * Truncate code definitions to only include those within the line limit + * @param definitions - The full definitions string from parseSourceCodeDefinitionsForFile + * @param maxReadFileLine - Maximum line number to include (-1 for no limit, 0 for definitions only) + * @returns Truncated definitions string + */ +export function truncateDefinitionsToLineLimit(definitions: string, maxReadFileLine: number): string { + // If no limit or definitions-only mode (0), return as-is + if (maxReadFileLine <= 0) { + return definitions + } + + const lines = definitions.split("\n") + const result: string[] = [] + let startIndex = 0 + + // Keep the header line (e.g., "# filename.ts") + if (lines.length > 0 && lines[0].startsWith("#")) { + result.push(lines[0]) + startIndex = 1 + } + + // 
Process definition lines + for (let i = startIndex; i < lines.length; i++) { + const line = lines[i] + + // Match definition format: "startLine--endLine | content" or "lineNumber | content" + // Allow optional leading whitespace to handle indented output or CRLF artifacts + const rangeMatch = line.match(/^\s*(\d+)(?:--(\d+))?\s*\|/) + + if (rangeMatch) { + const startLine = parseInt(rangeMatch[1], 10) + + // Only include definitions that start within the truncated range + if (startLine <= maxReadFileLine) { + result.push(line) + } + } + // Note: We don't preserve empty lines or other non-definition content + // as they're not part of the actual code definitions + } + + return result.join("\n") +} diff --git a/src/core/tools/listCodeDefinitionNamesTool.ts b/src/core/tools/listCodeDefinitionNamesTool.ts index 6ceec0a7257..0ec80ce9bd0 100644 --- a/src/core/tools/listCodeDefinitionNamesTool.ts +++ b/src/core/tools/listCodeDefinitionNamesTool.ts @@ -8,6 +8,7 @@ import { getReadablePath } from "../../utils/path" import { isPathOutsideWorkspace } from "../../utils/pathUtils" import { parseSourceCodeForDefinitionsTopLevel, parseSourceCodeDefinitionsForFile } from "../../services/tree-sitter" import { RecordSource } from "../context-tracking/FileContextTrackerTypes" +import { truncateDefinitionsToLineLimit } from "./helpers/truncateDefinitions" export async function listCodeDefinitionNamesTool( cline: Task, @@ -51,7 +52,14 @@ export async function listCodeDefinitionNamesTool( if (stats.isFile()) { const fileResult = await parseSourceCodeDefinitionsForFile(absolutePath, cline.rooIgnoreController) - result = fileResult ?? "No source code definitions found in cline file." + + // Apply truncation based on maxReadFileLine setting + if (fileResult) { + const { maxReadFileLine = -1 } = (await cline.providerRef.deref()?.getState()) ?? {} + result = truncateDefinitionsToLineLimit(fileResult, maxReadFileLine) + } else { + result = "No source code definitions found in file." 
+ } } else if (stats.isDirectory()) { result = await parseSourceCodeForDefinitionsTopLevel(absolutePath, cline.rooIgnoreController) } else { diff --git a/src/core/tools/readFileTool.ts b/src/core/tools/readFileTool.ts index 3e389c54975..da2817bef25 100644 --- a/src/core/tools/readFileTool.ts +++ b/src/core/tools/readFileTool.ts @@ -23,6 +23,8 @@ import { processImageFile, ImageMemoryTracker, } from "./helpers/imageHelpers" +import { validateFileTokenBudget, truncateFileContent } from "./helpers/fileTokenBudget" +import { truncateDefinitionsToLineLimit } from "./helpers/truncateDefinitions" export function getReadFileToolDescription(blockName: string, blockParams: any): string { // Handle both single path and multiple files via args @@ -601,7 +603,9 @@ export async function readFileTool( try { const defResult = await parseSourceCodeDefinitionsForFile(fullPath, cline.rooIgnoreController) if (defResult) { - xmlInfo += `${defResult}\n` + // Truncate definitions to match the truncated file content + const truncatedDefs = truncateDefinitionsToLineLimit(defResult, maxReadFileLine) + xmlInfo += `${truncatedDefs}\n` } xmlInfo += `Showing only ${maxReadFileLine} of ${totalLines} total lines. 
Use line_range if you need to read more lines\n` updateFileResult(relPath, { @@ -619,8 +623,16 @@ export async function readFileTool( continue } - // Handle normal file read - const content = await extractTextFromFile(fullPath) + // Handle normal file read with token budget validation + const modelInfo = cline.api.getModel().info + const { contextTokens } = cline.getTokenUsage() + const contextWindow = modelInfo.contextWindow + + // Validate if file fits within token budget + const budgetResult = await validateFileTokenBudget(fullPath, contextWindow, contextTokens || 0) + + let content = await extractTextFromFile(fullPath) + let xmlInfo = "" // kilocode_change start: limit output size based on token count const blockResult = await blockFileReadWhenTooLarge(cline, relPath, content) @@ -630,11 +642,32 @@ export async function readFileTool( } // kilocode_change end - const lineRangeAttr = ` lines="1-${totalLines}"` - let xmlInfo = totalLines > 0 ? `\n${content}\n` : `` + if (budgetResult.shouldTruncate && budgetResult.maxChars !== undefined) { + // Truncate the content to fit budget or show preview for large files + const truncateResult = truncateFileContent( + content, + budgetResult.maxChars, + content.length, + budgetResult.isPreview, + ) + content = truncateResult.content - if (totalLines === 0) { - xmlInfo += `File is empty\n` + // Reflect actual displayed line count after truncation (count ALL lines, including empty) + // Handle trailing newline: "line1\nline2\n" should be 2 lines, not 3 + let displayedLines = content.length === 0 ? 0 : content.split(/\r?\n/).length + if (displayedLines > 0 && content.endsWith("\n")) { + displayedLines-- + } + const lineRangeAttr = displayedLines > 0 ? ` lines="1-${displayedLines}"` : "" + xmlInfo = content.length > 0 ? `\n${content}\n` : `` + xmlInfo += `${truncateResult.notice}\n` + } else { + const lineRangeAttr = ` lines="1-${totalLines}"` + xmlInfo = totalLines > 0 ? 
`\n${content}\n` : `` + + if (totalLines === 0) { + xmlInfo += `File is empty\n` + } } // Track file read diff --git a/src/core/tools/searchAndReplaceTool.ts b/src/core/tools/searchAndReplaceTool.ts deleted file mode 100644 index b0ee3947e1e..00000000000 --- a/src/core/tools/searchAndReplaceTool.ts +++ /dev/null @@ -1,282 +0,0 @@ -// Core Node.js imports -import path from "path" -import fs from "fs/promises" -import delay from "delay" - -// Internal imports -import { Task } from "../task/Task" -import { AskApproval, HandleError, PushToolResult, RemoveClosingTag, ToolUse } from "../../shared/tools" -import { formatResponse } from "../prompts/responses" -import { ClineSayTool } from "../../shared/ExtensionMessage" -import { getReadablePath } from "../../utils/path" -import { fileExistsAtPath } from "../../utils/fs" -import { RecordSource } from "../context-tracking/FileContextTrackerTypes" -import { DEFAULT_WRITE_DELAY_MS } from "@roo-code/types" -import { EXPERIMENT_IDS, experiments } from "../../shared/experiments" - -/** - * Tool for performing search and replace operations on files - * Supports regex and case-sensitive/insensitive matching - */ - -/** - * Validates required parameters for search and replace operation - */ -async function validateParams( - cline: Task, - relPath: string | undefined, - search: string | undefined, - replace: string | undefined, - pushToolResult: PushToolResult, -): Promise { - if (!relPath) { - cline.consecutiveMistakeCount++ - cline.recordToolError("search_and_replace") - pushToolResult(await cline.sayAndCreateMissingParamError("search_and_replace", "path")) - return false - } - - if (!search) { - cline.consecutiveMistakeCount++ - cline.recordToolError("search_and_replace") - pushToolResult(await cline.sayAndCreateMissingParamError("search_and_replace", "search")) - return false - } - - if (replace === undefined) { - cline.consecutiveMistakeCount++ - cline.recordToolError("search_and_replace") - pushToolResult(await 
cline.sayAndCreateMissingParamError("search_and_replace", "replace")) - return false - } - - return true -} - -/** - * Performs search and replace operations on a file - * @param cline - Cline instance - * @param block - Tool use parameters - * @param askApproval - Function to request user approval - * @param handleError - Function to handle errors - * @param pushToolResult - Function to push tool results - * @param removeClosingTag - Function to remove closing tags - */ -export async function searchAndReplaceTool( - cline: Task, - block: ToolUse, - askApproval: AskApproval, - handleError: HandleError, - pushToolResult: PushToolResult, - removeClosingTag: RemoveClosingTag, -): Promise { - // Extract and validate parameters - const relPath: string | undefined = block.params.path - const search: string | undefined = block.params.search - const replace: string | undefined = block.params.replace - const useRegex: boolean = block.params.use_regex === "true" - const ignoreCase: boolean = block.params.ignore_case === "true" - const startLine: number | undefined = block.params.start_line ? parseInt(block.params.start_line, 10) : undefined - const endLine: number | undefined = block.params.end_line ? 
parseInt(block.params.end_line, 10) : undefined - - try { - // Handle partial tool use - if (block.partial) { - const partialMessageProps = { - tool: "searchAndReplace" as const, - path: getReadablePath(cline.cwd, removeClosingTag("path", relPath)), - search: removeClosingTag("search", search), - replace: removeClosingTag("replace", replace), - useRegex: block.params.use_regex === "true", - ignoreCase: block.params.ignore_case === "true", - startLine, - endLine, - } - await cline.ask("tool", JSON.stringify(partialMessageProps), block.partial).catch(() => {}) - return - } - - // Validate required parameters - if (!(await validateParams(cline, relPath, search, replace, pushToolResult))) { - return - } - - // At this point we know relPath, search and replace are defined - const validRelPath = relPath as string - const validSearch = search as string - const validReplace = replace as string - - const sharedMessageProps: ClineSayTool = { - tool: "searchAndReplace", - path: getReadablePath(cline.cwd, validRelPath), - search: validSearch, - replace: validReplace, - useRegex: useRegex, - ignoreCase: ignoreCase, - startLine: startLine, - endLine: endLine, - } - - const accessAllowed = cline.rooIgnoreController?.validateAccess(validRelPath) - - if (!accessAllowed) { - await cline.say("rooignore_error", validRelPath) - pushToolResult(formatResponse.toolError(formatResponse.rooIgnoreError(validRelPath))) - return - } - - // Check if file is write-protected - const isWriteProtected = cline.rooProtectedController?.isWriteProtected(validRelPath) || false - - const absolutePath = path.resolve(cline.cwd, validRelPath) - const fileExists = await fileExistsAtPath(absolutePath) - - if (!fileExists) { - cline.consecutiveMistakeCount++ - cline.recordToolError("search_and_replace") - const formattedError = formatResponse.toolError( - `File does not exist at path: ${absolutePath}\nThe specified file could not be found. 
Please verify the file path and try again.`, - ) - await cline.say("error", formattedError) - pushToolResult(formattedError) - return - } - - // Reset consecutive mistakes since all validations passed - cline.consecutiveMistakeCount = 0 - - // Read and process file content - let fileContent: string - try { - fileContent = await fs.readFile(absolutePath, "utf-8") - } catch (error) { - cline.consecutiveMistakeCount++ - cline.recordToolError("search_and_replace") - const errorMessage = `Error reading file: ${absolutePath}\nFailed to read the file content: ${ - error instanceof Error ? error.message : String(error) - }\nPlease verify file permissions and try again.` - const formattedError = formatResponse.toolError(errorMessage) - await cline.say("error", formattedError) - pushToolResult(formattedError) - return - } - - // Create search pattern and perform replacement - const flags = ignoreCase ? "gi" : "g" - const searchPattern = useRegex ? new RegExp(validSearch, flags) : new RegExp(escapeRegExp(validSearch), flags) - - let newContent: string - if (startLine !== undefined || endLine !== undefined) { - // Handle line-specific replacement - const lines = fileContent.split("\n") - const start = Math.max((startLine ?? 1) - 1, 0) - const end = Math.min((endLine ?? 
lines.length) - 1, lines.length - 1) - - // Get content before and after target section - const beforeLines = lines.slice(0, start) - const afterLines = lines.slice(end + 1) - - // Get and modify target section - const targetContent = lines.slice(start, end + 1).join("\n") - const modifiedContent = targetContent.replace(searchPattern, validReplace) - const modifiedLines = modifiedContent.split("\n") - - // Reconstruct full content - newContent = [...beforeLines, ...modifiedLines, ...afterLines].join("\n") - } else { - // Global replacement - newContent = fileContent.replace(searchPattern, validReplace) - } - - // Initialize diff view - cline.diffViewProvider.editType = "modify" - cline.diffViewProvider.originalContent = fileContent - - // Generate and validate diff - const diff = formatResponse.createPrettyPatch(validRelPath, fileContent, newContent) - if (!diff) { - pushToolResult(`No changes needed for '${relPath}'`) - await cline.diffViewProvider.reset() - return - } - - // Check if preventFocusDisruption experiment is enabled - const provider = cline.providerRef.deref() - const state = await provider?.getState() - const diagnosticsEnabled = state?.diagnosticsEnabled ?? true - const writeDelayMs = state?.writeDelayMs ?? DEFAULT_WRITE_DELAY_MS - const isPreventFocusDisruptionEnabled = experiments.isEnabled( - state?.experiments ?? 
{}, - EXPERIMENT_IDS.PREVENT_FOCUS_DISRUPTION, - ) - - const completeMessage = JSON.stringify({ - ...sharedMessageProps, - diff, - isProtected: isWriteProtected, - } satisfies ClineSayTool) - - // Show diff view if focus disruption prevention is disabled - if (!isPreventFocusDisruptionEnabled) { - await cline.diffViewProvider.open(validRelPath) - await cline.diffViewProvider.update(newContent, true) - cline.diffViewProvider.scrollToFirstDiff() - } - - const didApprove = await askApproval("tool", completeMessage, undefined, isWriteProtected) - - if (!didApprove) { - // Revert changes if diff view was shown - if (!isPreventFocusDisruptionEnabled) { - await cline.diffViewProvider.revertChanges() - } - pushToolResult("Changes were rejected by the user.") - await cline.diffViewProvider.reset() - return - } - - // Save the changes - if (isPreventFocusDisruptionEnabled) { - // Direct file write without diff view or opening the file - await cline.diffViewProvider.saveDirectly(validRelPath, newContent, false, diagnosticsEnabled, writeDelayMs) - } else { - // Call saveChanges to update the DiffViewProvider properties - await cline.diffViewProvider.saveChanges(diagnosticsEnabled, writeDelayMs) - } - - // Track file edit operation - if (relPath) { - await cline.fileContextTracker.trackFileContext(relPath, "roo_edited" as RecordSource) - } - - cline.didEditFile = true - - // Get the formatted response message - const message = await cline.diffViewProvider.pushToolWriteResult( - cline, - cline.cwd, - false, // Always false for search_and_replace - ) - - pushToolResult(message) - - // Record successful tool usage and cleanup - cline.recordToolUsage("search_and_replace") - await cline.diffViewProvider.reset() - - // Process any queued messages after file edit completes - cline.processQueuedMessages() - } catch (error) { - handleError("search and replace", error) - await cline.diffViewProvider.reset() - } -} - -/** - * Escapes special regex characters in a string - * @param input 
String to escape regex characters in - * @returns Escaped string safe for regex pattern matching - */ -function escapeRegExp(input: string): string { - return input.replace(/[.*+?^${}()|[\]\\]/g, "\\$&") -} diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index d561600ca73..da63f2b2489 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -42,6 +42,7 @@ import { ORGANIZATION_ALLOW_ALL, DEFAULT_MODES, getActiveToolUseStyle, // kilocode_change + DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, } from "@roo-code/types" import { TelemetryService } from "@roo-code/telemetry" import { CloudService, BridgeOrchestrator, getRooCodeApiUrl } from "@roo-code/cloud" @@ -96,6 +97,7 @@ import type { ClineMessage } from "@roo-code/types" import { readApiMessages, saveApiMessages, saveTaskMessages } from "../task-persistence" import { getNonce } from "./getNonce" import { getUri } from "./getUri" +import { REQUESTY_BASE_URL } from "../../shared/utils/requesty" //kilocode_change start import { McpDownloadResponse, McpMarketplaceCatalog } from "../../shared/kilocode/mcp" @@ -161,7 +163,7 @@ export class ClineProvider public isViewLaunched = false public settingsImportedAt?: number - public readonly latestAnnouncementId = "sep-2025-code-supernova-1m" // Code Supernova 1M context window announcement + public readonly latestAnnouncementId = "nov-2025-v3.30.0-pr-fixer" // v3.30.0 PR Fixer announcement public readonly providerSettingsManager: ProviderSettingsManager public readonly customModesManager: CustomModesManager @@ -749,7 +751,12 @@ export class ClineProvider const prompt = supportPrompt.create(promptType, params, customSupportPrompts) if (command === "addToContext") { - await visibleProvider.postMessageToWebview({ type: "invoke", invoke: "setChatBoxMessage", text: prompt }) + await visibleProvider.postMessageToWebview({ + type: "invoke", + invoke: "setChatBoxMessage", + text: `${prompt}\n\n`, + }) + await 
visibleProvider.postMessageToWebview({ type: "action", action: "focusInput" }) return } @@ -806,7 +813,12 @@ ${prompt} const prompt = supportPrompt.create(promptType, params, customSupportPrompts) if (command === "terminalAddToContext") { - await visibleProvider.postMessageToWebview({ type: "invoke", invoke: "setChatBoxMessage", text: prompt }) + await visibleProvider.postMessageToWebview({ + type: "invoke", + invoke: "setChatBoxMessage", + text: `${prompt}\n\n`, + }) + await visibleProvider.postMessageToWebview({ type: "action", action: "focusInput" }) return } @@ -1005,6 +1017,7 @@ ${prompt} apiConfiguration, diffEnabled: enableDiff, enableCheckpoints, + checkpointTimeout, fuzzyMatchThreshold, experiments, cloudUserInfo, @@ -1017,6 +1030,7 @@ ${prompt} apiConfiguration, enableDiff, enableCheckpoints, + checkpointTimeout, fuzzyMatchThreshold, consecutiveMistakeLimit: apiConfiguration.consecutiveMistakeLimit, historyItem, @@ -1591,8 +1605,8 @@ ${prompt} // Requesty - async handleRequestyCallback(code: string) { - let { apiConfiguration, currentApiConfigName = "default" } = await this.getState() + async handleRequestyCallback(code: string, baseUrl: string | null) { + let { apiConfiguration } = await this.getState() const newConfiguration: ProviderSettings = { ...apiConfiguration, @@ -1601,7 +1615,16 @@ ${prompt} requestyModelId: apiConfiguration?.requestyModelId || requestyDefaultModelId, } - await this.upsertProviderProfile(currentApiConfigName, newConfiguration) + // set baseUrl as undefined if we don't provide one + // or if it is the default requesty url + if (!baseUrl || baseUrl === REQUESTY_BASE_URL) { + newConfiguration.requestyBaseUrl = undefined + } else { + newConfiguration.requestyBaseUrl = baseUrl + } + + const profileName = `Requesty (${new Date().toLocaleString()})` + await this.upsertProviderProfile(profileName, newConfiguration) } // kilocode_change: @@ -1931,6 +1954,7 @@ ${prompt} ttsSpeed, diffEnabled, enableCheckpoints, + checkpointTimeout, // 
taskHistory, // kilocode_change soundVolume, browserViewportSize, @@ -2005,6 +2029,8 @@ ${prompt} includeDiagnosticMessages, maxDiagnosticMessages, includeTaskHistoryInEnhance, + includeCurrentTime, + includeCurrentCost, taskSyncEnabled, remoteControlEnabled, openRouterImageApiKey, @@ -2025,11 +2051,11 @@ ${prompt} let cloudOrganizations: CloudOrganizationMembership[] = [] try { - cloudOrganizations = await CloudService.instance.getOrganizationMemberships() + if (!CloudService.instance.isCloudAgent) { + cloudOrganizations = await CloudService.instance.getOrganizationMemberships() + } } catch (error) { - console.error( - `[getStateToPostToWebview] failed to get cloud organizations: ${error instanceof Error ? error.message : String(error)}`, - ) + // Ignore this error. } const telemetryKey = process.env.KILOCODE_POSTHOG_API_KEY @@ -2089,6 +2115,7 @@ ${prompt} ttsSpeed: ttsSpeed ?? 1.0, diffEnabled: diffEnabled ?? true, enableCheckpoints: enableCheckpoints ?? true, + checkpointTimeout: checkpointTimeout ?? DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, shouldShowAnnouncement: false, // kilocode_change allowedCommands: mergedAllowedCommands, deniedCommands: mergedDeniedCommands, @@ -2193,6 +2220,8 @@ ${prompt} includeDiagnosticMessages: includeDiagnosticMessages ?? true, maxDiagnosticMessages: maxDiagnosticMessages ?? 50, includeTaskHistoryInEnhance: includeTaskHistoryInEnhance ?? true, + includeCurrentTime: includeCurrentTime ?? true, + includeCurrentCost: includeCurrentCost ?? true, taskSyncEnabled, remoteControlEnabled, openRouterImageApiKey, @@ -2353,6 +2382,7 @@ ${prompt} ttsSpeed: stateValues.ttsSpeed ?? 1.0, diffEnabled: stateValues.diffEnabled ?? true, enableCheckpoints: stateValues.enableCheckpoints ?? true, + checkpointTimeout: stateValues.checkpointTimeout ?? DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, soundVolume: stateValues.soundVolume, browserViewportSize: stateValues.browserViewportSize ?? "900x600", screenshotQuality: stateValues.screenshotQuality ?? 
75, @@ -2455,6 +2485,8 @@ ${prompt} includeDiagnosticMessages: stateValues.includeDiagnosticMessages ?? true, maxDiagnosticMessages: stateValues.maxDiagnosticMessages ?? 50, includeTaskHistoryInEnhance: stateValues.includeTaskHistoryInEnhance ?? true, + includeCurrentTime: stateValues.includeCurrentTime ?? true, + includeCurrentCost: stateValues.includeCurrentCost ?? true, taskSyncEnabled, remoteControlEnabled: (() => { try { @@ -2822,6 +2854,7 @@ ${prompt} organizationAllowList, diffEnabled: enableDiff, enableCheckpoints, + checkpointTimeout, fuzzyMatchThreshold, experiments, cloudUserInfo, @@ -2838,6 +2871,7 @@ ${prompt} apiConfiguration, enableDiff, enableCheckpoints, + checkpointTimeout, fuzzyMatchThreshold, consecutiveMistakeLimit: apiConfiguration.consecutiveMistakeLimit, task: text, diff --git a/src/core/webview/__tests__/ClineProvider.spec.ts b/src/core/webview/__tests__/ClineProvider.spec.ts index 25289bd3b6d..007cd0f048c 100644 --- a/src/core/webview/__tests__/ClineProvider.spec.ts +++ b/src/core/webview/__tests__/ClineProvider.spec.ts @@ -7,9 +7,10 @@ import axios from "axios" import { type ProviderSettingsEntry, type ClineMessage, - openRouterDefaultModelId, + openRouterDefaultModelId, // kilocode_change: openRouterDefaultModelId ORGANIZATION_ALLOW_ALL, -} from "@roo-code/types" // kilocode_change: openRouterDefaultModelId + DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, +} from "@roo-code/types" import { TelemetryService } from "@roo-code/telemetry" import { ExtensionMessage, ExtensionState } from "../../../shared/ExtensionMessage" @@ -306,7 +307,6 @@ vi.mock("../../../api", () => ({ buildApiHandler: vi.fn().mockReturnValue({ getModel: vi.fn().mockReturnValue({ id: "claude-3-sonnet", - info: { supportsComputerUse: false }, }), }), })) @@ -586,6 +586,7 @@ describe("ClineProvider", () => { remoteControlEnabled: false, taskSyncEnabled: false, featureRoomoteControlEnabled: false, + checkpointTimeout: DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, } const message: 
ExtensionMessage = { @@ -2704,7 +2705,6 @@ describe("ClineProvider - Router Models", () => { litellmApiKey: "litellm-key", litellmBaseUrl: "http://localhost:4000", // kilocode_change start - chutesApiKey: "chutes-key", geminiApiKey: "gemini-key", googleGeminiBaseUrl: "https://gemini.example.com", ovhCloudAiEndpointsApiKey: "ovhcloud-key", @@ -2743,7 +2743,6 @@ describe("ClineProvider - Router Models", () => { baseUrl: "https://gemini.example.com", }) expect(getModels).toHaveBeenCalledWith({ provider: "ovhcloud", apiKey: "ovhcloud-key" }) - expect(getModels).toHaveBeenCalledWith({ provider: "chutes", apiKey: "chutes-key" }) expect(getModels).toHaveBeenCalledWith({ provider: "inception", apiKey: "inception-key", @@ -2754,11 +2753,19 @@ describe("ClineProvider - Router Models", () => { expect(getModels).toHaveBeenCalledWith({ provider: "glama" }) expect(getModels).toHaveBeenCalledWith({ provider: "unbound", apiKey: "unbound-key" }) expect(getModels).toHaveBeenCalledWith({ provider: "vercel-ai-gateway" }) + expect(getModels).toHaveBeenCalledWith({ provider: "deepinfra" }) + expect(getModels).toHaveBeenCalledWith( + expect.objectContaining({ + provider: "roo", + baseUrl: expect.any(String), + }), + ) expect(getModels).toHaveBeenCalledWith({ provider: "litellm", apiKey: "litellm-key", baseUrl: "http://localhost:4000", }) + expect(getModels).toHaveBeenCalledWith({ provider: "chutes" }) // Verify response was sent expect(mockPostMessage).toHaveBeenCalledWith({ @@ -2770,9 +2777,10 @@ describe("ClineProvider - Router Models", () => { requesty: mockModels, glama: mockModels, unbound: mockModels, - chutes: mockModels, // kilocode_change + roo: mockModels, + chutes: mockModels, litellm: mockModels, - "kilocode-openrouter": mockModels, + kilocode: mockModels, ollama: mockModels, // kilocode_change lmstudio: {}, "vercel-ai-gateway": mockModels, @@ -2781,6 +2789,7 @@ describe("ClineProvider - Router Models", () => { huggingface: {}, "io-intelligence": {}, }, + values: undefined, 
}) }) @@ -2819,13 +2828,14 @@ describe("ClineProvider - Router Models", () => { .mockRejectedValueOnce(new Error("Requesty API error")) // requesty fail .mockResolvedValueOnce(mockModels) // glama success .mockRejectedValueOnce(new Error("Unbound API error")) // unbound fail - .mockRejectedValueOnce(new Error("Chutes API error")) // kilocode_change: chutes fail .mockRejectedValueOnce(new Error("Kilocode-OpenRouter API error")) // kilocode-openrouter fail .mockRejectedValueOnce(new Error("Ollama API error")) // kilocode_change .mockResolvedValueOnce(mockModels) // vercel-ai-gateway success .mockResolvedValueOnce(mockModels) // deepinfra success .mockResolvedValueOnce(mockModels) // kilocode_change: ovhcloud .mockResolvedValueOnce(mockModels) // kilocode_change: inception success + .mockResolvedValueOnce(mockModels) // roo success + .mockRejectedValueOnce(new Error("Chutes API error")) // chutes fail .mockRejectedValueOnce(new Error("LiteLLM connection failed")) // litellm fail await messageHandler({ type: "requestRouterModels" }) @@ -2840,17 +2850,19 @@ describe("ClineProvider - Router Models", () => { requesty: {}, glama: mockModels, unbound: {}, - chutes: {}, // kilocode_change + roo: mockModels, + chutes: {}, ollama: {}, lmstudio: {}, litellm: {}, - "kilocode-openrouter": {}, + kilocode: {}, "vercel-ai-gateway": mockModels, ovhcloud: mockModels, // kilocode_change inception: mockModels, // kilocode_change huggingface: {}, "io-intelligence": {}, }, + values: undefined, }) // Verify error messages were sent for failed providers @@ -2881,7 +2893,7 @@ describe("ClineProvider - Router Models", () => { type: "singleRouterModelFetchResponse", success: false, error: "Kilocode-OpenRouter API error", - values: { provider: "kilocode-openrouter" }, + values: { provider: "kilocode" }, }) expect(mockPostMessage).toHaveBeenCalledWith({ @@ -2891,6 +2903,13 @@ describe("ClineProvider - Router Models", () => { values: { provider: "unbound" }, }) + 
expect(mockPostMessage).toHaveBeenCalledWith({ + type: "singleRouterModelFetchResponse", + success: false, + error: "Chutes API error", + values: { provider: "chutes" }, + }) + expect(mockPostMessage).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", success: false, @@ -2983,9 +3002,10 @@ describe("ClineProvider - Router Models", () => { requesty: mockModels, glama: mockModels, unbound: mockModels, - chutes: mockModels, // kilocode_change + roo: mockModels, + chutes: mockModels, litellm: {}, - "kilocode-openrouter": mockModels, + kilocode: mockModels, ollama: mockModels, // kilocode_change lmstudio: {}, "vercel-ai-gateway": mockModels, @@ -2994,6 +3014,7 @@ describe("ClineProvider - Router Models", () => { huggingface: {}, "io-intelligence": {}, }, + values: undefined, }) }) diff --git a/src/core/webview/__tests__/ClineProvider.sticky-mode.spec.ts b/src/core/webview/__tests__/ClineProvider.sticky-mode.spec.ts index 03cd0f94fdd..bb7723d4b9a 100644 --- a/src/core/webview/__tests__/ClineProvider.sticky-mode.spec.ts +++ b/src/core/webview/__tests__/ClineProvider.sticky-mode.spec.ts @@ -92,7 +92,6 @@ vi.mock("../../../api", () => ({ buildApiHandler: vi.fn().mockReturnValue({ getModel: vi.fn().mockReturnValue({ id: "claude-3-sonnet", - info: { supportsComputerUse: false }, }), }), })) diff --git a/src/core/webview/__tests__/generateSystemPrompt.browser-capability.spec.ts b/src/core/webview/__tests__/generateSystemPrompt.browser-capability.spec.ts new file mode 100644 index 00000000000..9b3f94f309b --- /dev/null +++ b/src/core/webview/__tests__/generateSystemPrompt.browser-capability.spec.ts @@ -0,0 +1,83 @@ +import { describe, test, expect, vi } from "vitest" + +// Module under test +import { generateSystemPrompt } from "../generateSystemPrompt" + +// Mock SYSTEM_PROMPT to capture its third argument (browser capability flag) +vi.mock("../../prompts/system", () => ({ + SYSTEM_PROMPT: vi.fn(async (_ctx, _cwd, canUseBrowserTool: boolean) => { + // return a 
simple string to satisfy return type + return `SYSTEM_PROMPT:${canUseBrowserTool}` + }), +})) + +// Mock API handler so we control model.info flags +vi.mock("../../../api", () => ({ + buildApiHandler: vi.fn((_config) => ({ + getModel: () => ({ + id: "mock-model", + info: { + supportsImages: true, + contextWindow: 200_000, + maxTokens: 8192, + supportsPromptCache: false, + }, + }), + })), +})) + +// Minimal mode utilities: provide a custom mode that includes the "browser" group +const mockCustomModes = [ + { + slug: "test-mode", + name: "Test Mode", + roleDefinition: "Test role", + description: "", + groups: ["browser"], // critical: include browser group + }, +] + +// Minimal ClineProvider stub +function makeProviderStub() { + return { + cwd: "/tmp", + context: {} as any, + customModesManager: { + getCustomModes: async () => mockCustomModes, + }, + getCurrentTask: () => ({ + rooIgnoreController: { getInstructions: () => undefined }, + }), + getMcpHub: () => undefined, + // State must enable browser tool and provide apiConfiguration + getState: async () => ({ + apiConfiguration: { + apiProvider: "openrouter", // not used by the test beyond handler creation + }, + customModePrompts: undefined, + customInstructions: undefined, + browserViewportSize: "900x600", + diffEnabled: false, + mcpEnabled: false, + fuzzyMatchThreshold: 1.0, + experiments: {}, + enableMcpServerCreation: false, + browserToolEnabled: true, // critical: enabled in settings + language: "en", + maxReadFileLine: -1, + maxConcurrentFileReads: 5, + }), + } as any +} + +describe("generateSystemPrompt browser capability (supportsImages=true)", () => { + test("passes canUseBrowserTool=true when mode has browser group and setting enabled", async () => { + const provider = makeProviderStub() + const message = { mode: "test-mode" } as any + + const result = await generateSystemPrompt(provider, message) + + // SYSTEM_PROMPT mock encodes the boolean into the returned string + 
expect(result).toBe("SYSTEM_PROMPT:true") + }) +}) diff --git a/src/core/webview/__tests__/webviewMessageHandler.autoSwitch.spec.ts b/src/core/webview/__tests__/webviewMessageHandler.autoSwitch.spec.ts index ef3b997ea39..6231ee1230b 100644 --- a/src/core/webview/__tests__/webviewMessageHandler.autoSwitch.spec.ts +++ b/src/core/webview/__tests__/webviewMessageHandler.autoSwitch.spec.ts @@ -149,9 +149,9 @@ describe("webviewMessageHandler - Automatic Organization Switching", () => { expect(refreshOrganizationModes).toHaveBeenCalled() // Verify models were flushed and refetched (via upsertApiConfiguration handler) - expect(flushModels).toHaveBeenCalledWith("kilocode-openrouter") + expect(flushModels).toHaveBeenCalledWith("kilocode") expect(getModels).toHaveBeenCalledWith({ - provider: "kilocode-openrouter", + provider: "kilocode", kilocodeOrganizationId: "org-1", kilocodeToken: "test-token", }) @@ -217,11 +217,11 @@ describe("webviewMessageHandler - Automatic Organization Switching", () => { }) // Verify flushModels was called (via upsertApiConfiguration) - expect(flushModels).toHaveBeenCalledWith("kilocode-openrouter") + expect(flushModels).toHaveBeenCalledWith("kilocode") // Verify getModels was called with organization ID (via upsertApiConfiguration) expect(getModels).toHaveBeenCalledWith({ - provider: "kilocode-openrouter", + provider: "kilocode", kilocodeOrganizationId: "org-1", kilocodeToken: "test-token", }) @@ -229,7 +229,7 @@ describe("webviewMessageHandler - Automatic Organization Switching", () => { // Verify models were sent to webview (via upsertApiConfiguration) expect(mockPostMessageToWebview).toHaveBeenCalledWith({ type: "routerModels", - routerModels: { "kilocode-openrouter": { "model-1": {}, "model-2": {} } }, + routerModels: { kilocode: { "model-1": {}, "model-2": {} } }, }) }) }) diff --git a/src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts b/src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts new file mode 
100644 index 00000000000..65b1e19faf6 --- /dev/null +++ b/src/core/webview/__tests__/webviewMessageHandler.routerModels.spec.ts @@ -0,0 +1,172 @@ +import { describe, it, expect, vi, beforeEach } from "vitest" +import { webviewMessageHandler } from "../webviewMessageHandler" +import type { ClineProvider } from "../ClineProvider" + +// Mock vscode (minimal) +vi.mock("vscode", () => ({ + window: { + showErrorMessage: vi.fn(), + showWarningMessage: vi.fn(), + showInformationMessage: vi.fn(), + // kilocode_change start + createTextEditorDecorationType: vi.fn(() => ({ + dispose: vi.fn(), + })), + // kilocode_change end + }, + workspace: { + workspaceFolders: undefined, + getConfiguration: vi.fn(() => ({ + get: vi.fn(), + update: vi.fn(), + })), + }, + env: { + clipboard: { writeText: vi.fn() }, + openExternal: vi.fn(), + }, + commands: { + executeCommand: vi.fn(), + }, + Uri: { + parse: vi.fn((s: string) => ({ toString: () => s })), + file: vi.fn((p: string) => ({ fsPath: p })), + }, + ConfigurationTarget: { + Global: 1, + Workspace: 2, + WorkspaceFolder: 3, + }, +})) + +// Mock modelCache getModels/flushModels used by the handler +const getModelsMock = vi.fn() +vi.mock("../../../api/providers/fetchers/modelCache", () => ({ + getModels: (...args: any[]) => getModelsMock(...args), + flushModels: vi.fn(), +})) + +describe("webviewMessageHandler - requestRouterModels provider filter", () => { + let mockProvider: ClineProvider & { + postMessageToWebview: ReturnType + getState: ReturnType + contextProxy: any + log: ReturnType + } + + beforeEach(() => { + vi.clearAllMocks() + + mockProvider = { + // Only methods used by this code path + postMessageToWebview: vi.fn(), + getState: vi.fn().mockResolvedValue({ apiConfiguration: {} }), + contextProxy: { + getValue: vi.fn(), + setValue: vi.fn(), + globalStorageUri: { fsPath: "/mock/storage" }, + }, + log: vi.fn(), + } as any + + // Default mock: return distinct model maps per provider so we can verify keys + 
getModelsMock.mockImplementation(async (options: any) => { + switch (options?.provider) { + case "roo": + return { "roo/sonnet": { contextWindow: 8192, supportsPromptCache: false } } + case "openrouter": + return { "openrouter/qwen2.5": { contextWindow: 32768, supportsPromptCache: false } } + case "requesty": + return { "requesty/model": { contextWindow: 8192, supportsPromptCache: false } } + case "deepinfra": + return { "deepinfra/model": { contextWindow: 8192, supportsPromptCache: false } } + case "glama": + return { "glama/model": { contextWindow: 8192, supportsPromptCache: false } } + case "unbound": + return { "unbound/model": { contextWindow: 8192, supportsPromptCache: false } } + case "vercel-ai-gateway": + return { "vercel/model": { contextWindow: 8192, supportsPromptCache: false } } + case "io-intelligence": + return { "io/model": { contextWindow: 8192, supportsPromptCache: false } } + case "litellm": + return { "litellm/model": { contextWindow: 8192, supportsPromptCache: false } } + default: + return {} + } + }) + }) + + it("fetches only requested provider when values.provider is present ('roo')", async () => { + await webviewMessageHandler( + mockProvider as any, + { + type: "requestRouterModels", + values: { provider: "roo" }, + } as any, + ) + + // Should post a single routerModels message + expect(mockProvider.postMessageToWebview).toHaveBeenCalledWith( + expect.objectContaining({ type: "routerModels", routerModels: expect.any(Object) }), + ) + + const call = (mockProvider.postMessageToWebview as any).mock.calls.find( + (c: any[]) => c[0]?.type === "routerModels", + ) + expect(call).toBeTruthy() + const payload = call[0] + const routerModels = payload.routerModels as Record> + + // Only "roo" key should be present + const keys = Object.keys(routerModels) + expect(keys).toEqual(["roo"]) + expect(Object.keys(routerModels.roo || {})).toContain("roo/sonnet") + + // getModels should have been called exactly once for roo + const providersCalled = 
getModelsMock.mock.calls.map((c: any[]) => c[0]?.provider) + expect(providersCalled).toEqual(["roo"]) + }) + + it("defaults to aggregate fetching when no provider filter is sent", async () => { + await webviewMessageHandler( + mockProvider as any, + { + type: "requestRouterModels", + } as any, + ) + + const call = (mockProvider.postMessageToWebview as any).mock.calls.find( + (c: any[]) => c[0]?.type === "routerModels", + ) + expect(call).toBeTruthy() + const routerModels = call[0].routerModels as Record> + + // Aggregate handler initializes many known routers - ensure a few expected keys exist + expect(routerModels).toHaveProperty("openrouter") + expect(routerModels).toHaveProperty("roo") + expect(routerModels).toHaveProperty("requesty") + }) + + it("supports filtering another single provider ('openrouter')", async () => { + await webviewMessageHandler( + mockProvider as any, + { + type: "requestRouterModels", + values: { provider: "openrouter" }, + } as any, + ) + + const call = (mockProvider.postMessageToWebview as any).mock.calls.find( + (c: any[]) => c[0]?.type === "routerModels", + ) + expect(call).toBeTruthy() + const routerModels = call[0].routerModels as Record> + const keys = Object.keys(routerModels) + + expect(keys).toEqual(["openrouter"]) + expect(Object.keys(routerModels.openrouter || {})).toContain("openrouter/qwen2.5") + + const providersCalled = getModelsMock.mock.calls.map((c: any[]) => c[0]?.provider) + expect(providersCalled).toEqual(["openrouter"]) + }) +}) diff --git a/src/core/webview/__tests__/webviewMessageHandler.spec.ts b/src/core/webview/__tests__/webviewMessageHandler.spec.ts index eedeaaf687b..7fc24867d1f 100644 --- a/src/core/webview/__tests__/webviewMessageHandler.spec.ts +++ b/src/core/webview/__tests__/webviewMessageHandler.spec.ts @@ -227,7 +227,6 @@ describe("webviewMessageHandler - requestRouterModels", () => { }) // Verify getModels was called for each provider - expect(mockGetModels).toHaveBeenCalledWith({ provider: "deepinfra" 
}) expect(mockGetModels).toHaveBeenCalledWith({ provider: "openrouter", apiKey: "openrouter-key" }) // kilocode_change: apiKey expect(mockGetModels).toHaveBeenCalledWith({ provider: "requesty", apiKey: "requesty-key" }) expect(mockGetModels).toHaveBeenCalledWith({ provider: "glama" }) @@ -246,6 +245,13 @@ describe("webviewMessageHandler - requestRouterModels", () => { }) // kilocode_change end expect(mockGetModels).toHaveBeenCalledWith({ provider: "vercel-ai-gateway" }) + expect(mockGetModels).toHaveBeenCalledWith({ provider: "deepinfra" }) + expect(mockGetModels).toHaveBeenCalledWith( + expect.objectContaining({ + provider: "roo", + baseUrl: expect.any(String), + }), + ) expect(mockGetModels).toHaveBeenCalledWith({ provider: "litellm", apiKey: "litellm-key", @@ -264,9 +270,10 @@ describe("webviewMessageHandler - requestRouterModels", () => { requesty: mockModels, glama: mockModels, unbound: mockModels, - chutes: mockModels, // kilocode_change litellm: mockModels, - "kilocode-openrouter": mockModels, + kilocode: mockModels, + roo: mockModels, + chutes: mockModels, ollama: mockModels, // kilocode_change lmstudio: {}, "vercel-ai-gateway": mockModels, @@ -275,6 +282,7 @@ describe("webviewMessageHandler - requestRouterModels", () => { ovhcloud: mockModels, // kilocode_change inception: mockModels, // kilocode_change }, + values: undefined, }) }) @@ -365,9 +373,10 @@ describe("webviewMessageHandler - requestRouterModels", () => { requesty: mockModels, glama: mockModels, unbound: mockModels, - chutes: mockModels, // kilocode_change + roo: mockModels, + chutes: mockModels, litellm: {}, - "kilocode-openrouter": mockModels, + kilocode: mockModels, ollama: mockModels, // kilocode_change lmstudio: {}, "vercel-ai-gateway": mockModels, @@ -376,6 +385,7 @@ describe("webviewMessageHandler - requestRouterModels", () => { ovhcloud: mockModels, // kilocode_change inception: mockModels, // kilocode_change }, + values: undefined, }) }) @@ -396,43 +406,21 @@ 
describe("webviewMessageHandler - requestRouterModels", () => { .mockRejectedValueOnce(new Error("Requesty API error")) // requesty .mockResolvedValueOnce(mockModels) // glama .mockRejectedValueOnce(new Error("Unbound API error")) // unbound - .mockRejectedValueOnce(new Error("Chutes API error")) // chutes // kilocode_change .mockResolvedValueOnce(mockModels) // kilocode-openrouter .mockRejectedValueOnce(new Error("Ollama API error")) // kilocode_change .mockResolvedValueOnce(mockModels) // vercel-ai-gateway .mockResolvedValueOnce(mockModels) // deepinfra .mockResolvedValueOnce(mockModels) // kilocode_change ovhcloud .mockRejectedValueOnce(new Error("Inception API error")) // kilocode_change + .mockResolvedValueOnce(mockModels) // roo + .mockRejectedValueOnce(new Error("Chutes API error")) // chutes .mockRejectedValueOnce(new Error("LiteLLM connection failed")) // litellm await webviewMessageHandler(mockClineProvider, { type: "requestRouterModels", }) - // Verify successful providers are included - expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ - type: "routerModels", - routerModels: { - deepinfra: mockModels, - openrouter: mockModels, - gemini: mockModels, // kilocode_change - requesty: {}, - glama: mockModels, - unbound: {}, - chutes: {}, // kilocode_change - litellm: {}, - "kilocode-openrouter": mockModels, - ollama: {}, - ovhcloud: mockModels, // kilocode_change - inception: {}, // kilocode_change - lmstudio: {}, - "vercel-ai-gateway": mockModels, - huggingface: {}, - "io-intelligence": {}, - }, - }) - - // Verify error messages were sent for failed providers + // Verify error messages were sent for failed providers (these come first) expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", success: false, @@ -451,16 +439,17 @@ describe("webviewMessageHandler - requestRouterModels", () => { expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: 
"singleRouterModelFetchResponse", success: false, - error: "Chutes API error", - values: { provider: "chutes" }, + error: "Inception API error", + values: { provider: "inception" }, }) + // kilocode_change end + expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", success: false, - error: "Inception API error", - values: { provider: "inception" }, + error: "Chutes API error", + values: { provider: "chutes" }, }) - // kilocode_change end expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", @@ -468,6 +457,33 @@ describe("webviewMessageHandler - requestRouterModels", () => { error: "LiteLLM connection failed", values: { provider: "litellm" }, }) + + // Verify final routerModels response includes successful providers and empty objects for failed ones + expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ + type: "routerModels", + routerModels: { + deepinfra: mockModels, + openrouter: mockModels, + requesty: {}, + glama: mockModels, + unbound: {}, + roo: mockModels, + chutes: {}, + litellm: {}, + ollama: {}, + lmstudio: {}, + "vercel-ai-gateway": mockModels, + huggingface: {}, + "io-intelligence": {}, + // kilocode_change start + kilocode: mockModels, + inception: {}, + gemini: mockModels, + ovhcloud: mockModels, + // kilocode_change end + }, + values: undefined, + }) }) it("handles Error objects and string errors correctly", async () => { @@ -478,13 +494,14 @@ describe("webviewMessageHandler - requestRouterModels", () => { .mockRejectedValueOnce(new Error("Requesty API error")) // requesty .mockRejectedValueOnce(new Error("Glama API error")) // glama .mockRejectedValueOnce(new Error("Unbound API error")) // unbound - .mockRejectedValueOnce(new Error("Chutes API error")) // chutes // kilocode_change .mockResolvedValueOnce({}) // kilocode-openrouter - Success .mockRejectedValueOnce(new Error("Ollama API error")) // ollama 
.mockRejectedValueOnce(new Error("Vercel AI Gateway error")) // vercel-ai-gateway .mockRejectedValueOnce(new Error("DeepInfra API error")) // deepinfra .mockRejectedValueOnce(new Error("OVHcloud AI Endpoints error")) // ovhcloud // kilocode_change .mockRejectedValueOnce(new Error("Inception API error")) // kilocode_change inception + .mockRejectedValueOnce(new Error("Roo API error")) // roo + .mockRejectedValueOnce(new Error("Chutes API error")) // chutes .mockRejectedValueOnce(new Error("LiteLLM connection failed")) // litellm await webviewMessageHandler(mockClineProvider, { @@ -530,18 +547,33 @@ describe("webviewMessageHandler - requestRouterModels", () => { }) // kilocode_change start + expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ + type: "singleRouterModelFetchResponse", + success: false, + error: "Ollama API error", + values: { provider: "ollama" }, + }) + + expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ + type: "singleRouterModelFetchResponse", + success: false, + error: "Vercel AI Gateway error", + values: { provider: "vercel-ai-gateway" }, + }) + expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", success: false, error: "Chutes API error", values: { provider: "chutes" }, }) + // kilocode_change end expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", success: false, - error: "Ollama API error", - values: { provider: "ollama" }, + error: "DeepInfra API error", + values: { provider: "deepinfra" }, }) expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ @@ -550,13 +582,19 @@ describe("webviewMessageHandler - requestRouterModels", () => { error: "Vercel AI Gateway error", values: { provider: "vercel-ai-gateway" }, }) - // kilocode_change end expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ type: "singleRouterModelFetchResponse", success: false, - error: "DeepInfra 
API error", - values: { provider: "deepinfra" }, + error: "Roo API error", + values: { provider: "roo" }, + }) + + expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ + type: "singleRouterModelFetchResponse", + success: false, + error: "Chutes API error", + values: { provider: "chutes" }, }) expect(mockClineProvider.postMessageToWebview).toHaveBeenCalledWith({ diff --git a/src/core/webview/generateSystemPrompt.ts b/src/core/webview/generateSystemPrompt.ts index 2f2f9364122..1adbd4006df 100644 --- a/src/core/webview/generateSystemPrompt.ts +++ b/src/core/webview/generateSystemPrompt.ts @@ -47,25 +47,27 @@ export const generateSystemPrompt = async (provider: ClineProvider, message: Web const rooIgnoreInstructions = provider.getCurrentTask()?.rooIgnoreController?.getInstructions() // Determine if browser tools can be used based on model support, mode, and user settings - let modelSupportsComputerUse = false + let modelInfo: any = undefined - // Create a temporary API handler to check if the model supports computer use + // Create a temporary API handler to check if the model supports browser capability // This avoids relying on an active Cline instance which might not exist during preview try { const tempApiHandler = buildApiHandler(apiConfiguration) - // kilocode_change: supports images => supports browser - modelSupportsComputerUse = tempApiHandler.getModel().info.supportsImages ?? false + modelInfo = tempApiHandler.getModel().info } catch (error) { - console.error("Error checking if model supports computer use:", error) + console.error("Error checking if model supports browser capability:", error) } // Check if the current mode includes the browser tool group const modeConfig = getModeBySlug(mode, customModes) const modeSupportsBrowser = modeConfig?.groups.some((group) => getGroupName(group) === "browser") ?? 
false + // Check if model supports browser capability (images) + const modelSupportsBrowser = modelInfo && (modelInfo as any)?.supportsImages === true + // Only enable browser tools if the model supports it, the mode includes browser tools, // and browser tools are enabled in settings - const canUseBrowserTool = modelSupportsComputerUse && modeSupportsBrowser && (browserToolEnabled ?? true) + const canUseBrowserTool = modelSupportsBrowser && modeSupportsBrowser && (browserToolEnabled ?? true) const systemPrompt = await SYSTEM_PROMPT( provider.context, diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts index f4efa3f3992..49e9ebb414a 100644 --- a/src/core/webview/webviewMessageHandler.ts +++ b/src/core/webview/webviewMessageHandler.ts @@ -29,6 +29,7 @@ import { fastApplyModelSchema, // kilocode_change end UserSettingsConfig, + DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, } from "@roo-code/types" import { CloudService } from "@roo-code/cloud" import { TelemetryService } from "@roo-code/telemetry" @@ -799,24 +800,33 @@ export const webviewMessageHandler = async ( case "requestRouterModels": const { apiConfiguration } = await provider.getState() - const routerModels: Record = { - openrouter: {}, - gemini: {}, // kilocode_change - "vercel-ai-gateway": {}, - huggingface: {}, - litellm: {}, - "kilocode-openrouter": {}, // kilocode_change - deepinfra: {}, - "io-intelligence": {}, - requesty: {}, - unbound: {}, - glama: {}, - chutes: {}, // kilocode_change - ollama: {}, - lmstudio: {}, - ovhcloud: {}, // kilocode_change - inception: {}, // kilocode_change - } + // Optional single provider filter from webview + const requestedProvider = message?.values?.provider + const providerFilter = requestedProvider ? toRouterName(requestedProvider) : undefined + + const routerModels: Record = providerFilter + ? 
({} as Record) + : { + // kilocode_change start + ovhcloud: {}, + inception: {}, + kilocode: {}, + gemini: {}, + // kilocode_change end + openrouter: {}, + "vercel-ai-gateway": {}, + huggingface: {}, + litellm: {}, + deepinfra: {}, + "io-intelligence": {}, + requesty: {}, + unbound: {}, + glama: {}, + ollama: {}, + lmstudio: {}, + roo: {}, + chutes: {}, + } const safeGetModels = async (options: GetModelsOptions): Promise => { try { @@ -835,7 +845,8 @@ export const webviewMessageHandler = async ( const openRouterApiKey = apiConfiguration.openRouterApiKey || message?.values?.openRouterApiKey const openRouterBaseUrl = apiConfiguration.openRouterBaseUrl || message?.values?.openRouterBaseUrl - const modelFetchPromises: Array<{ key: RouterName; options: GetModelsOptions }> = [ + // Base candidates (only those handled by this aggregate fetcher) + const candidates: { key: RouterName; options: GetModelsOptions }[] = [ { key: "openrouter", options: { provider: "openrouter", apiKey: openRouterApiKey, baseUrl: openRouterBaseUrl }, @@ -858,11 +869,10 @@ export const webviewMessageHandler = async ( }, { key: "glama", options: { provider: "glama" } }, { key: "unbound", options: { provider: "unbound", apiKey: apiConfiguration.unboundApiKey } }, - { key: "chutes", options: { provider: "chutes", apiKey: apiConfiguration.chutesApiKey } }, // kilocode_change { - key: "kilocode-openrouter", + key: "kilocode", options: { - provider: "kilocode-openrouter", + provider: "kilocode", kilocodeToken: apiConfiguration.kilocodeToken, kilocodeOrganizationId: apiConfiguration.kilocodeOrganizationId, }, @@ -893,32 +903,47 @@ export const webviewMessageHandler = async ( baseUrl: apiConfiguration.inceptionLabsBaseUrl, }, }, + { + key: "roo", + options: { + provider: "roo", + baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy", + apiKey: CloudService.hasInstance() + ? 
CloudService.instance.authService?.getSessionToken() + : undefined, + }, + }, + { + key: "chutes", + options: { provider: "chutes", apiKey: apiConfiguration.chutesApiKey }, + }, ] // kilocode_change end - // Add IO Intelligence if API key is provided. - const ioIntelligenceApiKey = apiConfiguration.ioIntelligenceApiKey - - if (ioIntelligenceApiKey) { - modelFetchPromises.push({ + // IO Intelligence is conditional on api key + if (apiConfiguration.ioIntelligenceApiKey) { + candidates.push({ key: "io-intelligence", - options: { provider: "io-intelligence", apiKey: ioIntelligenceApiKey }, + options: { provider: "io-intelligence", apiKey: apiConfiguration.ioIntelligenceApiKey }, }) } - // Don't fetch Ollama and LM Studio models by default anymore. - // They have their own specific handlers: requestOllamaModels and requestLmStudioModels. - + // LiteLLM is conditional on baseUrl+apiKey const litellmApiKey = apiConfiguration.litellmApiKey || message?.values?.litellmApiKey const litellmBaseUrl = apiConfiguration.litellmBaseUrl || message?.values?.litellmBaseUrl if (litellmApiKey && litellmBaseUrl) { - modelFetchPromises.push({ + candidates.push({ key: "litellm", options: { provider: "litellm", apiKey: litellmApiKey, baseUrl: litellmBaseUrl }, }) } + // Apply single provider filter if specified + const modelFetchPromises = providerFilter + ? candidates.filter(({ key }) => key === providerFilter) + : candidates + const results = await Promise.allSettled( modelFetchPromises.map(async ({ key, options }) => { const models = await safeGetModels(options) @@ -932,18 +957,7 @@ export const webviewMessageHandler = async ( if (result.status === "fulfilled") { routerModels[routerName] = result.value.models - // Ollama and LM Studio settings pages still need these events. 
- if (routerName === "ollama" && Object.keys(result.value.models).length > 0) { - provider.postMessageToWebview({ - type: "ollamaModels", - ollamaModels: result.value.models, - }) - } else if (routerName === "lmstudio" && Object.keys(result.value.models).length > 0) { - provider.postMessageToWebview({ - type: "lmStudioModels", - lmStudioModels: result.value.models, - }) - } + // Ollama and LM Studio settings pages still need these events. They are not fetched here. } else { // Handle rejection: Post a specific error message for this provider. const errorMessage = result.reason instanceof Error ? result.reason.message : String(result.reason) @@ -960,7 +974,11 @@ export const webviewMessageHandler = async ( } }) - provider.postMessageToWebview({ type: "routerModels", routerModels }) + provider.postMessageToWebview({ + type: "routerModels", + routerModels, + values: providerFilter ? { provider: requestedProvider } : undefined, + }) break case "requestOllamaModels": { // Specific handler for Ollama models only. @@ -1009,6 +1027,38 @@ export const webviewMessageHandler = async ( } break } + case "requestRooModels": { + // Specific handler for Roo models only - flushes cache to ensure fresh auth token is used + try { + // Flush cache first to ensure fresh models with current auth state + await flushModels("roo") + + const rooModels = await getModels({ + provider: "roo", + baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy", + apiKey: CloudService.hasInstance() + ? CloudService.instance.authService?.getSessionToken() + : undefined, + }) + + // Always send a response, even if no models are returned + provider.postMessageToWebview({ + type: "singleRouterModelFetchResponse", + success: true, + values: { provider: "roo", models: rooModels }, + }) + } catch (error) { + // Send error response + const errorMessage = error instanceof Error ? 
error.message : String(error) + provider.postMessageToWebview({ + type: "singleRouterModelFetchResponse", + success: false, + error: errorMessage, + values: { provider: "roo" }, + }) + } + break + } case "requestOpenAiModels": if (message?.values?.baseUrl && message?.values?.apiKey) { const openAiModels = await getOpenAiModels( @@ -1407,6 +1457,11 @@ export const webviewMessageHandler = async ( await updateGlobalState("enableCheckpoints", enableCheckpoints) await provider.postStateToWebview() break + case "checkpointTimeout": + const checkpointTimeout = message.value ?? DEFAULT_CHECKPOINT_TIMEOUT_SECONDS + await updateGlobalState("checkpointTimeout", checkpointTimeout) + await provider.postStateToWebview() + break case "browserViewportSize": const browserViewportSize = message.text ?? "900x600" await updateGlobalState("browserViewportSize", browserViewportSize) @@ -1798,6 +1853,14 @@ export const webviewMessageHandler = async ( await updateGlobalState("includeDiagnosticMessages", includeValue) await provider.postStateToWebview() break + case "includeCurrentTime": + await updateGlobalState("includeCurrentTime", message.bool ?? true) + await provider.postStateToWebview() + break + case "includeCurrentCost": + await updateGlobalState("includeCurrentCost", message.bool ?? true) + await provider.postStateToWebview() + break case "maxDiagnosticMessages": await updateGlobalState("maxDiagnosticMessages", message.value ?? 
50) await provider.postStateToWebview() @@ -2090,15 +2153,15 @@ export const webviewMessageHandler = async ( await refreshOrganizationModes(message, provider, updateGlobalState) // Flush and refetch models - await flushModels("kilocode-openrouter") + await flushModels("kilocode") const models = await getModels({ - provider: "kilocode-openrouter", + provider: "kilocode", kilocodeOrganizationId: message.apiConfiguration.kilocodeOrganizationId, kilocodeToken: message.apiConfiguration.kilocodeToken, }) provider.postMessageToWebview({ type: "routerModels", - routerModels: { "kilocode-openrouter": models } as Record, + routerModels: { kilocode: models } as Record, }) } } catch (error) { @@ -3038,6 +3101,12 @@ export const webviewMessageHandler = async ( settings.codebaseIndexVercelAiGatewayApiKey, ) } + if (settings.codebaseIndexOpenRouterApiKey !== undefined) { + await provider.contextProxy.storeSecret( + "codebaseIndexOpenRouterApiKey", + settings.codebaseIndexOpenRouterApiKey, + ) + } // Send success response first - settings are saved regardless of validation await provider.postMessageToWebview({ @@ -3175,6 +3244,7 @@ export const webviewMessageHandler = async ( const hasVercelAiGatewayApiKey = !!(await provider.context.secrets.get( "codebaseIndexVercelAiGatewayApiKey", )) + const hasOpenRouterApiKey = !!(await provider.context.secrets.get("codebaseIndexOpenRouterApiKey")) provider.postMessageToWebview({ type: "codeIndexSecretStatus", @@ -3185,6 +3255,7 @@ export const webviewMessageHandler = async ( hasGeminiApiKey, hasMistralApiKey, hasVercelAiGatewayApiKey, + hasOpenRouterApiKey, }, }) break @@ -3208,18 +3279,26 @@ export const webviewMessageHandler = async ( return } if (manager.isFeatureEnabled && manager.isFeatureConfigured) { - if (!manager.isInitialized) { - await manager.initialize(provider.contextProxy) - } - - // startIndexing now handles error recovery internally - manager.startIndexing() - - // If startIndexing recovered from error, we need to 
reinitialize - if (!manager.isInitialized) { - await manager.initialize(provider.contextProxy) - // Try starting again after initialization + // Mimic extension startup behavior: initialize first, which will + // check if Qdrant container is active and reuse existing collection + await manager.initialize(provider.contextProxy) + + // Only call startIndexing if we're in a state that requires it + // (e.g., Standby or Error). If already Indexed or Indexing, the + // initialize() call above will have already started the watcher. + const currentState = manager.state + if (currentState === "Standby" || currentState === "Error") { + // startIndexing now handles error recovery internally manager.startIndexing() + + // If startIndexing recovered from error, we need to reinitialize + if (!manager.isInitialized) { + await manager.initialize(provider.contextProxy) + // Try starting again after initialization + if (manager.state === "Standby" || manager.state === "Error") { + manager.startIndexing() + } + } } } } catch (error) { diff --git a/src/extension.ts b/src/extension.ts index 7b7fbbf4a8e..d6d5afb12fd 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -45,6 +45,7 @@ import { initializeI18n } from "./i18n" import { registerGhostProvider } from "./services/ghost" // kilocode_change import { registerMainThreadForwardingLogger } from "./utils/fowardingLogger" // kilocode_change import { getKiloCodeWrapperProperties } from "./core/kilocode/wrapper" // kilocode_change +import { flushModels, getModels } from "./api/providers/fetchers/modelCache" /** * Built using https://github.com/microsoft/vscode-webview-ui-toolkit @@ -140,13 +141,13 @@ export async function activate(context: vscode.ExtensionContext) { if (manager) { codeIndexManagers.push(manager) - try { - await manager.initialize(contextProxy) - } catch (error) { + // Initialize in background; do not block extension activation + void manager.initialize(contextProxy).catch((error) => { + const message = error 
instanceof Error ? error.message : String(error) outputChannel.appendLine( - `[CodeIndexManager] Error during background CodeIndexManager configuration/indexing for ${folder.uri.fsPath}: ${error.message || error}`, + `[CodeIndexManager] Error during background CodeIndexManager configuration/indexing for ${folder.uri.fsPath}: ${message}`, ) - } + }) context.subscriptions.push(manager) } @@ -171,6 +172,34 @@ export async function activate(context: vscode.ExtensionContext) { ) } } + + // Handle Roo models cache based on auth state + const handleRooModelsCache = async () => { + try { + await flushModels("roo") + + if (data.state === "active-session") { + // Reload models with the new auth token + const sessionToken = cloudService?.authService?.getSessionToken() + await getModels({ + provider: "roo", + baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy", + apiKey: sessionToken, + }) + cloudLogger(`[authStateChangedHandler] Reloaded Roo models cache for active session`) + } else { + cloudLogger(`[authStateChangedHandler] Flushed Roo models cache on logout`) + } + } catch (error) { + cloudLogger( + `[authStateChangedHandler] Failed to handle Roo models cache: ${error instanceof Error ? error.message : String(error)}`, + ) + } + } + + if (data.state === "active-session" || data.state === "logged-out") { + // kilocode_change: await handleRooModelsCache() + } } settingsUpdatedHandler = async () => { diff --git a/src/i18n/locales/ar/common.json b/src/i18n/locales/ar/common.json index f5eef0b0f69..e90f7990749 100644 --- a/src/i18n/locales/ar/common.json +++ b/src/i18n/locales/ar/common.json @@ -42,6 +42,12 @@ "checkpoint_timeout": "انتهى الوقت أثناء محاولة استرجاع الحالة.", "checkpoint_failed": "فشل في استرجاع الحالة.", "git_not_installed": "Git مطلوب لميزة نقاط الحفظ. 
رجاءً ثبت Git لتفعيل نقاط الحفظ.", + "checkpoint_no_first": "لا توجد نقطة حفظ أولى للمقارنة.", + "checkpoint_no_previous": "لا توجد نقطة حفظ سابقة للمقارنة.", + "checkpoint_no_changes": "لم يتم العثور على تغييرات.", + "checkpoint_diff_with_next": "التغييرات مقارنة بنقطة الحفظ التالية", + "checkpoint_diff_since_first": "التغييرات منذ نقطة الحفظ الأولى", + "checkpoint_diff_to_current": "التغييرات إلى مساحة العمل الحالية", "nested_git_repos_warning": "تحذير: تم العثور على مستودعات git متداخلة في هذه المسارات. قد يسبب هذا سلوك غير متوقع مع نقاط الحفظ: {{paths}}", "no_workspace": "افتح مجلد مشروع أولاً", "update_support_prompt": "فشل تحديث الرسالة المساعدة", diff --git a/src/i18n/locales/ar/embeddings.json b/src/i18n/locales/ar/embeddings.json index 81fa87c45b6..2ac2453121e 100644 --- a/src/i18n/locales/ar/embeddings.json +++ b/src/i18n/locales/ar/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "إعدادات OpenAI المتوافق مفقودة لإنشاء التضمين", "geminiConfigMissing": "إعدادات Gemini مفقودة لإنشاء التضمين", "mistralConfigMissing": "إعدادات Mistral مفقودة لإنشاء التضمين", + "openRouterConfigMissing": "إعدادات OpenRouter مفقودة لإنشاء التضمين", "vercelAiGatewayConfigMissing": "إعدادات Vercel AI Gateway مفقودة لإنشاء التضمين", "invalidEmbedderType": "نوع التضمين المُعدّ غير صالح: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "لا يمكن تحديد أبعاد المتجه للنموذج '{{modelId}}' مع المزوّد '{{provider}}'. يرجى التأكد من ضبط 'أبعاد التضمين' بشكل صحيح في إعدادات مزوّد OpenAI المتوافق.", diff --git a/src/i18n/locales/ca/common.json b/src/i18n/locales/ca/common.json index ad25e3c7a60..0c26177e3e4 100644 --- a/src/i18n/locales/ca/common.json +++ b/src/i18n/locales/ca/common.json @@ -41,6 +41,12 @@ "checkpoint_timeout": "S'ha esgotat el temps en intentar restaurar el punt de control.", "checkpoint_failed": "Ha fallat la restauració del punt de control.", "git_not_installed": "Git és necessari per a la funció de punts de control. 
Si us plau, instal·la Git per activar els punts de control.", + "checkpoint_no_first": "No hi ha un primer punt de control per comparar.", + "checkpoint_no_previous": "No hi ha un punt de control anterior per comparar.", + "checkpoint_no_changes": "No s'han trobat canvis.", + "checkpoint_diff_with_next": "Canvis comparats amb el següent punt de control", + "checkpoint_diff_since_first": "Canvis des del primer punt de control", + "checkpoint_diff_to_current": "Canvis a l'espai de treball actual", "nested_git_repos_warning": "Els punts de control estan deshabilitats perquè s'ha detectat un repositori git niat a: {{path}}. Per utilitzar punts de control, si us plau elimina o reubica aquest repositori git niat.", "no_workspace": "Si us plau, obre primer una carpeta de projecte", "update_support_prompt": "Ha fallat l'actualització del missatge de suport", diff --git a/src/i18n/locales/ca/embeddings.json b/src/i18n/locales/ca/embeddings.json index 548dec0717d..96cef1d6b77 100644 --- a/src/i18n/locales/ca/embeddings.json +++ b/src/i18n/locales/ca/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Falta la configuració compatible amb OpenAI per crear l'embedder", "geminiConfigMissing": "Falta la configuració de Gemini per crear l'embedder", "mistralConfigMissing": "Falta la configuració de Mistral per crear l'embedder", + "openRouterConfigMissing": "Falta la configuració d'OpenRouter per crear l'embedder", "vercelAiGatewayConfigMissing": "Falta la configuració de Vercel AI Gateway per crear l'embedder", "invalidEmbedderType": "Tipus d'embedder configurat no vàlid: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "No s'ha pogut determinar la dimensió del vector per al model '{{modelId}}' amb el proveïdor '{{provider}}'. 
Assegura't que la 'Dimensió d'incrustació' estigui configurada correctament als paràmetres del proveïdor compatible amb OpenAI.", diff --git a/src/i18n/locales/cs/common.json b/src/i18n/locales/cs/common.json index 3781a0cf4a7..93b0f214180 100644 --- a/src/i18n/locales/cs/common.json +++ b/src/i18n/locales/cs/common.json @@ -42,6 +42,12 @@ "checkpoint_timeout": "Vypršel časový limit při pokusu o obnovení checkpointu.", "checkpoint_failed": "Obnovení checkpointu selhalo.", "git_not_installed": "Git je vyžadován pro funkci checkpointů. Prosím nainstaluj Git pro aktivaci checkpointů.", + "checkpoint_no_first": "Žádný první checkpoint k porovnání.", + "checkpoint_no_previous": "Žádný předchozí checkpoint k porovnání.", + "checkpoint_no_changes": "Nebyly nalezeny žádné změny.", + "checkpoint_diff_with_next": "Změny porovnané s dalším checkpointem", + "checkpoint_diff_since_first": "Změny od prvního checkpointu", + "checkpoint_diff_to_current": "Změny k aktuálnímu workspace", "nested_git_repos_warning": "Varování: Nalezeny vnořené git repozitáře v těchto cestách. 
To může způsobit neočekávané chování s kontrolními body: {{paths}}", "no_workspace": "Nejprve prosím otevři složku projektu", "update_support_prompt": "Aktualizace support promptu selhala", diff --git a/src/i18n/locales/cs/embeddings.json b/src/i18n/locales/cs/embeddings.json index 7ad2fc5757d..a0bc4620fba 100644 --- a/src/i18n/locales/cs/embeddings.json +++ b/src/i18n/locales/cs/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Chybí konfigurace OpenAI Compatible pro vytvoření embedderu", "geminiConfigMissing": "Chybí konfigurace Gemini pro vytvoření embedderu", "mistralConfigMissing": "Chybí konfigurace Mistral pro vytvoření embedderu", + "openRouterConfigMissing": "Chybí konfigurace OpenRouter pro vytvoření embedderu", "vercelAiGatewayConfigMissing": "Chybí konfigurace Vercel AI Gateway pro vytvoření embedderu", "invalidEmbedderType": "Nakonfigurován neplatný typ embedderu: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Nelze určit rozměr vektoru pro model '{{modelId}}' s poskytovatelem '{{provider}}'. Ujisti se prosím, že 'Rozměr Embeddingu' je správně nastaven v nastavení OpenAI-Compatible poskytovatele.", diff --git a/src/i18n/locales/de/common.json b/src/i18n/locales/de/common.json index ae0d6051637..4949710ce0d 100644 --- a/src/i18n/locales/de/common.json +++ b/src/i18n/locales/de/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "Zeitüberschreitung beim Versuch, den Checkpoint wiederherzustellen.", "checkpoint_failed": "Fehler beim Wiederherstellen des Checkpoints.", "git_not_installed": "Git ist für die Checkpoint-Funktion erforderlich. 
Bitte installiere Git, um Checkpoints zu aktivieren.", + "checkpoint_no_first": "Kein erster Checkpoint zum Vergleich vorhanden.", + "checkpoint_no_previous": "Kein vorheriger Checkpoint zum Vergleich vorhanden.", + "checkpoint_no_changes": "Keine Änderungen gefunden.", + "checkpoint_diff_with_next": "Änderungen im Vergleich zum nächsten Checkpoint", + "checkpoint_diff_since_first": "Änderungen seit dem ersten Checkpoint", + "checkpoint_diff_to_current": "Änderungen am aktuellen Arbeitsbereich", "nested_git_repos_warning": "Checkpoints sind deaktiviert, da ein verschachteltes Git-Repository erkannt wurde unter: {{path}}. Um Checkpoints zu verwenden, entferne oder verschiebe bitte dieses verschachtelte Git-Repository.", "no_workspace": "Bitte öffne zuerst einen Projektordner", "update_support_prompt": "Fehler beim Aktualisieren der Support-Nachricht", diff --git a/src/i18n/locales/de/embeddings.json b/src/i18n/locales/de/embeddings.json index 0bb1ff73427..cfeb17d741a 100644 --- a/src/i18n/locales/de/embeddings.json +++ b/src/i18n/locales/de/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "OpenAI-kompatible Konfiguration fehlt für die Erstellung des Embedders", "geminiConfigMissing": "Gemini-Konfiguration fehlt für die Erstellung des Embedders", "mistralConfigMissing": "Mistral-Konfiguration fehlt für die Erstellung des Embedders", + "openRouterConfigMissing": "OpenRouter-Konfiguration fehlt für die Erstellung des Embedders", "vercelAiGatewayConfigMissing": "Vercel AI Gateway-Konfiguration fehlt für die Erstellung des Embedders", "invalidEmbedderType": "Ungültiger Embedder-Typ konfiguriert: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Konnte die Vektordimension für Modell '{{modelId}}' mit Anbieter '{{provider}}' nicht bestimmen. 
Stelle sicher, dass die 'Embedding-Dimension' in den OpenAI-kompatiblen Anbietereinstellungen korrekt eingestellt ist.", diff --git a/src/i18n/locales/en/common.json b/src/i18n/locales/en/common.json index dd4bbcde6b4..8e7b42b1556 100644 --- a/src/i18n/locales/en/common.json +++ b/src/i18n/locales/en/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "Timed out when attempting to restore checkpoint.", "checkpoint_failed": "Failed to restore checkpoint.", "git_not_installed": "Git is required for the checkpoints feature. Please install Git to enable checkpoints.", + "checkpoint_no_first": "No first checkpoint to compare.", + "checkpoint_no_previous": "No previous checkpoint to compare.", + "checkpoint_no_changes": "No changes found.", + "checkpoint_diff_with_next": "Changes compared with next checkpoint", + "checkpoint_diff_since_first": "Changes since first checkpoint", + "checkpoint_diff_to_current": "Changes to current workspace", "nested_git_repos_warning": "Checkpoints are disabled because a nested git repository was detected at: {{path}}. 
To use checkpoints, please remove or relocate this nested git repository.", "no_workspace": "Please open a project folder first", "update_support_prompt": "Failed to update support prompt", diff --git a/src/i18n/locales/en/embeddings.json b/src/i18n/locales/en/embeddings.json index ef2088c05e9..c6bcc4e2651 100644 --- a/src/i18n/locales/en/embeddings.json +++ b/src/i18n/locales/en/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "OpenAI Compatible configuration missing for embedder creation", "geminiConfigMissing": "Gemini configuration missing for embedder creation", "mistralConfigMissing": "Mistral configuration missing for embedder creation", + "openRouterConfigMissing": "OpenRouter configuration missing for embedder creation", "vercelAiGatewayConfigMissing": "Vercel AI Gateway configuration missing for embedder creation", "invalidEmbedderType": "Invalid embedder type configured: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Could not determine vector dimension for model '{{modelId}}' with provider '{{provider}}'. Please ensure the 'Embedding Dimension' is correctly set in the OpenAI-Compatible provider settings.", diff --git a/src/i18n/locales/es/common.json b/src/i18n/locales/es/common.json index 686122a6846..beba1a32ec6 100644 --- a/src/i18n/locales/es/common.json +++ b/src/i18n/locales/es/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "Se agotó el tiempo al intentar restaurar el punto de control.", "checkpoint_failed": "Error al restaurar el punto de control.", "git_not_installed": "Git es necesario para la función de puntos de control. 
Por favor, instala Git para activar los puntos de control.", + "checkpoint_no_first": "No hay primer punto de control para comparar.", + "checkpoint_no_previous": "No hay punto de control anterior para comparar.", + "checkpoint_no_changes": "No se encontraron cambios.", + "checkpoint_diff_with_next": "Cambios comparados con el siguiente punto de control", + "checkpoint_diff_since_first": "Cambios desde el primer punto de control", + "checkpoint_diff_to_current": "Cambios en el espacio de trabajo actual", "nested_git_repos_warning": "Los puntos de control están deshabilitados porque se detectó un repositorio git anidado en: {{path}}. Para usar puntos de control, por favor elimina o reubica este repositorio git anidado.", "no_workspace": "Por favor, abre primero una carpeta de proyecto", "update_support_prompt": "Error al actualizar el mensaje de soporte", diff --git a/src/i18n/locales/es/embeddings.json b/src/i18n/locales/es/embeddings.json index 3cd1802a507..b5c1d3208c1 100644 --- a/src/i18n/locales/es/embeddings.json +++ b/src/i18n/locales/es/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Falta la configuración compatible con OpenAI para crear el incrustador", "geminiConfigMissing": "Falta la configuración de Gemini para crear el incrustador", "mistralConfigMissing": "Falta la configuración de Mistral para la creación del incrustador", + "openRouterConfigMissing": "Falta la configuración de OpenRouter para la creación del incrustador", "vercelAiGatewayConfigMissing": "Falta la configuración de Vercel AI Gateway para la creación del incrustador", "invalidEmbedderType": "Tipo de incrustador configurado inválido: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "No se pudo determinar la dimensión del vector para el modelo '{{modelId}}' con el proveedor '{{provider}}'. 
Asegúrate de que la 'Dimensión de incrustación' esté configurada correctamente en los ajustes del proveedor compatible con OpenAI.", diff --git a/src/i18n/locales/fr/common.json b/src/i18n/locales/fr/common.json index d517991ebf3..1a5cf3d0f18 100644 --- a/src/i18n/locales/fr/common.json +++ b/src/i18n/locales/fr/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "Expiration du délai lors de la tentative de rétablissement du checkpoint.", "checkpoint_failed": "Échec du rétablissement du checkpoint.", "git_not_installed": "Git est requis pour la fonctionnalité des points de contrôle. Veuillez installer Git pour activer les points de contrôle.", + "checkpoint_no_first": "Aucun premier point de contrôle à comparer.", + "checkpoint_no_previous": "Aucun point de contrôle précédent à comparer.", + "checkpoint_no_changes": "Aucun changement trouvé.", + "checkpoint_diff_with_next": "Modifications comparées au prochain point de contrôle", + "checkpoint_diff_since_first": "Modifications depuis le premier point de contrôle", + "checkpoint_diff_to_current": "Modifications de l'espace de travail actuel", "nested_git_repos_warning": "Les points de contrôle sont désactivés car un dépôt git imbriqué a été détecté à : {{path}}. 
Pour utiliser les points de contrôle, veuillez supprimer ou déplacer ce dépôt git imbriqué.", "no_workspace": "Veuillez d'abord ouvrir un espace de travail", "update_support_prompt": "Erreur lors de la mise à jour du prompt de support", diff --git a/src/i18n/locales/fr/embeddings.json b/src/i18n/locales/fr/embeddings.json index 84237dc4652..c1ab56c45ef 100644 --- a/src/i18n/locales/fr/embeddings.json +++ b/src/i18n/locales/fr/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Configuration compatible OpenAI manquante pour la création de l'embedder", "geminiConfigMissing": "Configuration Gemini manquante pour la création de l'embedder", "mistralConfigMissing": "Configuration Mistral manquante pour la création de l'embedder", + "openRouterConfigMissing": "Configuration OpenRouter manquante pour la création de l'embedder", "vercelAiGatewayConfigMissing": "Configuration Vercel AI Gateway manquante pour la création de l'embedder", "invalidEmbedderType": "Type d'embedder configuré invalide : {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Impossible de déterminer la dimension du vecteur pour le modèle '{{modelId}}' avec le fournisseur '{{provider}}'. 
Assure-toi que la 'Dimension d'embedding' est correctement définie dans les paramètres du fournisseur compatible OpenAI.", diff --git a/src/i18n/locales/hi/common.json b/src/i18n/locales/hi/common.json index 83b84ddef9d..cf4fb1fc9cf 100644 --- a/src/i18n/locales/hi/common.json +++ b/src/i18n/locales/hi/common.json @@ -36,7 +36,13 @@ "could_not_open_file_generic": "फ़ाइल नहीं खोली जा सकी!", "checkpoint_timeout": "चेकपॉइंट को पुनर्स्थापित करने का प्रयास करते समय टाइमआउट हो गया।", "checkpoint_failed": "चेकपॉइंट पुनर्स्थापित करने में विफल।", - "git_not_installed": "चेकपॉइंट सुविधा के लिए Git आवश्यक है। कृपया चेकपॉइंट সক্ষম करने के लिए Git इंस्टॉल करें।", + "git_not_installed": "चेकपॉइंट सुविधा के लिए Git आवश्यक है। कृपया चेकपॉइंट सक्षम करने के लिए Git इंस्टॉल करें।", + "checkpoint_no_first": "तुलना करने के लिए कोई पहला चेकपॉइंट नहीं है।", + "checkpoint_no_previous": "तुलना करने के लिए कोई पिछला चेकपॉइंट नहीं है।", + "checkpoint_no_changes": "कोई बदलाव नहीं मिला।", + "checkpoint_diff_with_next": "अगले चेकपॉइंट के साथ तुलना किए गए बदलाव", + "checkpoint_diff_since_first": "पहले चेकपॉइंट के बाद से बदलाव", + "checkpoint_diff_to_current": "वर्तमान कार्यक्षेत्र में बदलाव", "nested_git_repos_warning": "चेकपॉइंट अक्षम हैं क्योंकि {{path}} पर नेस्टेड git रिपॉजिटरी का पता चला है। चेकपॉइंट का उपयोग करने के लिए, कृपया इस नेस्टेड git रिपॉजिटरी को हटाएं या स्थानांतरित करें।", "no_workspace": "कृपया पहले प्रोजेक्ट फ़ोल्डर खोलें", "update_support_prompt": "सपोर्ट प्रॉम्प्ट अपडेट करने में विफल", @@ -202,8 +208,7 @@ "getGroqApiKey": "ग्रोक एपीआई कुंजी प्राप्त करें", "claudeCode": { "pathLabel": "क्लाउड कोड पाथ", - "description": "आपके क्लाउड कोड CLI का वैकल्पिक पाथ। सेट न होने पर डिफ़ॉल्ट रूप से 'claude'।", - "placeholder": "डिफ़ॉल्ट: claude" + "description": "आपके क्लाउड कोड CLI का वैकल्पिक पाथ। सेट न होने पर डिफ़ॉल्ट रूप से 'claude'।" } } }, diff --git a/src/i18n/locales/hi/embeddings.json b/src/i18n/locales/hi/embeddings.json index 80c9f3440ae..f1840513383 100644 --- 
a/src/i18n/locales/hi/embeddings.json +++ b/src/i18n/locales/hi/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "एम्बेडर बनाने के लिए OpenAI संगत कॉन्फ़िगरेशन गायब है", "geminiConfigMissing": "एम्बेडर बनाने के लिए Gemini कॉन्फ़िगरेशन गायब है", "mistralConfigMissing": "एम्बेडर निर्माण के लिए मिस्ट्रल कॉन्फ़िगरेशन गायब है", + "openRouterConfigMissing": "एम्बेडर निर्माण के लिए OpenRouter कॉन्फ़िगरेशन गायब है", "vercelAiGatewayConfigMissing": "एम्बेडर निर्माण के लिए Vercel AI Gateway कॉन्फ़िगरेशन गायब है", "invalidEmbedderType": "अमान्य एम्बेडर प्रकार कॉन्फ़िगर किया गया: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "प्रदाता '{{provider}}' के साथ मॉडल '{{modelId}}' के लिए वेक्टर आयाम निर्धारित नहीं कर सका। कृपया सुनिश्चित करें कि OpenAI-संगत प्रदाता सेटिंग्स में 'एम्बेडिंग आयाम' सही तरीके से सेट है।", diff --git a/src/i18n/locales/id/common.json b/src/i18n/locales/id/common.json index beab716b33e..d16103da9ab 100644 --- a/src/i18n/locales/id/common.json +++ b/src/i18n/locales/id/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "Timeout saat mencoba memulihkan checkpoint.", "checkpoint_failed": "Gagal memulihkan checkpoint.", "git_not_installed": "Git diperlukan untuk fitur checkpoint. Silakan instal Git untuk mengaktifkan checkpoint.", + "checkpoint_no_first": "Tidak ada checkpoint pertama untuk dibandingkan.", + "checkpoint_no_previous": "Tidak ada checkpoint sebelumnya untuk dibandingkan.", + "checkpoint_no_changes": "Tidak ada perubahan yang ditemukan.", + "checkpoint_diff_with_next": "Perubahan dibandingkan dengan checkpoint berikutnya", + "checkpoint_diff_since_first": "Perubahan sejak checkpoint pertama", + "checkpoint_diff_to_current": "Perubahan ke ruang kerja saat ini", "nested_git_repos_warning": "Checkpoint dinonaktifkan karena repositori git bersarang terdeteksi di: {{path}}. 
Untuk menggunakan checkpoint, silakan hapus atau pindahkan repositori git bersarang ini.", "no_workspace": "Silakan buka folder proyek terlebih dahulu", "update_support_prompt": "Gagal memperbarui support prompt", diff --git a/src/i18n/locales/id/embeddings.json b/src/i18n/locales/id/embeddings.json index 34a8e837030..84410c235be 100644 --- a/src/i18n/locales/id/embeddings.json +++ b/src/i18n/locales/id/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Konfigurasi yang kompatibel dengan OpenAI tidak ada untuk membuat embedder", "geminiConfigMissing": "Konfigurasi Gemini tidak ada untuk membuat embedder", "mistralConfigMissing": "Konfigurasi Mistral hilang untuk pembuatan embedder", + "openRouterConfigMissing": "Konfigurasi OpenRouter hilang untuk pembuatan embedder", "vercelAiGatewayConfigMissing": "Konfigurasi Vercel AI Gateway hilang untuk pembuatan embedder", "invalidEmbedderType": "Tipe embedder yang dikonfigurasi tidak valid: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Tidak dapat menentukan dimensi vektor untuk model '{{modelId}}' dengan penyedia '{{provider}}'. Pastikan 'Dimensi Embedding' diatur dengan benar di pengaturan penyedia yang kompatibel dengan OpenAI.", diff --git a/src/i18n/locales/it/common.json b/src/i18n/locales/it/common.json index a2f40729be7..279e60f6b09 100644 --- a/src/i18n/locales/it/common.json +++ b/src/i18n/locales/it/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "Timeout durante il tentativo di ripristinare il checkpoint.", "checkpoint_failed": "Impossibile ripristinare il checkpoint.", "git_not_installed": "Git è richiesto per la funzione di checkpoint. 
Per favore, installa Git per abilitare i checkpoint.", + "checkpoint_no_first": "Nessun primo checkpoint da confrontare.", + "checkpoint_no_previous": "Nessun checkpoint precedente da confrontare.", + "checkpoint_no_changes": "Nessuna modifica trovata.", + "checkpoint_diff_with_next": "Modifiche confrontate con il checkpoint successivo", + "checkpoint_diff_since_first": "Modifiche dal primo checkpoint", + "checkpoint_diff_to_current": "Modifiche all'area di lavoro corrente", "nested_git_repos_warning": "I checkpoint sono disabilitati perché è stato rilevato un repository git annidato in: {{path}}. Per utilizzare i checkpoint, rimuovi o sposta questo repository git annidato.", "no_workspace": "Per favore, apri prima una cartella di progetto", "update_support_prompt": "Errore durante l'aggiornamento del messaggio di supporto", diff --git a/src/i18n/locales/it/embeddings.json b/src/i18n/locales/it/embeddings.json index e4b9281b83d..6a8a266ef9a 100644 --- a/src/i18n/locales/it/embeddings.json +++ b/src/i18n/locales/it/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Configurazione compatibile con OpenAI mancante per la creazione dell'embedder", "geminiConfigMissing": "Configurazione Gemini mancante per la creazione dell'embedder", "mistralConfigMissing": "Configurazione di Mistral mancante per la creazione dell'embedder", + "openRouterConfigMissing": "Configurazione di OpenRouter mancante per la creazione dell'embedder", "vercelAiGatewayConfigMissing": "Configurazione di Vercel AI Gateway mancante per la creazione dell'embedder", "invalidEmbedderType": "Tipo di embedder configurato non valido: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Impossibile determinare la dimensione del vettore per il modello '{{modelId}}' con il provider '{{provider}}'. 
Assicurati che la 'Dimensione di embedding' sia impostata correttamente nelle impostazioni del provider compatibile con OpenAI.", diff --git a/src/i18n/locales/ja/common.json b/src/i18n/locales/ja/common.json index 0ae8b2c2edb..87ee5e4df65 100644 --- a/src/i18n/locales/ja/common.json +++ b/src/i18n/locales/ja/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "チェックポイントの復元を試みる際にタイムアウトしました。", "checkpoint_failed": "チェックポイントの復元に失敗しました。", "git_not_installed": "チェックポイント機能にはGitが必要です。チェックポイントを有効にするにはGitをインストールしてください。", + "checkpoint_no_first": "比較する最初のチェックポイントがありません。", + "checkpoint_no_previous": "比較する前のチェックポイントがありません。", + "checkpoint_no_changes": "変更は見つかりませんでした。", + "checkpoint_diff_with_next": "次のチェックポイントと比較した変更点", + "checkpoint_diff_since_first": "最初のチェックポイントからの変更点", + "checkpoint_diff_to_current": "現在のワークスペースへの変更点", "nested_git_repos_warning": "{{path}} でネストされたgitリポジトリが検出されたため、チェックポイントが無効になっています。チェックポイントを使用するには、このネストされたgitリポジトリを削除または移動してください。", "no_workspace": "まずプロジェクトフォルダを開いてください", "update_support_prompt": "サポートメッセージの更新に失敗しました", diff --git a/src/i18n/locales/ja/embeddings.json b/src/i18n/locales/ja/embeddings.json index 73e287dfc99..7282aac5de2 100644 --- a/src/i18n/locales/ja/embeddings.json +++ b/src/i18n/locales/ja/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "エンベッダー作成のためのOpenAI互換設定がありません", "geminiConfigMissing": "エンベッダー作成のためのGemini設定がありません", "mistralConfigMissing": "エンベッダー作成のためのMistral設定がありません", + "openRouterConfigMissing": "エンベッダー作成のためのOpenRouter設定がありません", "vercelAiGatewayConfigMissing": "エンベッダー作成のためのVercel AI Gateway設定がありません", "invalidEmbedderType": "無効なエンベッダータイプが設定されています: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "プロバイダー '{{provider}}' のモデル '{{modelId}}' の埋め込み次元を決定できませんでした。OpenAI互換プロバイダー設定で「埋め込み次元」が正しく設定されていることを確認してください。", diff --git a/src/i18n/locales/ko/common.json b/src/i18n/locales/ko/common.json index a7cf94e70c2..e888ec4e82e 100644 --- a/src/i18n/locales/ko/common.json +++ 
b/src/i18n/locales/ko/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "체크포인트 복원을 시도하는 중 시간 초과되었습니다.", "checkpoint_failed": "체크포인트 복원에 실패했습니다.", "git_not_installed": "체크포인트 기능을 사용하려면 Git이 필요합니다. 체크포인트를 활성화하려면 Git을 설치하세요.", + "checkpoint_no_first": "비교할 첫 번째 체크포인트가 없습니다.", + "checkpoint_no_previous": "비교할 이전 체크포인트가 없습니다.", + "checkpoint_no_changes": "변경된 내용이 없습니다.", + "checkpoint_diff_with_next": "다음 체크포인트와 비교한 변경 사항", + "checkpoint_diff_since_first": "첫 번째 체크포인트 이후의 변경 사항", + "checkpoint_diff_to_current": "현재 작업 공간으로의 변경 사항", "nested_git_repos_warning": "{{path}}에서 중첩된 git 저장소가 감지되어 체크포인트가 비활성화되었습니다. 체크포인트를 사용하려면 이 중첩된 git 저장소를 제거하거나 이동해주세요.", "no_workspace": "먼저 프로젝트 폴더를 열어주세요", "update_support_prompt": "지원 프롬프트 업데이트에 실패했습니다", diff --git a/src/i18n/locales/ko/embeddings.json b/src/i18n/locales/ko/embeddings.json index daaf25b350a..1a252169f25 100644 --- a/src/i18n/locales/ko/embeddings.json +++ b/src/i18n/locales/ko/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "임베더 생성을 위한 OpenAI 호환 구성이 누락되었습니다", "geminiConfigMissing": "임베더 생성을 위한 Gemini 구성이 누락되었습니다", "mistralConfigMissing": "임베더 생성을 위한 Mistral 구성이 없습니다", + "openRouterConfigMissing": "임베더 생성을 위한 OpenRouter 구성이 없습니다", "vercelAiGatewayConfigMissing": "임베더 생성을 위한 Vercel AI Gateway 구성이 없습니다", "invalidEmbedderType": "잘못된 임베더 유형이 구성되었습니다: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "프로바이더 '{{provider}}'의 모델 '{{modelId}}'에 대한 벡터 차원을 결정할 수 없습니다. OpenAI 호환 프로바이더 설정에서 '임베딩 차원'이 올바르게 설정되어 있는지 확인하세요.", diff --git a/src/i18n/locales/nl/common.json b/src/i18n/locales/nl/common.json index 2122ad8968a..79bf92e821c 100644 --- a/src/i18n/locales/nl/common.json +++ b/src/i18n/locales/nl/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "Time-out bij het herstellen van checkpoint.", "checkpoint_failed": "Herstellen van checkpoint mislukt.", "git_not_installed": "Git is vereist voor de checkpoint-functie. 
Installeer Git om checkpoints in te schakelen.", + "checkpoint_no_first": "Geen eerste checkpoint om mee te vergelijken.", + "checkpoint_no_previous": "Geen vorig checkpoint om mee te vergelijken.", + "checkpoint_no_changes": "Geen wijzigingen gevonden.", + "checkpoint_diff_with_next": "Wijzigingen vergeleken met volgend checkpoint", + "checkpoint_diff_since_first": "Wijzigingen sinds eerste checkpoint", + "checkpoint_diff_to_current": "Wijzigingen in huidige werkruimte", "nested_git_repos_warning": "Checkpoints zijn uitgeschakeld omdat een geneste git-repository is gedetecteerd op: {{path}}. Om checkpoints te gebruiken, verwijder of verplaats deze geneste git-repository.", "no_workspace": "Open eerst een projectmap", "update_support_prompt": "Bijwerken van ondersteuningsprompt mislukt", diff --git a/src/i18n/locales/nl/embeddings.json b/src/i18n/locales/nl/embeddings.json index 03f1bfb2ecf..868407765d8 100644 --- a/src/i18n/locales/nl/embeddings.json +++ b/src/i18n/locales/nl/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "OpenAI-compatibele configuratie ontbreekt voor het maken van embedder", "geminiConfigMissing": "Gemini-configuratie ontbreekt voor het maken van embedder", "mistralConfigMissing": "Mistral-configuratie ontbreekt voor het maken van de embedder", + "openRouterConfigMissing": "OpenRouter-configuratie ontbreekt voor het maken van de embedder", "vercelAiGatewayConfigMissing": "Vercel AI Gateway-configuratie ontbreekt voor het maken van de embedder", "invalidEmbedderType": "Ongeldig embedder-type geconfigureerd: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Kan de vectordimensie voor model '{{modelId}}' met provider '{{provider}}' niet bepalen. 
Zorg ervoor dat de 'Embedding Dimensie' correct is ingesteld in de OpenAI-compatibele provider-instellingen.", diff --git a/src/i18n/locales/pl/common.json b/src/i18n/locales/pl/common.json index 1745fa4f9b0..80ba19cbe18 100644 --- a/src/i18n/locales/pl/common.json +++ b/src/i18n/locales/pl/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "Upłynął limit czasu podczas próby przywrócenia punktu kontrolnego.", "checkpoint_failed": "Nie udało się przywrócić punktu kontrolnego.", "git_not_installed": "Funkcja punktów kontrolnych wymaga oprogramowania Git. Zainstaluj Git, aby włączyć punkty kontrolne.", + "checkpoint_no_first": "Brak pierwszego punktu kontrolnego do porównania.", + "checkpoint_no_previous": "Brak poprzedniego punktu kontrolnego do porównania.", + "checkpoint_no_changes": "Nie znaleziono zmian.", + "checkpoint_diff_with_next": "Zmiany w porównaniu z następnym punktem kontrolnym", + "checkpoint_diff_since_first": "Zmiany od pierwszego punktu kontrolnego", + "checkpoint_diff_to_current": "Zmiany w bieżącym obszarze roboczym", "nested_git_repos_warning": "Punkty kontrolne są wyłączone, ponieważ wykryto zagnieżdżone repozytorium git w: {{path}}. 
Aby używać punktów kontrolnych, usuń lub przenieś to zagnieżdżone repozytorium git.", "no_workspace": "Najpierw otwórz folder projektu", "update_support_prompt": "Nie udało się zaktualizować komunikatu wsparcia", diff --git a/src/i18n/locales/pl/embeddings.json b/src/i18n/locales/pl/embeddings.json index 9d272b7742c..757ba3edccb 100644 --- a/src/i18n/locales/pl/embeddings.json +++ b/src/i18n/locales/pl/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Brak konfiguracji kompatybilnej z OpenAI do utworzenia embeddera", "geminiConfigMissing": "Brak konfiguracji Gemini do utworzenia embeddera", "mistralConfigMissing": "Brak konfiguracji Mistral do utworzenia embeddera", + "openRouterConfigMissing": "Brak konfiguracji OpenRouter do utworzenia embeddera", "vercelAiGatewayConfigMissing": "Brak konfiguracji Vercel AI Gateway do utworzenia embeddera", "invalidEmbedderType": "Skonfigurowano nieprawidłowy typ embeddera: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Nie można określić wymiaru wektora dla modelu '{{modelId}}' z dostawcą '{{provider}}'. Upewnij się, że 'Wymiar osadzania' jest poprawnie ustawiony w ustawieniach dostawcy kompatybilnego z OpenAI.", diff --git a/src/i18n/locales/pt-BR/common.json b/src/i18n/locales/pt-BR/common.json index 1edf789415a..88a8998472a 100644 --- a/src/i18n/locales/pt-BR/common.json +++ b/src/i18n/locales/pt-BR/common.json @@ -41,6 +41,12 @@ "checkpoint_timeout": "Tempo esgotado ao tentar restaurar o ponto de verificação.", "checkpoint_failed": "Falha ao restaurar o ponto de verificação.", "git_not_installed": "O Git é necessário para o recurso de checkpoints. 
Por favor, instale o Git para habilitar os checkpoints.", + "checkpoint_no_first": "Nenhum primeiro ponto de verificação para comparar.", + "checkpoint_no_previous": "Nenhum ponto de verificação anterior para comparar.", + "checkpoint_no_changes": "Nenhuma alteração encontrada.", + "checkpoint_diff_with_next": "Alterações comparadas com o próximo ponto de verificação", + "checkpoint_diff_since_first": "Alterações desde o primeiro ponto de verificação", + "checkpoint_diff_to_current": "Alterações no espaço de trabalho atual", "nested_git_repos_warning": "Os checkpoints estão desabilitados porque um repositório git aninhado foi detectado em: {{path}}. Para usar checkpoints, por favor remova ou realoque este repositório git aninhado.", "no_workspace": "Por favor, abra primeiro uma pasta de projeto", "update_support_prompt": "Falha ao atualizar o prompt de suporte", diff --git a/src/i18n/locales/pt-BR/embeddings.json b/src/i18n/locales/pt-BR/embeddings.json index c2c2d79bb9c..c3b60da9d7d 100644 --- a/src/i18n/locales/pt-BR/embeddings.json +++ b/src/i18n/locales/pt-BR/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Configuração compatível com OpenAI ausente para criação do embedder", "geminiConfigMissing": "Configuração do Gemini ausente para criação do embedder", "mistralConfigMissing": "Configuração do Mistral ausente para a criação do embedder", + "openRouterConfigMissing": "Configuração do OpenRouter ausente para a criação do embedder", "vercelAiGatewayConfigMissing": "Configuração do Vercel AI Gateway ausente para a criação do embedder", "invalidEmbedderType": "Tipo de embedder configurado inválido: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Não foi possível determinar a dimensão do vetor para o modelo '{{modelId}}' com o provedor '{{provider}}'. 
Certifique-se de que a 'Dimensão de Embedding' esteja configurada corretamente nas configurações do provedor compatível com OpenAI.", diff --git a/src/i18n/locales/ru/common.json b/src/i18n/locales/ru/common.json index 3cd300410f6..e72171d9318 100644 --- a/src/i18n/locales/ru/common.json +++ b/src/i18n/locales/ru/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "Превышено время ожидания при попытке восстановления контрольной точки.", "checkpoint_failed": "Не удалось восстановить контрольную точку.", "git_not_installed": "Для функции контрольных точек требуется Git. Пожалуйста, установите Git, чтобы включить контрольные точки.", + "checkpoint_no_first": "Нет первой контрольной точки для сравнения.", + "checkpoint_no_previous": "Нет предыдущей контрольной точки для сравнения.", + "checkpoint_no_changes": "Изменений не найдено.", + "checkpoint_diff_with_next": "Изменения по сравнению со следующей контрольной точкой", + "checkpoint_diff_since_first": "Изменения с первой контрольной точки", + "checkpoint_diff_to_current": "Изменения в текущем рабочем пространстве", "nested_git_repos_warning": "Контрольные точки отключены, поскольку обнаружен вложенный git-репозиторий в: {{path}}. 
Чтобы использовать контрольные точки, пожалуйста, удалите или переместите этот вложенный git-репозиторий.", "no_workspace": "Пожалуйста, сначала откройте папку проекта", "update_support_prompt": "Не удалось обновить промпт поддержки", diff --git a/src/i18n/locales/ru/embeddings.json b/src/i18n/locales/ru/embeddings.json index f9d0a115392..6b397a31475 100644 --- a/src/i18n/locales/ru/embeddings.json +++ b/src/i18n/locales/ru/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Отсутствует конфигурация, совместимая с OpenAI, для создания эмбеддера", "geminiConfigMissing": "Отсутствует конфигурация Gemini для создания эмбеддера", "mistralConfigMissing": "Конфигурация Mistral отсутствует для создания эмбеддера", + "openRouterConfigMissing": "Конфигурация OpenRouter отсутствует для создания эмбеддера", "vercelAiGatewayConfigMissing": "Конфигурация Vercel AI Gateway отсутствует для создания эмбеддера", "invalidEmbedderType": "Настроен недопустимый тип эмбеддера: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Не удалось определить размерность вектора для модели '{{modelId}}' с провайдером '{{provider}}'. 
Убедитесь, что 'Размерность эмбеддинга' правильно установлена в настройках провайдера, совместимого с OpenAI.", diff --git a/src/i18n/locales/th/common.json b/src/i18n/locales/th/common.json index 1020bfa4129..7ffda1f6e34 100644 --- a/src/i18n/locales/th/common.json +++ b/src/i18n/locales/th/common.json @@ -41,6 +41,12 @@ "could_not_open_file_generic": "ไม่สามารถเปิดไฟล์!", "checkpoint_timeout": "หมดเวลาเมื่อพยายามกู้คืน checkpoint", "checkpoint_failed": "ล้มเหลวในการกู้คืน checkpoint", + "checkpoint_no_first": "ไม่มี checkpoint แรกเพื่อเปรียบเทียบ", + "checkpoint_no_previous": "ไม่มี checkpoint ก่อนหน้าเพื่อเปรียบเทียบ", + "checkpoint_no_changes": "ไม่พบการเปลี่ยนแปลง", + "checkpoint_diff_with_next": "การเปลี่ยนแปลงเมื่อเปรียบเทียบกับ checkpoint ถัดไป", + "checkpoint_diff_since_first": "การเปลี่ยนแปลงตั้งแต่ checkpoint แรก", + "checkpoint_diff_to_current": "การเปลี่ยนแปลงไปยังพื้นที่ทำงานปัจจุบัน", "git_not_installed": "ต้องใช้ Git สำหรับฟีเจอร์ checkpoint กรุณาติดตั้ง Git เพื่อเปิดใช้งาน checkpoint", "nested_git_repos_warning": "คำเตือน: พบ git repositories ที่ซ้อนกันในเส้นทางเหล่านี้ อาจทำให้เกิดพฤติกรรมที่ไม่คาดคิดกับเชคพอยท์: {{paths}}", "no_workspace": "กรุณาเปิดโฟลเดอร์โปรเจ็กต์ก่อน", diff --git a/src/i18n/locales/th/embeddings.json b/src/i18n/locales/th/embeddings.json index 0e41d72bba1..df138da84b6 100644 --- a/src/i18n/locales/th/embeddings.json +++ b/src/i18n/locales/th/embeddings.json @@ -48,6 +48,7 @@ "geminiConfigMissing": "การกำหนดค่า Gemini หายไปสำหรับการสร้าง embedder", "mistralConfigMissing": "การกำหนดค่า Mistral หายไปสำหรับการสร้าง embedder", "vercelAiGatewayConfigMissing": "การกำหนดค่า Vercel AI Gateway หายไปสำหรับการสร้าง embedder", + "openRouterConfigMissing": "การกำหนดค่า OpenRouter หายไปสำหรับการสร้าง embedder", "invalidEmbedderType": "ประเภท embedder ที่กำหนดค่าไม่ถูกต้อง: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "ไม่สามารถกำหนดมิติเวกเตอร์สำหรับโมเดล '{{modelId}}' กับผู้ให้บริการ '{{provider}}' 
กรุณาตรวจสอบให้แน่ใจว่า 'Embedding Dimension' ถูกตั้งค่าอย่างถูกต้องในการตั้งค่าผู้ให้บริการ OpenAI-Compatible", "vectorDimensionNotDetermined": "ไม่สามารถกำหนดมิติเวกเตอร์สำหรับโมเดล '{{modelId}}' กับผู้ให้บริการ '{{provider}}' ตรวจสอบโปรไฟล์โมเดลหรือการกำหนดค่า", diff --git a/src/i18n/locales/tr/common.json b/src/i18n/locales/tr/common.json index eb1532a07c3..6d12cf1d89e 100644 --- a/src/i18n/locales/tr/common.json +++ b/src/i18n/locales/tr/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "Kontrol noktasını geri yüklemeye çalışırken zaman aşımına uğradı.", "checkpoint_failed": "Kontrol noktası geri yüklenemedi.", "git_not_installed": "Kontrol noktaları özelliği için Git gereklidir. Kontrol noktalarını etkinleştirmek için lütfen Git'i yükleyin.", + "checkpoint_no_first": "Karşılaştırılacak ilk kontrol noktası yok.", + "checkpoint_no_previous": "Karşılaştırılacak önceki kontrol noktası yok.", + "checkpoint_no_changes": "Değişiklik bulunamadı.", + "checkpoint_diff_with_next": "Sonraki kontrol noktasıyla karşılaştırılan değişiklikler", + "checkpoint_diff_since_first": "İlk kontrol noktasından bu yana yapılan değişiklikler", + "checkpoint_diff_to_current": "Mevcut çalışma alanındaki değişiklikler", "nested_git_repos_warning": "{{path}} konumunda iç içe git deposu tespit edildiği için kontrol noktaları devre dışı bırakıldı. 
Kontrol noktalarını kullanmak için lütfen bu iç içe git deposunu kaldırın veya taşıyın.", "no_workspace": "Lütfen önce bir proje klasörü açın", "update_support_prompt": "Destek istemi güncellenemedi", diff --git a/src/i18n/locales/tr/embeddings.json b/src/i18n/locales/tr/embeddings.json index 4d1182fb50f..9e96ab41860 100644 --- a/src/i18n/locales/tr/embeddings.json +++ b/src/i18n/locales/tr/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Gömücü oluşturmak için OpenAI uyumlu yapılandırması eksik", "geminiConfigMissing": "Gömücü oluşturmak için Gemini yapılandırması eksik", "mistralConfigMissing": "Gömücü oluşturmak için Mistral yapılandırması eksik", + "openRouterConfigMissing": "Gömücü oluşturmak için OpenRouter yapılandırması eksik", "vercelAiGatewayConfigMissing": "Gömücü oluşturmak için Vercel AI Gateway yapılandırması eksik", "invalidEmbedderType": "Geçersiz gömücü türü yapılandırıldı: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "'{{provider}}' sağlayıcısı ile '{{modelId}}' modeli için vektör boyutu belirlenemedi. 
OpenAI uyumlu sağlayıcı ayarlarında 'Gömme Boyutu'nun doğru ayarlandığından emin ol.", diff --git a/src/i18n/locales/uk/common.json b/src/i18n/locales/uk/common.json index cc5de4eb151..487a3ecac65 100644 --- a/src/i18n/locales/uk/common.json +++ b/src/i18n/locales/uk/common.json @@ -41,6 +41,12 @@ "could_not_open_file_generic": "Не вдалося відкрити файл!", "checkpoint_timeout": "Час очікування вичерпано під час спроби відновити checkpoint.", "checkpoint_failed": "Не вдалося відновити checkpoint.", + "checkpoint_no_first": "Немає першої контрольної точки для порівняння.", + "checkpoint_no_previous": "Немає попередньої контрольної точки для порівняння.", + "checkpoint_no_changes": "Змін не знайдено.", + "checkpoint_diff_with_next": "Зміни порівняно з наступною контрольною точкою", + "checkpoint_diff_since_first": "Зміни з першої контрольної точки", + "checkpoint_diff_to_current": "Зміни до поточного робочого простору", "git_not_installed": "Git потрібен для функції checkpoint. Будь ласка, встанови Git, щоб увімкнути checkpoint.", "nested_git_repos_warning": "Попередження: Знайдено вкладені git репозиторії за цими шляхами. 
Це може спричинити неочікувану поведінку з контрольними точками: {{paths}}", "no_workspace": "Будь ласка, спочатку відкрий папку проекту", diff --git a/src/i18n/locales/uk/embeddings.json b/src/i18n/locales/uk/embeddings.json index 9689ab764e2..5427d4574ef 100644 --- a/src/i18n/locales/uk/embeddings.json +++ b/src/i18n/locales/uk/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Відсутня конфігурація OpenAI Compatible для створення embedder", "geminiConfigMissing": "Відсутня конфігурація Gemini для створення embedder", "mistralConfigMissing": "Відсутня конфігурація Mistral для створення embedder", + "openRouterConfigMissing": "Відсутня конфігурація OpenRouter для створення embedder", "vercelAiGatewayConfigMissing": "Відсутня конфігурація Vercel AI Gateway для створення embedder", "invalidEmbedderType": "Налаштовано недійсний тип embedder: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Не вдалося визначити розмірність вектора для моделі '{{modelId}}' з провайдером '{{provider}}'. Будь ласка, переконайся, що 'Розмірність Embedding' правильно встановлена в налаштуваннях провайдера OpenAI-Compatible.", diff --git a/src/i18n/locales/vi/common.json b/src/i18n/locales/vi/common.json index 24b6afed325..9d061c489b7 100644 --- a/src/i18n/locales/vi/common.json +++ b/src/i18n/locales/vi/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "Đã hết thời gian khi cố gắng khôi phục điểm kiểm tra.", "checkpoint_failed": "Không thể khôi phục điểm kiểm tra.", "git_not_installed": "Yêu cầu Git cho tính năng điểm kiểm tra. 
Vui lòng cài đặt Git để bật điểm kiểm tra.", + "checkpoint_no_first": "Không có điểm kiểm tra đầu tiên để so sánh.", + "checkpoint_no_previous": "Không có điểm kiểm tra trước đó để so sánh.", + "checkpoint_no_changes": "Không tìm thấy thay đổi.", + "checkpoint_diff_with_next": "Các thay đổi được so sánh với điểm kiểm tra tiếp theo", + "checkpoint_diff_since_first": "Các thay đổi kể từ điểm kiểm tra đầu tiên", + "checkpoint_diff_to_current": "Các thay đổi đối với không gian làm việc hiện tại", "nested_git_repos_warning": "Điểm kiểm tra bị vô hiệu hóa vì phát hiện kho git lồng nhau tại: {{path}}. Để sử dụng điểm kiểm tra, vui lòng xóa hoặc di chuyển kho git lồng nhau này.", "no_workspace": "Vui lòng mở thư mục dự án trước", "update_support_prompt": "Không thể cập nhật lời nhắc hỗ trợ", diff --git a/src/i18n/locales/vi/embeddings.json b/src/i18n/locales/vi/embeddings.json index 8b3b9fce9dd..d3844c50890 100644 --- a/src/i18n/locales/vi/embeddings.json +++ b/src/i18n/locales/vi/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "Thiếu cấu hình tương thích OpenAI để tạo embedder", "geminiConfigMissing": "Thiếu cấu hình Gemini để tạo embedder", "mistralConfigMissing": "Thiếu cấu hình Mistral để tạo trình nhúng", + "openRouterConfigMissing": "Thiếu cấu hình OpenRouter để tạo trình nhúng", "vercelAiGatewayConfigMissing": "Thiếu cấu hình Vercel AI Gateway để tạo trình nhúng", "invalidEmbedderType": "Loại embedder được cấu hình không hợp lệ: {{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "Không thể xác định kích thước vector cho mô hình '{{modelId}}' với nhà cung cấp '{{provider}}'. 
Hãy đảm bảo 'Kích thước Embedding' được cài đặt đúng trong cài đặt nhà cung cấp tương thích OpenAI.", diff --git a/src/i18n/locales/zh-CN/common.json b/src/i18n/locales/zh-CN/common.json index 43b156c3b15..b82811fa362 100644 --- a/src/i18n/locales/zh-CN/common.json +++ b/src/i18n/locales/zh-CN/common.json @@ -41,7 +41,13 @@ "could_not_open_file_generic": "无法打开文件!", "checkpoint_timeout": "尝试恢复检查点时超时。", "checkpoint_failed": "恢复检查点失败。", - "git_not_installed": "存档点功能需要 Git。请安装 Git 以启用存档点。", + "git_not_installed": "检查点功能需要 Git。请安装 Git 以启用检查点。", + "checkpoint_no_first": "没有第一个存档点可供比较。", + "checkpoint_no_previous": "没有上一个存档点可供比较。", + "checkpoint_no_changes": "未发现任何更改。", + "checkpoint_diff_with_next": "与下一个存档点比较的更改", + "checkpoint_diff_since_first": "自第一个存档点以来的更改", + "checkpoint_diff_to_current": "对当前工作区的更改", "nested_git_repos_warning": "存档点已禁用,因为在 {{path}} 检测到嵌套的 git 仓库。要使用存档点,请移除或重新定位此嵌套的 git 仓库。", "no_workspace": "请先打开项目文件夹", "update_support_prompt": "更新支持消息失败", diff --git a/src/i18n/locales/zh-CN/embeddings.json b/src/i18n/locales/zh-CN/embeddings.json index dc4d93f0da2..e8b9a61d169 100644 --- a/src/i18n/locales/zh-CN/embeddings.json +++ b/src/i18n/locales/zh-CN/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "创建嵌入器缺少 OpenAI 兼容配置", "geminiConfigMissing": "创建嵌入器缺少 Gemini 配置", "mistralConfigMissing": "创建嵌入器时缺少 Mistral 配置", + "openRouterConfigMissing": "创建嵌入器时缺少 OpenRouter 配置", "vercelAiGatewayConfigMissing": "创建嵌入器时缺少 Vercel AI Gateway 配置", "invalidEmbedderType": "配置的嵌入器类型无效:{{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "无法确定提供商 '{{provider}}' 的模型 '{{modelId}}' 的向量维度。请确保在 OpenAI 兼容提供商设置中正确设置了「嵌入维度」。", diff --git a/src/i18n/locales/zh-TW/common.json b/src/i18n/locales/zh-TW/common.json index 165e539d9db..4d01600c881 100644 --- a/src/i18n/locales/zh-TW/common.json +++ b/src/i18n/locales/zh-TW/common.json @@ -37,6 +37,12 @@ "checkpoint_timeout": "嘗試恢復檢查點時超時。", "checkpoint_failed": "恢復檢查點失敗。", "git_not_installed": "存檔點功能需要 Git。請安裝 
Git 以啟用存檔點。", + "checkpoint_no_first": "沒有第一個存檔點可供比較。", + "checkpoint_no_previous": "沒有上一個存檔點可供比較。", + "checkpoint_no_changes": "未發現任何變更。", + "checkpoint_diff_with_next": "與下一個存檔點比較的變更", + "checkpoint_diff_since_first": "自第一個存檔點以來的變更", + "checkpoint_diff_to_current": "對目前工作區的變更", "nested_git_repos_warning": "存檔點已停用,因為在 {{path}} 偵測到巢狀的 git 儲存庫。要使用存檔點,請移除或重新配置此巢狀的 git 儲存庫。", "no_workspace": "請先開啟專案資料夾", "update_support_prompt": "更新支援訊息失敗", diff --git a/src/i18n/locales/zh-TW/embeddings.json b/src/i18n/locales/zh-TW/embeddings.json index 4ecb01b88d9..93b624a35c9 100644 --- a/src/i18n/locales/zh-TW/embeddings.json +++ b/src/i18n/locales/zh-TW/embeddings.json @@ -47,6 +47,7 @@ "openAiCompatibleConfigMissing": "建立嵌入器缺少 OpenAI 相容設定", "geminiConfigMissing": "建立嵌入器缺少 Gemini 設定", "mistralConfigMissing": "建立嵌入器時缺少 Mistral 設定", + "openRouterConfigMissing": "建立嵌入器時缺少 OpenRouter 設定", "vercelAiGatewayConfigMissing": "建立嵌入器時缺少 Vercel AI Gateway 設定", "invalidEmbedderType": "設定的嵌入器類型無效:{{embedderProvider}}", "vectorDimensionNotDeterminedOpenAiCompatible": "無法確定提供商 '{{provider}}' 的模型 '{{modelId}}' 的向量維度。請確保在 OpenAI 相容提供商設定中正確設定了「嵌入維度」。", diff --git a/src/integrations/misc/__tests__/line-counter.spec.ts b/src/integrations/misc/__tests__/line-counter.spec.ts index e7d0f85c8c5..68011cdc2ce 100644 --- a/src/integrations/misc/__tests__/line-counter.spec.ts +++ b/src/integrations/misc/__tests__/line-counter.spec.ts @@ -1,146 +1,98 @@ -import type { Mock } from "vitest" -import fs from "fs" -import { countFileLines } from "../line-counter" +import { describe, it, expect, vi, beforeEach } from "vitest" +import { countFileLines, countFileLinesAndTokens } from "../line-counter" +import { countTokens } from "../../../utils/countTokens" +import { Readable } from "stream" -// Mock the fs module -vitest.mock("fs", () => ({ +// Mock dependencies +vi.mock("fs", () => ({ default: { promises: { - access: vitest.fn(), + access: vi.fn(), }, constants: { F_OK: 0, }, + createReadStream: vi.fn(), }, - 
createReadStream: vitest.fn(), + createReadStream: vi.fn(), })) -// Mock readline -vitest.mock("readline", () => ({ - createInterface: vitest.fn().mockReturnValue({ - on: vitest.fn().mockImplementation(function (this: any, event, callback) { - if (event === "line" && this.mockLines) { - for (let i = 0; i < this.mockLines; i++) { - callback() - } - } - if (event === "close") { - callback() - } - return this - }), - mockLines: 0, - }), +vi.mock("../../../utils/countTokens", () => ({ + countTokens: vi.fn(), })) -describe("countFileLines", () => { - beforeEach(() => { - vitest.clearAllMocks() - }) +const mockCountTokens = vi.mocked(countTokens) - it("should throw error if file does not exist", async () => { - // Setup - ;(fs.promises.access as Mock).mockRejectedValueOnce(new Error("File not found")) +// Get the mocked fs module +const fs = await import("fs") +const mockCreateReadStream = vi.mocked(fs.createReadStream) +const mockFsAccess = vi.mocked(fs.default.promises.access) - // Test & Assert - await expect(countFileLines("non-existent-file.txt")).rejects.toThrow("File not found") +describe("line-counter", () => { + beforeEach(() => { + vi.clearAllMocks() }) - it("should return the correct line count for a file", async () => { - // Setup - ;(fs.promises.access as Mock).mockResolvedValueOnce(undefined) - - const mockEventEmitter = { - on: vitest.fn().mockImplementation(function (this: any, event, callback) { - if (event === "line") { - // Simulate 10 lines - for (let i = 0; i < 10; i++) { - callback() - } - } - if (event === "close") { - callback() - } - return this - }), - } - - const mockReadStream = { - on: vitest.fn().mockImplementation(function (this: any, _event, _callback) { - return this - }), - } - - const { createReadStream } = await import("fs") - vitest.mocked(createReadStream).mockReturnValueOnce(mockReadStream as any) - const readline = await import("readline") - vitest.mocked(readline.createInterface).mockReturnValueOnce(mockEventEmitter as any) - - // 
Test - const result = await countFileLines("test-file.txt") - - // Assert - expect(result).toBe(10) - expect(fs.promises.access).toHaveBeenCalledWith("test-file.txt", fs.constants.F_OK) - expect(createReadStream).toHaveBeenCalledWith("test-file.txt") + describe("countFileLinesAndTokens", () => { + it("should count lines and tokens without budget limit", async () => { + // Create a proper readable stream + const mockStream = new Readable({ + read() { + this.push("line1\n") + this.push("line2\n") + this.push("line3\n") + this.push(null) // End of stream + }, + }) + + mockCreateReadStream.mockReturnValue(mockStream as any) + mockFsAccess.mockResolvedValue(undefined) + + // Mock token counting - simulate ~10 tokens per chunk + mockCountTokens.mockResolvedValue(30) + + const result = await countFileLinesAndTokens("/test/file.txt") + + expect(result.lineCount).toBe(3) + expect(result.tokenEstimate).toBe(30) + expect(result.complete).toBe(true) + }) + + it("should handle tokenizer errors with conservative estimate", async () => { + // Create a proper readable stream + const mockStream = new Readable({ + read() { + this.push("line1\n") + this.push(null) + }, + }) + + mockCreateReadStream.mockReturnValue(mockStream as any) + mockFsAccess.mockResolvedValue(undefined) + + // Simulate tokenizer error + mockCountTokens.mockRejectedValue(new Error("unreachable")) + + const result = await countFileLinesAndTokens("/test/file.txt") + + // Should still complete with conservative token estimate (content.length) + expect(result.lineCount).toBe(1) + expect(result.tokenEstimate).toBeGreaterThan(0) + expect(result.complete).toBe(true) + }) + + it("should throw error for non-existent files", async () => { + mockFsAccess.mockRejectedValue(new Error("ENOENT")) + + await expect(countFileLinesAndTokens("/nonexistent/file.txt")).rejects.toThrow("File not found") + }) }) - it("should handle files with no lines", async () => { - // Setup - ;(fs.promises.access as 
Mock).mockResolvedValueOnce(undefined) - - const mockEventEmitter = { - on: vitest.fn().mockImplementation(function (this: any, event, callback) { - if (event === "close") { - callback() - } - return this - }), - } - - const mockReadStream = { - on: vitest.fn().mockImplementation(function (this: any, _event, _callback) { - return this - }), - } - - const { createReadStream } = await import("fs") - vitest.mocked(createReadStream).mockReturnValueOnce(mockReadStream as any) - const readline = await import("readline") - vitest.mocked(readline.createInterface).mockReturnValueOnce(mockEventEmitter as any) - - // Test - const result = await countFileLines("empty-file.txt") - - // Assert - expect(result).toBe(0) - }) + describe("countFileLines", () => { + it("should throw error for non-existent files", async () => { + mockFsAccess.mockRejectedValue(new Error("ENOENT")) - it("should handle errors during reading", async () => { - // Setup - ;(fs.promises.access as Mock).mockResolvedValueOnce(undefined) - - const mockEventEmitter = { - on: vitest.fn().mockImplementation(function (this: any, event, callback) { - if (event === "error" && callback) { - callback(new Error("Read error")) - } - return this - }), - } - - const mockReadStream = { - on: vitest.fn().mockImplementation(function (this: any, _event, _callback) { - return this - }), - } - - const { createReadStream } = await import("fs") - vitest.mocked(createReadStream).mockReturnValueOnce(mockReadStream as any) - const readline = await import("readline") - vitest.mocked(readline.createInterface).mockReturnValueOnce(mockEventEmitter as any) - - // Test & Assert - await expect(countFileLines("error-file.txt")).rejects.toThrow("Read error") + await expect(countFileLines("/nonexistent/file.txt")).rejects.toThrow("File not found") + }) }) }) diff --git a/src/integrations/misc/extract-text.ts b/src/integrations/misc/extract-text.ts index 8231c609be7..bafa7a5bab1 100644 --- a/src/integrations/misc/extract-text.ts +++ 
b/src/integrations/misc/extract-text.ts @@ -163,7 +163,16 @@ export function stripLineNumbers(content: string, aggressive: boolean = false): // Join back with original line endings (carriage return (\r) + line feed (\n) or just line feed (\n)) const lineEnding = content.includes("\r\n") ? "\r\n" : "\n" - return processedLines.join(lineEnding) + let result = processedLines.join(lineEnding) + + // Preserve trailing newline if present in original content + if (content.endsWith(lineEnding)) { + if (!result.endsWith(lineEnding)) { + result += lineEnding + } + } + + return result } /** diff --git a/src/integrations/misc/line-counter.ts b/src/integrations/misc/line-counter.ts index c59736f1bee..d066d565e88 100644 --- a/src/integrations/misc/line-counter.ts +++ b/src/integrations/misc/line-counter.ts @@ -1,5 +1,7 @@ import fs, { createReadStream } from "fs" import { createInterface } from "readline" +import { countTokens } from "../../utils/countTokens" +import { Anthropic } from "@anthropic-ai/sdk" /** * Efficiently counts lines in a file using streams without loading the entire file into memory @@ -41,3 +43,125 @@ export async function countFileLines(filePath: string): Promise { }) }) } + +export interface LineAndTokenCountResult { + /** Total number of lines counted */ + lineCount: number + /** Estimated token count */ + tokenEstimate: number + /** Whether the full file was scanned (false if early exit occurred) */ + complete: boolean +} + +export interface LineAndTokenCountOptions { + /** Maximum tokens allowed before early exit. If undefined, scans entire file */ + budgetTokens?: number + /** Number of lines to buffer before running token estimation (default: 256) */ + chunkLines?: number +} + +/** + * Efficiently counts lines and estimates tokens in a file using streams with incremental token estimation. + * Processes file in chunks to avoid memory issues and can early-exit when budget is exceeded. 
+ * + * @param filePath - Path to the file to analyze + * @param options - Configuration options for counting + * @returns A promise that resolves to line count, token estimate, and completion status + */ +export async function countFileLinesAndTokens( + filePath: string, + options: LineAndTokenCountOptions = {}, +): Promise { + const { budgetTokens, chunkLines = 256 } = options + + // Check if file exists + try { + await fs.promises.access(filePath, fs.constants.F_OK) + } catch (error) { + throw new Error(`File not found: ${filePath}`) + } + + return new Promise((resolve, reject) => { + let lineCount = 0 + let tokenEstimate = 0 + let lineBuffer: string[] = [] + let complete = true + let isProcessing = false + let shouldClose = false + + const readStream = createReadStream(filePath) + const rl = createInterface({ + input: readStream, + crlfDelay: Infinity, + }) + + const processBuffer = async () => { + if (lineBuffer.length === 0) return + + const bufferText = lineBuffer.join("\n") + lineBuffer = [] // Clear buffer before processing + + try { + const contentBlocks: Anthropic.Messages.ContentBlockParam[] = [{ type: "text", text: bufferText }] + const chunkTokens = await countTokens(contentBlocks) + tokenEstimate += chunkTokens + } catch (error) { + // On tokenizer error, use conservative estimate: 2 char ≈ 1 token + tokenEstimate += Math.ceil(bufferText.length / 2) + } + + // Check if we've exceeded budget + if (budgetTokens !== undefined && tokenEstimate > budgetTokens) { + complete = false + shouldClose = true + rl.close() + readStream.destroy() + } + } + + rl.on("line", (line) => { + lineCount++ + lineBuffer.push(line) + + // Process buffer when it reaches chunk size + if (lineBuffer.length >= chunkLines && !isProcessing) { + isProcessing = true + rl.pause() + processBuffer() + .then(() => { + isProcessing = false + if (!shouldClose) { + rl.resume() + } + }) + .catch((err) => { + isProcessing = false + reject(err) + }) + } + }) + + rl.on("close", async () => { + 
// Wait for any ongoing processing to complete + while (isProcessing) { + await new Promise((r) => setTimeout(r, 10)) + } + + // Process any remaining lines in buffer + try { + await processBuffer() + resolve({ lineCount, tokenEstimate, complete }) + } catch (err) { + reject(err) + } + }) + + rl.on("error", (err) => { + reject(err) + }) + + readStream.on("error", (err) => { + reject(err) + }) + }) +} diff --git a/src/package.json b/src/package.json index cba88edfefb..553f5ff1711 100644 --- a/src/package.json +++ b/src/package.json @@ -463,10 +463,10 @@ }, { "command": "kilo-code.addToContext", - "key": "cmd+y", - "mac": "cmd+y", - "win": "ctrl+y", - "linux": "ctrl+y", + "key": "cmd+k cmd+a", + "mac": "cmd+k cmd+a", + "win": "ctrl+k ctrl+a", + "linux": "ctrl+k ctrl+a", "when": "editorTextFocus && editorHasSelection" }, { @@ -573,6 +573,13 @@ "default": "", "description": "%settings.autoImportSettingsPath.description%" }, + "kilo-code.maximumIndexedFilesForFileSearch": { + "type": "number", + "default": 10000, + "minimum": 5000, + "maximum": 500000, + "description": "%settings.maximumIndexedFilesForFileSearch.description%" + }, "kilo-code.useAgentRules": { "type": "boolean", "default": true, diff --git a/src/package.nls.ar.json b/src/package.nls.ar.json index 62ed7076e43..839b1a287e2 100644 --- a/src/package.nls.ar.json +++ b/src/package.nls.ar.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "مسار تخزين مخصص. خله فاضي عشان يستخدم المسار الافتراضي. يدعم المسارات المطلقة (مثال: 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "فعل الإصلاحات السريعة في Kilo Code", "settings.autoImportSettingsPath.description": "مسار ملف تكوين Kilo Code للاستيراد التلقائي عند بدء تشغيل الإضافة. يدعم المسارات المطلقة والمسارات النسبية لدليل المنزل (مثل '~/Documents/kilo-code-settings.json'). 
اتركه فارغًا لتعطيل الاستيراد التلقائي.", + "settings.maximumIndexedFilesForFileSearch.description": "الحد الأقصى لعدد الملفات المراد فهرستها لميزة البحث عن الملفات @. القيم الأعلى توفر نتائج بحث أفضل في المشاريع الكبيرة لكن قد تستخدم ذاكرة أكثر. الافتراضي: 10,000.", "settings.useAgentRules.description": "تمكين تحميل ملفات AGENTS.md للقواعد الخاصة بالوكيل (انظر https://agent-rules.org/)", "settings.apiRequestTimeout.description": "الحد الأقصى للوقت بالثواني لانتظار استجابات API (0 = بدون مهلة، 1-86400 ثانية، افتراضي: 600 ثانية). القيم الأعلى موصى بها لمقدمي الخدمة المحليين مثل LM Studio و Ollama الذين قد يحتاجون وقت معالجة أكثر.", "settings.newTaskRequireTodos.description": "مطالبة بمعامل todos عند إنشاء مهام جديدة باستخدام أداة new_task", diff --git a/src/package.nls.ca.json b/src/package.nls.ca.json index 5e7909f177c..5c14517a52b 100644 --- a/src/package.nls.ca.json +++ b/src/package.nls.ca.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "Ruta d'emmagatzematge personalitzada. Deixeu-la buida per utilitzar la ubicació predeterminada. Admet rutes absolutes (p. ex. 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Habilitar correccions ràpides de Kilo Code.", "settings.autoImportSettingsPath.description": "Ruta a un fitxer de configuració de Kilo Code per importar automàticament en iniciar l'extensió. Admet rutes absolutes i rutes relatives al directori d'inici (per exemple, '~/Documents/kilo-code-settings.json'). Deixeu-ho en blanc per desactivar la importació automàtica.", + "settings.maximumIndexedFilesForFileSearch.description": "Nombre màxim de fitxers per indexar per a la funció de cerca de fitxers @. Valors més alts proporcionen millors resultats de cerca en projectes grans però poden utilitzar més memòria. 
Per defecte: 10.000.", "settings.useAgentRules.description": "Activa la càrrega de fitxers AGENTS.md per a regles específiques de l'agent (vegeu https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Temps màxim en segons per esperar les respostes de l'API (0 = sense temps d'espera, 1-86400s, per defecte: 600s). Es recomanen valors més alts per a proveïdors locals com LM Studio i Ollama que poden necessitar més temps de processament.", "settings.newTaskRequireTodos.description": "Requerir el paràmetre de tasques pendents quan es creïn noves tasques amb l'eina new_task", diff --git a/src/package.nls.cs.json b/src/package.nls.cs.json index c4ee07d4cf7..6f1597131cb 100644 --- a/src/package.nls.cs.json +++ b/src/package.nls.cs.json @@ -42,6 +42,7 @@ "command.importSettings.title": "Importovat nastavení", "settings.enableCodeActions.description": "Povolit rychlé opravy Kilo Code", "settings.autoImportSettingsPath.description": "Cesta k konfiguračnímu souboru Kilo Code pro automatický import při spuštění rozšíření. Podporuje absolutní cesty a cesty relativní k domovskému adresáři (např. '~/Documents/kilo-code-settings.json'). Ponechte prázdné pro zakázání automatického importu.", + "settings.maximumIndexedFilesForFileSearch.description": "Maximální počet souborů k indexování pro funkci vyhledávání souborů @. Vyšší hodnoty poskytují lepší výsledky vyhledávání ve velkých projektech, ale mohou využívat více paměti. Výchozí: 10 000.", "settings.useAgentRules.description": "Povolit načítání souborů AGENTS.md pro pravidla specifická pro agenty (viz https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Maximální doba v sekundách čekání na odpovědi API (0 = bez časového limitu, 1-86400s, výchozí: 600s). 
Vyšší hodnoty se doporučují pro lokální poskytovatele jako LM Studio a Ollama, kteří mohou potřebovat více času na zpracování.", "settings.newTaskRequireTodos.description": "Vyžadovat parametr todos při vytváření nových úkolů pomocí nástroje new_task", diff --git a/src/package.nls.de.json b/src/package.nls.de.json index 874448c9874..5936eb9fa02 100644 --- a/src/package.nls.de.json +++ b/src/package.nls.de.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "Benutzerdefinierter Speicherpfad. Leer lassen, um den Standardspeicherort zu verwenden. Unterstützt absolute Pfade (z.B. 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Kilo Code Schnelle Problembehebung aktivieren.", "settings.autoImportSettingsPath.description": "Pfad zu einer Kilo Code-Konfigurationsdatei, die beim Start der Erweiterung automatisch importiert wird. Unterstützt absolute Pfade und Pfade relativ zum Home-Verzeichnis (z.B. '~/Documents/kilo-code-settings.json'). Leer lassen, um den automatischen Import zu deaktivieren.", + "settings.maximumIndexedFilesForFileSearch.description": "Maximale Anzahl der zu indizierenden Dateien für die @-Dateisuchfunktion. Höhere Werte bieten bessere Suchergebnisse in großen Projekten, können aber mehr Speicher verbrauchen. Standard: 10.000.", "settings.useAgentRules.description": "Aktiviert das Laden von AGENTS.md-Dateien für agentenspezifische Regeln (siehe https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Maximale Wartezeit in Sekunden auf API-Antworten (0 = kein Timeout, 1-86400s, Standard: 600s). 
Höhere Werte werden für lokale Anbieter wie LM Studio und Ollama empfohlen, die möglicherweise mehr Verarbeitungszeit benötigen.", "settings.newTaskRequireTodos.description": "Todos-Parameter beim Erstellen neuer Aufgaben mit dem new_task-Tool erfordern", diff --git a/src/package.nls.es.json b/src/package.nls.es.json index b9d8ba041cd..177acbbb16e 100644 --- a/src/package.nls.es.json +++ b/src/package.nls.es.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "Ruta de almacenamiento personalizada. Dejar vacío para usar la ubicación predeterminada. Admite rutas absolutas (ej. 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Habilitar correcciones rápidas de Kilo Code.", "settings.autoImportSettingsPath.description": "Ruta a un archivo de configuración de Kilo Code para importar automáticamente al iniciar la extensión. Admite rutas absolutas y rutas relativas al directorio de inicio (por ejemplo, '~/Documents/kilo-code-settings.json'). Dejar vacío para desactivar la importación automática.", + "settings.maximumIndexedFilesForFileSearch.description": "Número máximo de archivos a indexar para la función de búsqueda de archivos @. Valores más altos proporcionan mejores resultados de búsqueda en proyectos grandes pero pueden usar más memoria. Por defecto: 10.000.", "settings.useAgentRules.description": "Habilita la carga de archivos AGENTS.md para reglas específicas del agente (ver https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Tiempo máximo en segundos de espera para las respuestas de la API (0 = sin tiempo de espera, 1-86400s, por defecto: 600s). 
Se recomiendan valores más altos para proveedores locales como LM Studio y Ollama que puedan necesitar más tiempo de procesamiento.", "settings.newTaskRequireTodos.description": "Requerir el parámetro todos al crear nuevas tareas con la herramienta new_task", diff --git a/src/package.nls.fr.json b/src/package.nls.fr.json index 76e15bab53f..01cb0cea346 100644 --- a/src/package.nls.fr.json +++ b/src/package.nls.fr.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "Chemin de stockage personnalisé. Laisser vide pour utiliser l'emplacement par défaut. Prend en charge les chemins absolus (ex: 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Activer les correctifs rapides de Kilo Code.", "settings.autoImportSettingsPath.description": "Chemin d'accès à un fichier de configuration Kilo Code à importer automatiquement au démarrage de l'extension. Prend en charge les chemins absolus et les chemins relatifs au répertoire de base (par exemple, '~/Documents/kilo-code-settings.json'). Laisser vide pour désactiver l'importation automatique.", + "settings.maximumIndexedFilesForFileSearch.description": "Nombre maximum de fichiers à indexer pour la fonctionnalité de recherche de fichiers @. Des valeurs plus élevées offrent de meilleurs résultats de recherche dans les grands projets mais peuvent consommer plus de mémoire. Par défaut : 10 000.", "settings.useAgentRules.description": "Activer le chargement des fichiers AGENTS.md pour les règles spécifiques à l'agent (voir https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Temps maximum en secondes d'attente pour les réponses de l'API (0 = pas de timeout, 1-86400s, par défaut : 600s). 
Des valeurs plus élevées sont recommandées pour les fournisseurs locaux comme LM Studio et Ollama qui peuvent nécessiter plus de temps de traitement.", "settings.newTaskRequireTodos.description": "Exiger le paramètre todos lors de la création de nouvelles tâches avec l'outil new_task", diff --git a/src/package.nls.hi.json b/src/package.nls.hi.json index 1869fe31bb1..c7500781434 100644 --- a/src/package.nls.hi.json +++ b/src/package.nls.hi.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "कस्टम स्टोरेज पाथ। डिफ़ॉल्ट स्थान का उपयोग करने के लिए खाली छोड़ें। पूर्ण पथ का समर्थन करता है (उदा. 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Kilo Code त्वरित सुधार सक्षम करें", "settings.autoImportSettingsPath.description": "Kilo Code कॉन्फ़िगरेशन फ़ाइल का पथ जिसे एक्सटेंशन स्टार्टअप पर स्वचालित रूप से आयात किया जाएगा। होम डायरेक्टरी के सापेक्ष पूर्ण पथ और पथों का समर्थन करता है (उदाहरण के लिए '~/Documents/kilo-code-settings.json')। ऑटो-इंपोर्ट को अक्षम करने के लिए खाली छोड़ दें।", + "settings.maximumIndexedFilesForFileSearch.description": "@ फ़ाइल खोज सुविधा के लिए अनुक्रमित करने के लिए फ़ाइलों की अधिकतम संख्या। उच्च मान बड़ी परियोजनाओं में बेहतर खोज परिणाम प्रदान करते हैं लेकिन अधिक मेमोरी का उपयोग कर सकते हैं। डिफ़ॉल्ट: 10,000।", "settings.useAgentRules.description": "एजेंट-विशिष्ट नियमों के लिए AGENTS.md फ़ाइलों को लोड करना सक्षम करें (देखें https://agent-rules.org/)", "settings.apiRequestTimeout.description": "एपीआई प्रतिक्रियाओं की प्रतीक्षा करने के लिए सेकंड में अधिकतम समय (0 = कोई टाइमआउट नहीं, 1-86400s, डिफ़ॉल्ट: 600s)। एलएम स्टूडियो और ओलामा जैसे स्थानीय प्रदाताओं के लिए उच्च मानों की सिफारिश की जाती है जिन्हें अधिक प्रसंस्करण समय की आवश्यकता हो सकती है।", "settings.newTaskRequireTodos.description": "new_task टूल के साथ नए कार्य बनाते समय टूडू पैरामीटर की आवश्यकता होती है", diff --git a/src/package.nls.id.json b/src/package.nls.id.json index 1f02c7603dc..f5c536ca839 100644 --- a/src/package.nls.id.json +++ 
b/src/package.nls.id.json @@ -40,6 +40,7 @@ "settings.customStoragePath.description": "Path penyimpanan kustom. Biarkan kosong untuk menggunakan lokasi default. Mendukung path absolut (misalnya 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Aktifkan perbaikan cepat Kilo Code.", "settings.autoImportSettingsPath.description": "Path ke file konfigurasi Kilo Code untuk diimpor secara otomatis saat ekstensi dimulai. Mendukung path absolut dan path relatif terhadap direktori home (misalnya '~/Documents/kilo-code-settings.json'). Biarkan kosong untuk menonaktifkan impor otomatis.", + "settings.maximumIndexedFilesForFileSearch.description": "Jumlah maksimum file yang akan diindeks untuk fitur pencarian file @. Nilai yang lebih besar memberikan hasil pencarian yang lebih baik di proyek besar tetapi mungkin menggunakan lebih banyak memori. Default: 10.000.", "settings.useAgentRules.description": "Aktifkan pemuatan file AGENTS.md untuk aturan khusus agen (lihat https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Waktu maksimum dalam detik untuk menunggu respons API (0 = tidak ada batas waktu, 1-86400s, default: 600s). Nilai yang lebih tinggi disarankan untuk penyedia lokal seperti LM Studio dan Ollama yang mungkin memerlukan lebih banyak waktu pemrosesan.", "settings.newTaskRequireTodos.description": "Memerlukan parameter todos saat membuat tugas baru dengan alat new_task", diff --git a/src/package.nls.it.json b/src/package.nls.it.json index 95bb3a8fdfe..ce74d78902b 100644 --- a/src/package.nls.it.json +++ b/src/package.nls.it.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "Percorso di archiviazione personalizzato. Lasciare vuoto per utilizzare la posizione predefinita. Supporta percorsi assoluti (es. 
'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Abilita correzioni rapide di Kilo Code.", "settings.autoImportSettingsPath.description": "Percorso di un file di configurazione di Kilo Code da importare automaticamente all'avvio dell'estensione. Supporta percorsi assoluti e percorsi relativi alla directory home (ad es. '~/Documents/kilo-code-settings.json'). Lasciare vuoto per disabilitare l'importazione automatica.", + "settings.maximumIndexedFilesForFileSearch.description": "Numero massimo di file da indicizzare per la funzionalità di ricerca file @. Valori più alti forniscono migliori risultati di ricerca in progetti grandi ma possono consumare più memoria. Predefinito: 10.000.", "settings.useAgentRules.description": "Abilita il caricamento dei file AGENTS.md per regole specifiche dell'agente (vedi https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Tempo massimo in secondi di attesa per le risposte API (0 = nessun timeout, 1-86400s, predefinito: 600s). 
Valori più alti sono consigliati per provider locali come LM Studio e Ollama che potrebbero richiedere più tempo di elaborazione.", "settings.newTaskRequireTodos.description": "Richiedere il parametro todos quando si creano nuove attività con lo strumento new_task", diff --git a/src/package.nls.ja.json b/src/package.nls.ja.json index 491d2b7af0a..ed868ccdc2d 100644 --- a/src/package.nls.ja.json +++ b/src/package.nls.ja.json @@ -51,6 +51,7 @@ "settings.customStoragePath.description": "カスタムストレージパス。デフォルトの場所を使用する場合は空のままにします。絶対パスをサポートします(例:'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Kilo Codeのクイック修正を有効にする。", "settings.autoImportSettingsPath.description": "拡張機能の起動時に自動的にインポートするKilo Code設定ファイルへのパス。絶対パスとホームディレクトリからの相対パスをサポートします(例:'~/Documents/kilo-code-settings.json')。自動インポートを無効にするには、空のままにします。", + "settings.maximumIndexedFilesForFileSearch.description": "@ファイル検索機能のためにインデックス化するファイルの最大数。大きな値は大規模プロジェクトでより良い検索結果を提供しますが、より多くのメモリを使用する可能性があります。デフォルト: 10,000。", "settings.useAgentRules.description": "エージェント固有のルールのためにAGENTS.mdファイルの読み込みを有効にします(参照:https://agent-rules.org/)", "settings.apiRequestTimeout.description": "API応答を待機する最大時間(秒)(0 = タイムアウトなし、1-86400秒、デフォルト: 600秒)。LM StudioやOllamaのような、より多くの処理時間を必要とする可能性のあるローカルプロバイダーには、より高い値が推奨されます。", "settings.newTaskRequireTodos.description": "new_taskツールで新しいタスクを作成する際にtodosパラメータを必須にする", diff --git a/src/package.nls.json b/src/package.nls.json index e986fc27fb1..cdd8c79c6bb 100644 --- a/src/package.nls.json +++ b/src/package.nls.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "Custom storage path. Leave empty to use the default location. Supports absolute paths (e.g. 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Enable Kilo Code quick fixes", "settings.autoImportSettingsPath.description": "Path to a Kilo Code configuration file to automatically import on extension startup. Supports absolute paths and paths relative to the home directory (e.g. '~/Documents/kilo-code-settings.json'). 
Leave empty to disable auto-import.", + "settings.maximumIndexedFilesForFileSearch.description": "Maximum number of files to index for the @ file search feature. Higher values provide better search results in large projects but may use more memory. Default: 10,000.", "settings.useAgentRules.description": "Enable loading of AGENTS.md files for agent-specific rules (see https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Maximum time in seconds to wait for API responses (0 = no timeout, 1-86400s, default: 600s). Higher values are recommended for local providers like LM Studio and Ollama that may need more processing time.", "settings.newTaskRequireTodos.description": "Require todos parameter when creating new tasks with the new_task tool", diff --git a/src/package.nls.ko.json b/src/package.nls.ko.json index 8ae7f21aef3..bb5f114a9c7 100644 --- a/src/package.nls.ko.json +++ b/src/package.nls.ko.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "사용자 지정 저장소 경로. 기본 위치를 사용하려면 비워두세요. 절대 경로를 지원합니다 (예: 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Kilo Code 빠른 수정 사용 설정", "settings.autoImportSettingsPath.description": "확장 프로그램 시작 시 자동으로 가져올 Kilo Code 구성 파일의 경로입니다. 절대 경로 및 홈 디렉토리에 대한 상대 경로를 지원합니다(예: '~/Documents/kilo-code-settings.json'). 자동 가져오기를 비활성화하려면 비워 둡니다.", + "settings.maximumIndexedFilesForFileSearch.description": "@ 파일 검색 기능을 위해 인덱싱할 최대 파일 수입니다. 더 큰 값은 대형 프로젝트에서 더 나은 검색 결과를 제공하지만 더 많은 메모리를 사용할 수 있습니다. 기본값: 10,000.", "settings.useAgentRules.description": "에이전트별 규칙에 대한 AGENTS.md 파일 로드를 활성화합니다 (참조: https://agent-rules.org/)", "settings.apiRequestTimeout.description": "API 응답을 기다리는 최대 시간(초) (0 = 시간 초과 없음, 1-86400초, 기본값: 600초). 
더 많은 처리 시간이 필요할 수 있는 LM Studio 및 Ollama와 같은 로컬 공급자에게는 더 높은 값을 사용하는 것이 좋습니다.", "settings.newTaskRequireTodos.description": "new_task 도구로 새 작업을 생성할 때 todos 매개변수 필요", diff --git a/src/package.nls.nl.json b/src/package.nls.nl.json index 1b8621352dd..fc455803065 100644 --- a/src/package.nls.nl.json +++ b/src/package.nls.nl.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "Aangepast opslagpad. Laat leeg om de standaardlocatie te gebruiken. Ondersteunt absolute paden (bijv. 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Snelle correcties van Kilo Code inschakelen.", "settings.autoImportSettingsPath.description": "Pad naar een Kilo Code-configuratiebestand om automatisch te importeren bij het opstarten van de extensie. Ondersteunt absolute paden en paden ten opzichte van de thuismap (bijv. '~/Documents/kilo-code-settings.json'). Laat leeg om automatisch importeren uit te schakelen.", + "settings.maximumIndexedFilesForFileSearch.description": "Maximaal aantal bestanden om te indexeren voor de @ bestandszoekfunctie. Hogere waarden bieden betere zoekresultaten in grote projecten maar kunnen meer geheugen gebruiken. Standaard: 10.000.", "settings.useAgentRules.description": "Laden van AGENTS.md-bestanden voor agentspecifieke regels inschakelen (zie https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Maximale tijd in seconden om te wachten op API-reacties (0 = geen time-out, 1-86400s, standaard: 600s). Hogere waarden worden aanbevolen voor lokale providers zoals LM Studio en Ollama die mogelijk meer verwerkingstijd nodig hebben.", "settings.newTaskRequireTodos.description": "Todos-parameter vereisen bij het maken van nieuwe taken met de new_task tool", diff --git a/src/package.nls.pl.json b/src/package.nls.pl.json index 6f43fab6b2d..85cedf9e29a 100644 --- a/src/package.nls.pl.json +++ b/src/package.nls.pl.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "Niestandardowa ścieżka przechowywania. 
Pozostaw puste, aby użyć domyślnej lokalizacji. Obsługuje ścieżki bezwzględne (np. 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Włącz szybkie poprawki Kilo Code.", "settings.autoImportSettingsPath.description": "Ścieżka do pliku konfiguracyjnego Kilo Code, który ma być automatycznie importowany podczas uruchamiania rozszerzenia. Obsługuje ścieżki bezwzględne i ścieżki względne do katalogu domowego (np. '~/Documents/kilo-code-settings.json'). Pozostaw puste, aby wyłączyć automatyczne importowanie.", + "settings.maximumIndexedFilesForFileSearch.description": "Maksymalna liczba plików do indeksowania dla funkcji wyszukiwania plików @. Wyższe wartości zapewniają lepsze wyniki wyszukiwania w dużych projektach, ale mogą zużywać więcej pamięci. Domyślnie: 10 000.", "settings.useAgentRules.description": "Włącz wczytywanie plików AGENTS.md dla reguł specyficznych dla agenta (zobacz https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Maksymalny czas w sekundach oczekiwania na odpowiedzi API (0 = brak limitu czasu, 1-86400s, domyślnie: 600s). Wyższe wartości są zalecane dla lokalnych dostawców, takich jak LM Studio i Ollama, którzy mogą potrzebować więcej czasu na przetwarzanie.", "settings.newTaskRequireTodos.description": "Wymagaj parametru todos podczas tworzenia nowych zadań za pomocą narzędzia new_task", diff --git a/src/package.nls.pt-BR.json b/src/package.nls.pt-BR.json index 7fee464a2f2..4227768d550 100644 --- a/src/package.nls.pt-BR.json +++ b/src/package.nls.pt-BR.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "Caminho de armazenamento personalizado. Deixe vazio para usar o local padrão. Suporta caminhos absolutos (ex: 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Habilitar correções rápidas do Kilo Code.", "settings.autoImportSettingsPath.description": "Caminho para um arquivo de configuração do Kilo Code para importar automaticamente na inicialização da extensão. 
Suporta caminhos absolutos e caminhos relativos ao diretório inicial (por exemplo, '~/Documents/kilo-code-settings.json'). Deixe em branco para desativar a importação automática.", + "settings.maximumIndexedFilesForFileSearch.description": "Número máximo de arquivos a indexar para a funcionalidade de busca de arquivos @. Valores maiores fornecem melhores resultados de busca em projetos grandes, mas podem consumir mais memória. Padrão: 10.000.", "settings.useAgentRules.description": "Habilita o carregamento de arquivos AGENTS.md para regras específicas do agente (consulte https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Tempo máximo em segundos de espera pelas respostas da API (0 = sem tempo limite, 1-86400s, padrão: 600s). Valores mais altos são recomendados para provedores locais como LM Studio e Ollama que podem precisar de mais tempo de processamento.", "settings.newTaskRequireTodos.description": "Exigir parâmetro todos ao criar novas tarefas com a ferramenta new_task", diff --git a/src/package.nls.ru.json b/src/package.nls.ru.json index 8cba126760e..f227887ffa2 100644 --- a/src/package.nls.ru.json +++ b/src/package.nls.ru.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "Пользовательский путь хранения. Оставьте пустым для использования пути по умолчанию. Поддерживает абсолютные пути (например, 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Включить быстрые исправления Kilo Code.", "settings.autoImportSettingsPath.description": "Путь к файлу конфигурации Kilo Code для автоматического импорта при запуске расширения. Поддерживает абсолютные пути и пути относительно домашнего каталога (например, '~/Documents/kilo-code-settings.json'). Оставьте пустым, чтобы отключить автоматический импорт.", + "settings.maximumIndexedFilesForFileSearch.description": "Максимальное количество файлов для индексации при поиске файлов @. 
Большие значения обеспечивают лучшие результаты поиска в крупных проектах, но могут потреблять больше памяти. По умолчанию: 10 000.", "settings.useAgentRules.description": "Включить загрузку файлов AGENTS.md для специфичных для агента правил (см. https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Максимальное время в секундах для ожидания ответов API (0 = нет тайм-аута, 1-86400 с, по умолчанию: 600 с). Рекомендуются более высокие значения для локальных провайдеров, таких как LM Studio и Ollama, которым может потребоваться больше времени на обработку.", "settings.newTaskRequireTodos.description": "Требовать параметр todos при создании новых задач с помощью инструмента new_task", diff --git a/src/package.nls.th.json b/src/package.nls.th.json index ad472cba879..1602e7cdafc 100644 --- a/src/package.nls.th.json +++ b/src/package.nls.th.json @@ -42,6 +42,7 @@ "command.importSettings.title": "นำเข้าการตั้งค่า", "settings.enableCodeActions.description": "เปิดใช้งานการแก้ไขด่วนของ Kilo Code", "settings.autoImportSettingsPath.description": "เส้นทางไปยังไฟล์กำหนดค่า Kilo Code ที่จะนำเข้าโดยอัตโนมัติเมื่อเริ่มต้นส่วนขยาย รองรับเส้นทางแบบสัมบูรณ์และเส้นทางที่สัมพันธ์กับไดเรกทอรีหลัก (เช่น '~/Documents/kilo-code-settings.json') เว้นว่างไว้เพื่อปิดใช้งานการนำเข้าอัตโนมัติ", + "settings.maximumIndexedFilesForFileSearch.description": "จำนวนไฟล์สูงสุดที่จะสร้างดัชนีสำหรับฟีเจอร์ค้นหาไฟล์ @ ค่าที่สูงขึ้นให้ผลลัพธ์การค้นหาที่ดีขึ้นในโปรเจกต์ขนาดใหญ่ แต่อาจใช้หน่วยความจำมากขึ้น ค่าเริ่มต้น: 10,000", "settings.useAgentRules.description": "เปิดใช้งานการโหลดไฟล์ AGENTS.md สำหรับกฎเฉพาะตัวแทน (ดู https://agent-rules.org/)", "settings.apiRequestTimeout.description": "เวลาสูงสุดเป็นวินาทีที่จะรอการตอบสนอง API (0 = ไม่มีการหมดเวลา, 1-86400 วินาที, ค่าเริ่มต้น: 600 วินาที) แนะนำค่าที่สูงขึ้นสำหรับผู้ให้บริการในเครื่องเช่น LM Studio และ Ollama ที่อาจต้องการเวลาในการประมวลผลมากขึ้น", "settings.newTaskRequireTodos.description": "ต้องการพารามิเตอร์ todos 
เมื่อสร้างงานใหม่ด้วยเครื่องมือ new_task", diff --git a/src/package.nls.tr.json b/src/package.nls.tr.json index 8673713d9d5..d483220459a 100644 --- a/src/package.nls.tr.json +++ b/src/package.nls.tr.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "Özel depolama yolu. Varsayılan konumu kullanmak için boş bırakın. Mutlak yolları destekler (örn: 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Kilo Code hızlı düzeltmeleri etkinleştir.", "settings.autoImportSettingsPath.description": "Uzantı başlangıcında otomatik olarak içe aktarılacak bir Kilo Code yapılandırma dosyasının yolu. Mutlak yolları ve ana dizine göreli yolları destekler (ör. '~/Documents/kilo-code-settings.json'). Otomatik içe aktarmayı devre dışı bırakmak için boş bırakın.", + "settings.maximumIndexedFilesForFileSearch.description": "@ dosya arama özelliği için dizinlenecek maksimum dosya sayısı. Daha yüksek değerler büyük projelerde daha iyi arama sonuçları sağlar ancak daha fazla bellek kullanabilir. Varsayılan: 10.000.", "settings.useAgentRules.description": "Aracıya özgü kurallar için AGENTS.md dosyalarının yüklenmesini etkinleştirin (bkz. https://agent-rules.org/)", "settings.apiRequestTimeout.description": "API yanıtları için beklenecek maksimum süre (saniye cinsinden) (0 = zaman aşımı yok, 1-86400s, varsayılan: 600s). LM Studio ve Ollama gibi daha fazla işlem süresi gerektirebilecek yerel sağlayıcılar için daha yüksek değerler önerilir.", "settings.codeIndex.embeddingBatchSize.description": "Kod indeksleme sırasında gömme işlemleri için toplu iş boyutu. Bunu API sağlayıcınızın sınırlarına göre ayarlayın. 
Varsayılan 60'tır.", diff --git a/src/package.nls.uk.json b/src/package.nls.uk.json index d4a09ffd36d..83500e00af7 100644 --- a/src/package.nls.uk.json +++ b/src/package.nls.uk.json @@ -42,6 +42,7 @@ "command.importSettings.title": "Імпортувати налаштування", "settings.enableCodeActions.description": "Увімкнути швидкі виправлення Kilo Code.", "settings.autoImportSettingsPath.description": "Шлях до файлу конфігурації Kilo Code для автоматичного імпорту під час запуску розширення. Підтримує абсолютні шляхи та шляхи відносно домашнього каталогу (наприклад, '~/Documents/kilo-code-settings.json'). Залиште порожнім, щоб вимкнути автоматичний імпорт.", + "settings.maximumIndexedFilesForFileSearch.description": "Максимальна кількість файлів для індексації для функції пошуку файлів @. Вищі значення забезпечують кращі результати пошуку у великих проектах, але можуть використовувати більше пам'яті. За замовчуванням: 10,000.", "settings.useAgentRules.description": "Увімкнути завантаження файлів AGENTS.md для правил, специфічних для агента (див. https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Максимальний час у секундах очікування відповідей API (0 = без тайм-ауту, 1-86400с, за замовчуванням: 600с). Вищі значення рекомендуються для локальних провайдерів, таких як LM Studio та Ollama, яким може знадобитися більше часу для обробки.", "settings.newTaskRequireTodos.description": "Вимагати параметр todos при створенні нових завдань за допомогою інструменту new_task", diff --git a/src/package.nls.vi.json b/src/package.nls.vi.json index 1d67e815236..81ab54f0c06 100644 --- a/src/package.nls.vi.json +++ b/src/package.nls.vi.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "Đường dẫn lưu trữ tùy chỉnh. Để trống để sử dụng vị trí mặc định. 
Hỗ trợ đường dẫn tuyệt đối (ví dụ: 'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "Bật sửa lỗi nhanh Kilo Code.", "settings.autoImportSettingsPath.description": "Đường dẫn đến tệp cấu hình Kilo Code để tự động nhập khi khởi động tiện ích mở rộng. Hỗ trợ đường dẫn tuyệt đối và đường dẫn tương đối đến thư mục chính (ví dụ: '~/Documents/kilo-code-settings.json'). Để trống để tắt tính năng tự động nhập.", + "settings.maximumIndexedFilesForFileSearch.description": "Số lượng tệp tối đa để lập chỉ mục cho tính năng tìm kiếm tệp @. Giá trị cao hơn cung cấp kết quả tìm kiếm tốt hơn trong các dự án lớn nhưng có thể sử dụng nhiều bộ nhớ hơn. Mặc định: 10.000.", "settings.useAgentRules.description": "Bật tải tệp AGENTS.md cho các quy tắc dành riêng cho tác nhân (xem https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Thời gian tối đa tính bằng giây để đợi phản hồi API (0 = không có thời gian chờ, 1-86400 giây, mặc định: 600 giây). Nên sử dụng các giá trị cao hơn cho các nhà cung cấp cục bộ như LM Studio và Ollama có thể cần thêm thời gian xử lý.", "settings.newTaskRequireTodos.description": "Yêu cầu tham số todos khi tạo nhiệm vụ mới với công cụ new_task", diff --git a/src/package.nls.zh-CN.json b/src/package.nls.zh-CN.json index 71c220dfdfd..afdf7dfc6bc 100644 --- a/src/package.nls.zh-CN.json +++ b/src/package.nls.zh-CN.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "自定义存储路径。留空以使用默认位置。支持绝对路径(例如:'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "启用 Kilo Code 快速修复", "settings.autoImportSettingsPath.description": "Kilo Code 配置文件的路径,用于在扩展启动时自动导入。支持绝对路径和相对于主目录的路径(例如 '~/Documents/kilo-code-settings.json')。留空以禁用自动导入。", + "settings.maximumIndexedFilesForFileSearch.description": "为 @ 文件搜索功能建立索引时要索引的最大文件数。较大的值在大型项目中提供更好的搜索结果,但可能占用更多内存。默认值:10,000。", "settings.useAgentRules.description": "为特定于代理的规则启用 AGENTS.md 文件的加载(请参阅 https://agent-rules.org/)", "settings.apiRequestTimeout.description": "等待 API 响应的最长时间(秒)(0 = 
无超时,1-86400秒,默认值:600秒)。对于像 LM Studio 和 Ollama 这样可能需要更多处理时间的本地提供商,建议使用更高的值。", "settings.newTaskRequireTodos.description": "使用 new_task 工具创建新任务时需要 todos 参数", diff --git a/src/package.nls.zh-TW.json b/src/package.nls.zh-TW.json index a2b37104fd1..cd433029fb4 100644 --- a/src/package.nls.zh-TW.json +++ b/src/package.nls.zh-TW.json @@ -42,6 +42,7 @@ "settings.customStoragePath.description": "自訂儲存路徑。留空以使用預設位置。支援絕對路徑(例如:'D:\\KiloCodeStorage')", "settings.enableCodeActions.description": "啟用 Kilo Code 快速修復。", "settings.autoImportSettingsPath.description": "Kilo Code 設定檔案的路徑,用於在擴充功能啟動時自動匯入。支援絕對路徑和相對於主目錄的路徑(例如 '~/Documents/kilo-code-settings.json')。留空以停用自動匯入。", + "settings.maximumIndexedFilesForFileSearch.description": "為 @ 檔案搜尋功能建立索引時要索引的最大檔案數。較大的值在大型專案中提供更好的搜尋結果,但可能佔用更多記憶體。預設值:10,000。", "settings.useAgentRules.description": "為特定於代理的規則啟用 AGENTS.md 檔案的載入(請參閱 https://agent-rules.org/)", "settings.apiRequestTimeout.description": "等待 API 回應的最長時間(秒)(0 = 無超時,1-86400秒,預設值:600秒)。對於像 LM Studio 和 Ollama 這樣可能需要更多處理時間的本地提供商,建議使用更高的值。", "settings.newTaskRequireTodos.description": "使用 new_task 工具建立新工作時需要 todos 參數", diff --git a/src/services/checkpoints/ShadowCheckpointService.ts b/src/services/checkpoints/ShadowCheckpointService.ts index 0caab209a8b..c4c416edf62 100644 --- a/src/services/checkpoints/ShadowCheckpointService.ts +++ b/src/services/checkpoints/ShadowCheckpointService.ts @@ -173,7 +173,7 @@ export abstract class ShadowCheckpointService extends EventEmitter { private async stageAll(git: SimpleGit) { try { - await git.add(".") + await git.add([".", "--ignore-errors"]) } catch (error) { this.log( `[${this.constructor.name}#stageAll] failed to add files to git: ${error instanceof Error ? 
error.message : String(error)}`, diff --git a/src/services/code-index/__tests__/orchestrator.spec.ts b/src/services/code-index/__tests__/orchestrator.spec.ts new file mode 100644 index 00000000000..aab1ef888d3 --- /dev/null +++ b/src/services/code-index/__tests__/orchestrator.spec.ts @@ -0,0 +1,160 @@ +import { describe, it, expect, beforeEach, vi } from "vitest" +import { CodeIndexOrchestrator } from "../orchestrator" + +// Mock vscode workspace so startIndexing passes workspace check +vi.mock("vscode", () => { + const path = require("path") + const testWorkspacePath = path.join(path.sep, "test", "workspace") + return { + window: { + activeTextEditor: null, + }, + workspace: { + workspaceFolders: [ + { + uri: { fsPath: testWorkspacePath }, + name: "test", + index: 0, + }, + ], + createFileSystemWatcher: vi.fn().mockReturnValue({ + onDidCreate: vi.fn().mockReturnValue({ dispose: vi.fn() }), + onDidChange: vi.fn().mockReturnValue({ dispose: vi.fn() }), + onDidDelete: vi.fn().mockReturnValue({ dispose: vi.fn() }), + dispose: vi.fn(), + }), + }, + RelativePattern: vi.fn().mockImplementation((base: string, pattern: string) => ({ base, pattern })), + } +}) + +// Mock TelemetryService +vi.mock("@roo-code/telemetry", () => ({ + TelemetryService: { + instance: { + captureEvent: vi.fn(), + }, + }, +})) + +// Mock i18n translator used in orchestrator messages +vi.mock("../../i18n", () => ({ + t: (key: string, params?: any) => { + if (key === "embeddings:orchestrator.failedDuringInitialScan" && params?.errorMessage) { + return `Failed during initial scan: ${params.errorMessage}` + } + return key + }, +})) + +describe("CodeIndexOrchestrator - error path cleanup gating", () => { + const workspacePath = "/test/workspace" + + let configManager: any + let stateManager: any + let cacheManager: any + let vectorStore: any + let scanner: any + let fileWatcher: any + + beforeEach(() => { + vi.clearAllMocks() + + configManager = { + isFeatureConfigured: true, + } + + // Minimal state 
manager that tracks state transitions + let currentState = "Standby" + stateManager = { + get state() { + return currentState + }, + setSystemState: vi.fn().mockImplementation((state: string, _msg: string) => { + currentState = state + }), + reportFileQueueProgress: vi.fn(), + reportBlockIndexingProgress: vi.fn(), + } + + cacheManager = { + clearCacheFile: vi.fn().mockResolvedValue(undefined), + } + + vectorStore = { + initialize: vi.fn(), + hasIndexedData: vi.fn(), + markIndexingIncomplete: vi.fn(), + markIndexingComplete: vi.fn(), + clearCollection: vi.fn().mockResolvedValue(undefined), + } + + scanner = { + scanDirectory: vi.fn(), + } + + fileWatcher = { + initialize: vi.fn().mockResolvedValue(undefined), + onDidStartBatchProcessing: vi.fn().mockReturnValue({ dispose: vi.fn() }), + onBatchProgressUpdate: vi.fn().mockReturnValue({ dispose: vi.fn() }), + onDidFinishBatchProcessing: vi.fn().mockReturnValue({ dispose: vi.fn() }), + dispose: vi.fn(), + } + }) + + it("should not call clearCollection() or clear cache when initialize() fails (indexing not started)", async () => { + // Arrange: fail at initialize() + vectorStore.initialize.mockRejectedValue(new Error("Qdrant unreachable")) + + const orchestrator = new CodeIndexOrchestrator( + configManager, + stateManager, + workspacePath, + cacheManager, + vectorStore, + scanner, + fileWatcher, + ) + + // Act + await orchestrator.startIndexing() + + // Assert + expect(vectorStore.clearCollection).not.toHaveBeenCalled() + expect(cacheManager.clearCacheFile).not.toHaveBeenCalled() + + // Error state should be set + expect(stateManager.setSystemState).toHaveBeenCalled() + const lastCall = stateManager.setSystemState.mock.calls[stateManager.setSystemState.mock.calls.length - 1] + expect(lastCall[0]).toBe("Error") + }) + + it("should call clearCollection() and clear cache when an error occurs after initialize() succeeds (indexing started)", async () => { + // Arrange: initialize succeeds; fail soon after to enter error path 
with indexingStarted=true + vectorStore.initialize.mockResolvedValue(false) // existing collection + vectorStore.hasIndexedData.mockResolvedValue(false) // force full scan path + vectorStore.markIndexingIncomplete.mockRejectedValue(new Error("mark incomplete failure")) + + const orchestrator = new CodeIndexOrchestrator( + configManager, + stateManager, + workspacePath, + cacheManager, + vectorStore, + scanner, + fileWatcher, + ) + + // Act + await orchestrator.startIndexing() + + // Assert: cleanup gated behind indexingStarted should have happened + expect(vectorStore.clearCollection).toHaveBeenCalledTimes(1) + expect(cacheManager.clearCacheFile).toHaveBeenCalledTimes(1) + + // Error state should be set + expect(stateManager.setSystemState).toHaveBeenCalled() + const lastCall = stateManager.setSystemState.mock.calls[stateManager.setSystemState.mock.calls.length - 1] + expect(lastCall[0]).toBe("Error") + }) +}) diff --git a/src/services/code-index/config-manager.ts b/src/services/code-index/config-manager.ts index 2c0e8bb5c9e..5bc00b6ce35 100644 --- a/src/services/code-index/config-manager.ts +++ b/src/services/code-index/config-manager.ts @@ -20,6 +20,7 @@ export class CodeIndexConfigManager { private geminiOptions?: { apiKey: string } private mistralOptions?: { apiKey: string } private vercelAiGatewayOptions?: { apiKey: string } + private openRouterOptions?: { apiKey: string } private qdrantUrl?: string = "http://localhost:6333" private qdrantApiKey?: string private searchMinScore?: number @@ -71,6 +72,7 @@ export class CodeIndexConfigManager { const geminiApiKey = this.contextProxy?.getSecret("codebaseIndexGeminiApiKey") ?? "" const mistralApiKey = this.contextProxy?.getSecret("codebaseIndexMistralApiKey") ?? "" const vercelAiGatewayApiKey = this.contextProxy?.getSecret("codebaseIndexVercelAiGatewayApiKey") ?? "" + const openRouterApiKey = this.contextProxy?.getSecret("codebaseIndexOpenRouterApiKey") ?? 
"" // Update instance variables with configuration this.codebaseIndexEnabled = codebaseIndexEnabled ?? true @@ -108,6 +110,8 @@ export class CodeIndexConfigManager { this.embedderProvider = "mistral" } else if (codebaseIndexEmbedderProvider === "vercel-ai-gateway") { this.embedderProvider = "vercel-ai-gateway" + } else if (codebaseIndexEmbedderProvider === "openrouter") { + this.embedderProvider = "openrouter" } else { this.embedderProvider = "openai" } @@ -129,6 +133,7 @@ export class CodeIndexConfigManager { this.geminiOptions = geminiApiKey ? { apiKey: geminiApiKey } : undefined this.mistralOptions = mistralApiKey ? { apiKey: mistralApiKey } : undefined this.vercelAiGatewayOptions = vercelAiGatewayApiKey ? { apiKey: vercelAiGatewayApiKey } : undefined + this.openRouterOptions = openRouterApiKey ? { apiKey: openRouterApiKey } : undefined } /** @@ -147,6 +152,7 @@ export class CodeIndexConfigManager { geminiOptions?: { apiKey: string } mistralOptions?: { apiKey: string } vercelAiGatewayOptions?: { apiKey: string } + openRouterOptions?: { apiKey: string } qdrantUrl?: string qdrantApiKey?: string searchMinScore?: number @@ -167,6 +173,7 @@ export class CodeIndexConfigManager { geminiApiKey: this.geminiOptions?.apiKey ?? "", mistralApiKey: this.mistralOptions?.apiKey ?? "", vercelAiGatewayApiKey: this.vercelAiGatewayOptions?.apiKey ?? "", + openRouterApiKey: this.openRouterOptions?.apiKey ?? "", qdrantUrl: this.qdrantUrl ?? "", qdrantApiKey: this.qdrantApiKey ?? 
"", } @@ -192,6 +199,7 @@ export class CodeIndexConfigManager { geminiOptions: this.geminiOptions, mistralOptions: this.mistralOptions, vercelAiGatewayOptions: this.vercelAiGatewayOptions, + openRouterOptions: this.openRouterOptions, qdrantUrl: this.qdrantUrl, qdrantApiKey: this.qdrantApiKey, searchMinScore: this.currentSearchMinScore, @@ -234,6 +242,11 @@ export class CodeIndexConfigManager { const qdrantUrl = this.qdrantUrl const isConfigured = !!(apiKey && qdrantUrl) return isConfigured + } else if (this.embedderProvider === "openrouter") { + const apiKey = this.openRouterOptions?.apiKey + const qdrantUrl = this.qdrantUrl + const isConfigured = !!(apiKey && qdrantUrl) + return isConfigured } return false // Should not happen if embedderProvider is always set correctly } @@ -269,6 +282,7 @@ export class CodeIndexConfigManager { const prevGeminiApiKey = prev?.geminiApiKey ?? "" const prevMistralApiKey = prev?.mistralApiKey ?? "" const prevVercelAiGatewayApiKey = prev?.vercelAiGatewayApiKey ?? "" + const prevOpenRouterApiKey = prev?.openRouterApiKey ?? "" const prevQdrantUrl = prev?.qdrantUrl ?? "" const prevQdrantApiKey = prev?.qdrantApiKey ?? "" @@ -307,6 +321,7 @@ export class CodeIndexConfigManager { const currentGeminiApiKey = this.geminiOptions?.apiKey ?? "" const currentMistralApiKey = this.mistralOptions?.apiKey ?? "" const currentVercelAiGatewayApiKey = this.vercelAiGatewayOptions?.apiKey ?? "" + const currentOpenRouterApiKey = this.openRouterOptions?.apiKey ?? "" const currentQdrantUrl = this.qdrantUrl ?? "" const currentQdrantApiKey = this.qdrantApiKey ?? 
"" @@ -337,6 +352,10 @@ export class CodeIndexConfigManager { return true } + if (prevOpenRouterApiKey !== currentOpenRouterApiKey) { + return true + } + // Check for model dimension changes (generic for all providers) if (prevModelDimension !== currentModelDimension) { return true @@ -395,6 +414,7 @@ export class CodeIndexConfigManager { geminiOptions: this.geminiOptions, mistralOptions: this.mistralOptions, vercelAiGatewayOptions: this.vercelAiGatewayOptions, + openRouterOptions: this.openRouterOptions, qdrantUrl: this.qdrantUrl, qdrantApiKey: this.qdrantApiKey, searchMinScore: this.currentSearchMinScore, diff --git a/src/services/code-index/embedders/__tests__/openrouter.spec.ts b/src/services/code-index/embedders/__tests__/openrouter.spec.ts new file mode 100644 index 00000000000..fe9f9d59dd3 --- /dev/null +++ b/src/services/code-index/embedders/__tests__/openrouter.spec.ts @@ -0,0 +1,289 @@ +import type { MockedClass, MockedFunction } from "vitest" +import { describe, it, expect, beforeEach, vi } from "vitest" +import { OpenAI } from "openai" +import { OpenRouterEmbedder } from "../openrouter" +import { getModelDimension, getDefaultModelId } from "../../../../shared/embeddingModels" + +// Mock the OpenAI SDK +vi.mock("openai") + +// Mock TelemetryService +vi.mock("@roo-code/telemetry", () => ({ + TelemetryService: { + instance: { + captureEvent: vi.fn(), + }, + }, + TelemetryEventName: {}, +})) + +// Mock i18n +vi.mock("../../../../i18n", () => ({ + t: (key: string, params?: Record) => { + const translations: Record = { + "embeddings:validation.apiKeyRequired": "validation.apiKeyRequired", + "embeddings:authenticationFailed": + "Failed to create embeddings: Authentication failed. 
Please check your OpenRouter API key.", + "embeddings:failedWithStatus": `Failed to create embeddings after ${params?.attempts} attempts: HTTP ${params?.statusCode} - ${params?.errorMessage}`, + "embeddings:failedWithError": `Failed to create embeddings after ${params?.attempts} attempts: ${params?.errorMessage}`, + "embeddings:failedMaxAttempts": `Failed to create embeddings after ${params?.attempts} attempts`, + "embeddings:textExceedsTokenLimit": `Text at index ${params?.index} exceeds maximum token limit (${params?.itemTokens} > ${params?.maxTokens}). Skipping.`, + "embeddings:rateLimitRetry": `Rate limit hit, retrying in ${params?.delayMs}ms (attempt ${params?.attempt}/${params?.maxRetries})`, + } + return translations[key] || key + }, +})) + +const MockedOpenAI = OpenAI as MockedClass + +describe("OpenRouterEmbedder", () => { + const mockApiKey = "test-api-key" + let mockEmbeddingsCreate: MockedFunction + let mockOpenAIInstance: any + + beforeEach(() => { + vi.clearAllMocks() + vi.spyOn(console, "warn").mockImplementation(() => {}) + vi.spyOn(console, "error").mockImplementation(() => {}) + + // Setup mock OpenAI instance + mockEmbeddingsCreate = vi.fn() + mockOpenAIInstance = { + embeddings: { + create: mockEmbeddingsCreate, + }, + } + + MockedOpenAI.mockImplementation(() => mockOpenAIInstance) + }) + + afterEach(() => { + vi.restoreAllMocks() + }) + + describe("constructor", () => { + it("should create an instance with valid API key", () => { + const embedder = new OpenRouterEmbedder(mockApiKey) + expect(embedder).toBeInstanceOf(OpenRouterEmbedder) + }) + + it("should throw error with empty API key", () => { + expect(() => new OpenRouterEmbedder("")).toThrow("validation.apiKeyRequired") + }) + + it("should use default model when none specified", () => { + const embedder = new OpenRouterEmbedder(mockApiKey) + const expectedDefault = getDefaultModelId("openrouter") + expect(embedder.embedderInfo.name).toBe("openrouter") + }) + + it("should use custom model 
when specified", () => { + const customModel = "openai/text-embedding-3-small" + const embedder = new OpenRouterEmbedder(mockApiKey, customModel) + expect(embedder.embedderInfo.name).toBe("openrouter") + }) + + it("should initialize OpenAI client with correct headers", () => { + new OpenRouterEmbedder(mockApiKey) + + expect(MockedOpenAI).toHaveBeenCalledWith({ + baseURL: "https://openrouter.ai/api/v1", + apiKey: mockApiKey, + defaultHeaders: { + "HTTP-Referer": "https://github.com/RooCodeInc/Roo-Code", + "X-Title": "Roo Code", + }, + }) + }) + }) + + describe("embedderInfo", () => { + it("should return correct embedder info", () => { + const embedder = new OpenRouterEmbedder(mockApiKey) + expect(embedder.embedderInfo).toEqual({ + name: "openrouter", + }) + }) + }) + + describe("createEmbeddings", () => { + let embedder: OpenRouterEmbedder + + beforeEach(() => { + embedder = new OpenRouterEmbedder(mockApiKey) + }) + + it("should create embeddings successfully", async () => { + // Create base64 encoded embedding with values that can be exactly represented in Float32 + const testEmbedding = new Float32Array([0.25, 0.5, 0.75]) + const base64String = Buffer.from(testEmbedding.buffer).toString("base64") + + const mockResponse = { + data: [ + { + embedding: base64String, + }, + ], + usage: { + prompt_tokens: 5, + total_tokens: 5, + }, + } + + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + const result = await embedder.createEmbeddings(["test text"]) + + expect(mockEmbeddingsCreate).toHaveBeenCalledWith({ + input: ["test text"], + model: "openai/text-embedding-3-large", + encoding_format: "base64", + }) + expect(result.embeddings).toHaveLength(1) + expect(result.embeddings[0]).toEqual([0.25, 0.5, 0.75]) + expect(result.usage?.promptTokens).toBe(5) + expect(result.usage?.totalTokens).toBe(5) + }) + + it("should handle multiple texts", async () => { + const embedding1 = new Float32Array([0.25, 0.5]) + const embedding2 = new Float32Array([0.75, 1.0]) + const 
base64String1 = Buffer.from(embedding1.buffer).toString("base64") + const base64String2 = Buffer.from(embedding2.buffer).toString("base64") + + const mockResponse = { + data: [ + { + embedding: base64String1, + }, + { + embedding: base64String2, + }, + ], + usage: { + prompt_tokens: 10, + total_tokens: 10, + }, + } + + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + const result = await embedder.createEmbeddings(["text1", "text2"]) + + expect(result.embeddings).toHaveLength(2) + expect(result.embeddings[0]).toEqual([0.25, 0.5]) + expect(result.embeddings[1]).toEqual([0.75, 1.0]) + }) + + it("should use custom model when provided", async () => { + const customModel = "mistralai/mistral-embed-2312" + const embedderWithCustomModel = new OpenRouterEmbedder(mockApiKey, customModel) + + const testEmbedding = new Float32Array([0.25, 0.5]) + const base64String = Buffer.from(testEmbedding.buffer).toString("base64") + + const mockResponse = { + data: [ + { + embedding: base64String, + }, + ], + usage: { + prompt_tokens: 5, + total_tokens: 5, + }, + } + + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + await embedderWithCustomModel.createEmbeddings(["test"]) + + // Verify the embeddings.create was called with the custom model + expect(mockEmbeddingsCreate).toHaveBeenCalledWith({ + input: ["test"], + model: customModel, + encoding_format: "base64", + }) + }) + }) + + describe("validateConfiguration", () => { + let embedder: OpenRouterEmbedder + + beforeEach(() => { + embedder = new OpenRouterEmbedder(mockApiKey) + }) + + it("should validate configuration successfully", async () => { + const testEmbedding = new Float32Array([0.25, 0.5]) + const base64String = Buffer.from(testEmbedding.buffer).toString("base64") + + const mockResponse = { + data: [ + { + embedding: base64String, + }, + ], + usage: { + prompt_tokens: 1, + total_tokens: 1, + }, + } + + mockEmbeddingsCreate.mockResolvedValue(mockResponse) + + const result = await embedder.validateConfiguration() + 
+ expect(result.valid).toBe(true) + expect(result.error).toBeUndefined() + expect(mockEmbeddingsCreate).toHaveBeenCalledWith({ + input: ["test"], + model: "openai/text-embedding-3-large", + encoding_format: "base64", + }) + }) + + it("should handle validation failure", async () => { + const authError = new Error("Invalid API key") + ;(authError as any).status = 401 + + mockEmbeddingsCreate.mockRejectedValue(authError) + + const result = await embedder.validateConfiguration() + + expect(result.valid).toBe(false) + expect(result.error).toBe("embeddings:validation.authenticationFailed") + }) + }) + + describe("integration with shared models", () => { + it("should work with defined OpenRouter models", () => { + const openRouterModels = [ + "openai/text-embedding-3-small", + "openai/text-embedding-3-large", + "openai/text-embedding-ada-002", + "google/gemini-embedding-001", + "mistralai/mistral-embed-2312", + "mistralai/codestral-embed-2505", + "qwen/qwen3-embedding-8b", + ] + + openRouterModels.forEach((model) => { + const dimension = getModelDimension("openrouter", model) + expect(dimension).toBeDefined() + expect(dimension).toBeGreaterThan(0) + + const embedder = new OpenRouterEmbedder(mockApiKey, model) + expect(embedder.embedderInfo.name).toBe("openrouter") + }) + }) + + it("should use correct default model", () => { + const defaultModel = getDefaultModelId("openrouter") + expect(defaultModel).toBe("openai/text-embedding-3-large") + + const dimension = getModelDimension("openrouter", defaultModel) + expect(dimension).toBe(3072) + }) + }) +}) diff --git a/src/services/code-index/embedders/openrouter.ts b/src/services/code-index/embedders/openrouter.ts new file mode 100644 index 00000000000..a455489d527 --- /dev/null +++ b/src/services/code-index/embedders/openrouter.ts @@ -0,0 +1,396 @@ +import { OpenAI } from "openai" +import { IEmbedder, EmbeddingResponse, EmbedderInfo } from "../interfaces/embedder" +import { + MAX_BATCH_TOKENS, + MAX_ITEM_TOKENS, + 
MAX_BATCH_RETRIES as MAX_RETRIES, + INITIAL_RETRY_DELAY_MS as INITIAL_DELAY_MS, +} from "../constants" +import { getDefaultModelId, getModelQueryPrefix } from "../../../shared/embeddingModels" +import { t } from "../../../i18n" +import { withValidationErrorHandling, HttpError, formatEmbeddingError } from "../shared/validation-helpers" +import { TelemetryEventName } from "@roo-code/types" +import { TelemetryService } from "@roo-code/telemetry" +import { Mutex } from "async-mutex" +import { handleOpenAIError } from "../../../api/providers/utils/openai-error-handler" + +interface EmbeddingItem { + embedding: string | number[] + [key: string]: any +} + +interface OpenRouterEmbeddingResponse { + data: EmbeddingItem[] + usage?: { + prompt_tokens?: number + total_tokens?: number + } +} + +/** + * OpenRouter implementation of the embedder interface with batching and rate limiting. + * OpenRouter provides an OpenAI-compatible API that gives access to hundreds of models + * through a single endpoint, automatically handling fallbacks and cost optimization. 
+ */ +export class OpenRouterEmbedder implements IEmbedder { + private embeddingsClient: OpenAI + private readonly defaultModelId: string + private readonly apiKey: string + private readonly maxItemTokens: number + private readonly baseUrl: string = "https://openrouter.ai/api/v1" + + // Global rate limiting state shared across all instances + private static globalRateLimitState = { + isRateLimited: false, + rateLimitResetTime: 0, + consecutiveRateLimitErrors: 0, + lastRateLimitError: 0, + // Mutex to ensure thread-safe access to rate limit state + mutex: new Mutex(), + } + + /** + * Creates a new OpenRouter embedder + * @param apiKey The API key for authentication + * @param modelId Optional model identifier (defaults to "openai/text-embedding-3-large") + * @param maxItemTokens Optional maximum tokens per item (defaults to MAX_ITEM_TOKENS) + */ + constructor(apiKey: string, modelId?: string, maxItemTokens?: number) { + if (!apiKey) { + throw new Error(t("embeddings:validation.apiKeyRequired")) + } + + this.apiKey = apiKey + + // Wrap OpenAI client creation to handle invalid API key characters + try { + this.embeddingsClient = new OpenAI({ + baseURL: this.baseUrl, + apiKey: apiKey, + defaultHeaders: { + "HTTP-Referer": "https://github.com/RooCodeInc/Roo-Code", + "X-Title": "Roo Code", + }, + }) + } catch (error) { + // Use the error handler to transform ByteString conversion errors + throw handleOpenAIError(error, "OpenRouter") + } + + this.defaultModelId = modelId || getDefaultModelId("openrouter") + this.maxItemTokens = maxItemTokens || MAX_ITEM_TOKENS + } + + /** + * Creates embeddings for the given texts with batching and rate limiting + * @param texts Array of text strings to embed + * @param model Optional model identifier + * @returns Promise resolving to embedding response + */ + async createEmbeddings(texts: string[], model?: string): Promise { + const modelToUse = model || this.defaultModelId + + // Apply model-specific query prefix if required + const 
queryPrefix = getModelQueryPrefix("openrouter", modelToUse) + const processedTexts = queryPrefix + ? texts.map((text, index) => { + // Prevent double-prefixing + if (text.startsWith(queryPrefix)) { + return text + } + const prefixedText = `${queryPrefix}${text}` + const estimatedTokens = Math.ceil(prefixedText.length / 4) + if (estimatedTokens > MAX_ITEM_TOKENS) { + console.warn( + t("embeddings:textWithPrefixExceedsTokenLimit", { + index, + estimatedTokens, + maxTokens: MAX_ITEM_TOKENS, + }), + ) + // Return original text if adding prefix would exceed limit + return text + } + return prefixedText + }) + : texts + + const allEmbeddings: number[][] = [] + const usage = { promptTokens: 0, totalTokens: 0 } + const remainingTexts = [...processedTexts] + + while (remainingTexts.length > 0) { + const currentBatch: string[] = [] + let currentBatchTokens = 0 + const processedIndices: number[] = [] + + for (let i = 0; i < remainingTexts.length; i++) { + const text = remainingTexts[i] + const itemTokens = Math.ceil(text.length / 4) + + if (itemTokens > this.maxItemTokens) { + console.warn( + t("embeddings:textExceedsTokenLimit", { + index: i, + itemTokens, + maxTokens: this.maxItemTokens, + }), + ) + processedIndices.push(i) + continue + } + + if (currentBatchTokens + itemTokens <= MAX_BATCH_TOKENS) { + currentBatch.push(text) + currentBatchTokens += itemTokens + processedIndices.push(i) + } else { + break + } + } + + // Remove processed items from remainingTexts (in reverse order to maintain correct indices) + for (let i = processedIndices.length - 1; i >= 0; i--) { + remainingTexts.splice(processedIndices[i], 1) + } + + if (currentBatch.length > 0) { + const batchResult = await this._embedBatchWithRetries(currentBatch, modelToUse) + allEmbeddings.push(...batchResult.embeddings) + usage.promptTokens += batchResult.usage.promptTokens + usage.totalTokens += batchResult.usage.totalTokens + } + } + + return { embeddings: allEmbeddings, usage } + } + + /** + * Helper method to 
handle batch embedding with retries and exponential backoff + * @param batchTexts Array of texts to embed in this batch + * @param model Model identifier to use + * @returns Promise resolving to embeddings and usage statistics + */ + private async _embedBatchWithRetries( + batchTexts: string[], + model: string, + ): Promise<{ embeddings: number[][]; usage: { promptTokens: number; totalTokens: number } }> { + for (let attempts = 0; attempts < MAX_RETRIES; attempts++) { + // Check global rate limit before attempting request + await this.waitForGlobalRateLimit() + + try { + const response = (await this.embeddingsClient.embeddings.create({ + input: batchTexts, + model: model, + // OpenAI package (as of v4.78.1) has a parsing issue that truncates embedding dimensions to 256 + // when processing numeric arrays, which breaks compatibility with models using larger dimensions. + // By requesting base64 encoding, we bypass the package's parser and handle decoding ourselves. + encoding_format: "base64", + })) as OpenRouterEmbeddingResponse + + // Convert base64 embeddings to float32 arrays + const processedEmbeddings = response.data.map((item: EmbeddingItem) => { + if (typeof item.embedding === "string") { + const buffer = Buffer.from(item.embedding, "base64") + + // Create Float32Array view over the buffer + const float32Array = new Float32Array(buffer.buffer, buffer.byteOffset, buffer.byteLength / 4) + + return { + ...item, + embedding: Array.from(float32Array), + } + } + return item + }) + + // Replace the original data with processed embeddings + response.data = processedEmbeddings + + const embeddings = response.data.map((item) => item.embedding as number[]) + + return { + embeddings: embeddings, + usage: { + promptTokens: response.usage?.prompt_tokens || 0, + totalTokens: response.usage?.total_tokens || 0, + }, + } + } catch (error) { + // Capture telemetry before error is reformatted + TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, { + 
error: error instanceof Error ? error.message : String(error), + stack: error instanceof Error ? error.stack : undefined, + location: "OpenRouterEmbedder:_embedBatchWithRetries", + attempt: attempts + 1, + }) + + const hasMoreAttempts = attempts < MAX_RETRIES - 1 + + // Check if it's a rate limit error + const httpError = error as HttpError + if (httpError?.status === 429) { + // Update global rate limit state + await this.updateGlobalRateLimitState(httpError) + + if (hasMoreAttempts) { + // Calculate delay based on global rate limit state + const baseDelay = INITIAL_DELAY_MS * Math.pow(2, attempts) + const globalDelay = await this.getGlobalRateLimitDelay() + const delayMs = Math.max(baseDelay, globalDelay) + + console.warn( + t("embeddings:rateLimitRetry", { + delayMs, + attempt: attempts + 1, + maxRetries: MAX_RETRIES, + }), + ) + await new Promise((resolve) => setTimeout(resolve, delayMs)) + continue + } + } + + // Log the error for debugging + console.error(`OpenRouter embedder error (attempt ${attempts + 1}/${MAX_RETRIES}):`, error) + + // Format and throw the error + throw formatEmbeddingError(error, MAX_RETRIES) + } + } + + throw new Error(t("embeddings:failedMaxAttempts", { attempts: MAX_RETRIES })) + } + + /** + * Validates the OpenRouter embedder configuration by testing API connectivity + * @returns Promise resolving to validation result with success status and optional error message + */ + async validateConfiguration(): Promise<{ valid: boolean; error?: string }> { + return withValidationErrorHandling(async () => { + try { + // Test with a minimal embedding request + const testTexts = ["test"] + const modelToUse = this.defaultModelId + + const response = (await this.embeddingsClient.embeddings.create({ + input: testTexts, + model: modelToUse, + encoding_format: "base64", + })) as OpenRouterEmbeddingResponse + + // Check if we got a valid response + if (!response?.data || response.data.length === 0) { + return { + valid: false, + error: 
"embeddings:validation.invalidResponse", + } + } + + return { valid: true } + } catch (error) { + // Capture telemetry for validation errors + TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, { + error: error instanceof Error ? error.message : String(error), + stack: error instanceof Error ? error.stack : undefined, + location: "OpenRouterEmbedder:validateConfiguration", + }) + throw error + } + }, "openrouter") + } + + /** + * Returns information about this embedder + */ + get embedderInfo(): EmbedderInfo { + return { + name: "openrouter", + } + } + + /** + * Waits if there's an active global rate limit + */ + private async waitForGlobalRateLimit(): Promise { + const release = await OpenRouterEmbedder.globalRateLimitState.mutex.acquire() + let mutexReleased = false + + try { + const state = OpenRouterEmbedder.globalRateLimitState + + if (state.isRateLimited && state.rateLimitResetTime > Date.now()) { + const waitTime = state.rateLimitResetTime - Date.now() + // Silent wait - no logging to prevent flooding + release() + mutexReleased = true + await new Promise((resolve) => setTimeout(resolve, waitTime)) + return + } + + // Reset rate limit if time has passed + if (state.isRateLimited && state.rateLimitResetTime <= Date.now()) { + state.isRateLimited = false + state.consecutiveRateLimitErrors = 0 + } + } finally { + // Only release if we haven't already + if (!mutexReleased) { + release() + } + } + } + + /** + * Updates global rate limit state when a 429 error occurs + */ + private async updateGlobalRateLimitState(error: HttpError): Promise { + const release = await OpenRouterEmbedder.globalRateLimitState.mutex.acquire() + try { + const state = OpenRouterEmbedder.globalRateLimitState + const now = Date.now() + + // Increment consecutive rate limit errors + if (now - state.lastRateLimitError < 60000) { + // Within 1 minute + state.consecutiveRateLimitErrors++ + } else { + state.consecutiveRateLimitErrors = 1 + } + + state.lastRateLimitError 
= now + + // Calculate exponential backoff based on consecutive errors + const baseDelay = 5000 // 5 seconds base + const maxDelay = 300000 // 5 minutes max + const exponentialDelay = Math.min(baseDelay * Math.pow(2, state.consecutiveRateLimitErrors - 1), maxDelay) + + // Set global rate limit + state.isRateLimited = true + state.rateLimitResetTime = now + exponentialDelay + + // Silent rate limit activation - no logging to prevent flooding + } finally { + release() + } + } + + /** + * Gets the current global rate limit delay + */ + private async getGlobalRateLimitDelay(): Promise { + const release = await OpenRouterEmbedder.globalRateLimitState.mutex.acquire() + try { + const state = OpenRouterEmbedder.globalRateLimitState + + if (state.isRateLimited && state.rateLimitResetTime > Date.now()) { + return state.rateLimitResetTime - Date.now() + } + + return 0 + } finally { + release() + } + } +} diff --git a/src/services/code-index/interfaces/config.ts b/src/services/code-index/interfaces/config.ts index f168e268691..9fe3df1f129 100644 --- a/src/services/code-index/interfaces/config.ts +++ b/src/services/code-index/interfaces/config.ts @@ -15,6 +15,7 @@ export interface CodeIndexConfig { geminiOptions?: { apiKey: string } mistralOptions?: { apiKey: string } vercelAiGatewayOptions?: { apiKey: string } + openRouterOptions?: { apiKey: string } qdrantUrl?: string qdrantApiKey?: string searchMinScore?: number @@ -37,6 +38,7 @@ export type PreviousConfigSnapshot = { geminiApiKey?: string mistralApiKey?: string vercelAiGatewayApiKey?: string + openRouterApiKey?: string qdrantUrl?: string qdrantApiKey?: string } diff --git a/src/services/code-index/interfaces/embedder.ts b/src/services/code-index/interfaces/embedder.ts index 1fcda3aca32..7a3aa91ad9d 100644 --- a/src/services/code-index/interfaces/embedder.ts +++ b/src/services/code-index/interfaces/embedder.ts @@ -28,7 +28,14 @@ export interface EmbeddingResponse { } } -export type AvailableEmbedders = "openai" | "ollama" | 
"openai-compatible" | "gemini" | "mistral" | "vercel-ai-gateway" +export type AvailableEmbedders = + | "openai" + | "ollama" + | "openai-compatible" + | "gemini" + | "mistral" + | "vercel-ai-gateway" + | "openrouter" export interface EmbedderInfo { name: AvailableEmbedders diff --git a/src/services/code-index/interfaces/manager.ts b/src/services/code-index/interfaces/manager.ts index 527900f6d1c..9a6e4031ab1 100644 --- a/src/services/code-index/interfaces/manager.ts +++ b/src/services/code-index/interfaces/manager.ts @@ -70,7 +70,14 @@ export interface ICodeIndexManager { } export type IndexingState = "Standby" | "Indexing" | "Indexed" | "Error" -export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini" | "mistral" | "vercel-ai-gateway" +export type EmbedderProvider = + | "openai" + | "ollama" + | "openai-compatible" + | "gemini" + | "mistral" + | "vercel-ai-gateway" + | "openrouter" export interface IndexProgressUpdate { systemStatus: IndexingState diff --git a/src/services/code-index/interfaces/vector-store.ts b/src/services/code-index/interfaces/vector-store.ts index dde602fb4d9..7946563fd57 100644 --- a/src/services/code-index/interfaces/vector-store.ts +++ b/src/services/code-index/interfaces/vector-store.ts @@ -62,6 +62,24 @@ export interface IVectorStore { * @returns Promise resolving to boolean indicating if the collection exists */ collectionExists(): Promise + + /** + * Checks if the collection exists and has indexed points + * @returns Promise resolving to boolean indicating if the collection exists and has points + */ + hasIndexedData(): Promise + + /** + * Marks the indexing process as complete by storing metadata + * Should be called after a successful full workspace scan or incremental scan + */ + markIndexingComplete(): Promise + + /** + * Marks the indexing process as incomplete by storing metadata + * Should be called at the start of indexing to indicate work in progress + */ + markIndexingIncomplete(): Promise } export 
interface VectorStoreSearchResult { diff --git a/src/services/code-index/orchestrator.ts b/src/services/code-index/orchestrator.ts index 2de236ee72a..6363b7db855 100644 --- a/src/services/code-index/orchestrator.ts +++ b/src/services/code-index/orchestrator.ts @@ -125,106 +125,183 @@ export class CodeIndexOrchestrator { this._isProcessing = true this.stateManager.setSystemState("Indexing", "Initializing services...") + // Track whether we successfully connected to Qdrant and started indexing + // This helps us decide whether to preserve cache on error + let indexingStarted = false + try { const collectionCreated = await this.vectorStore.initialize() + // Successfully connected to Qdrant + indexingStarted = true + if (collectionCreated) { await this.cacheManager.clearCacheFile() } - // kilocode_change start - if (this._cancelRequested) { - this._isProcessing = false - this.stateManager.setSystemState("Standby", t("embeddings:orchestrator.indexingCancelled")) - return - } - // kilocode_change end + // Check if the collection already has indexed data + // If it does, we can skip the full scan and just start the watcher + const hasExistingData = await this.vectorStore.hasIndexedData() - this.stateManager.setSystemState("Indexing", "Services ready. Starting workspace scan...") + if (hasExistingData && !collectionCreated) { + // Collection exists with data - run incremental scan to catch any new/changed files + // This handles files added while workspace was closed or Qdrant was inactive + console.log( + "[CodeIndexOrchestrator] Collection already has indexed data. 
Running incremental scan for new/changed files...", + ) - let cumulativeBlocksIndexed = 0 - let cumulativeBlocksFoundSoFar = 0 - let batchErrors: Error[] = [] + // kilocode_change start + if (this._cancelRequested) { + this._isProcessing = false + this.stateManager.setSystemState("Standby", t("embeddings:orchestrator.indexingCancelled")) + return + } + // kilocode_change end - const handleFileParsed = (fileBlockCount: number) => { - if (this._cancelRequested) return // kilocode_change - cumulativeBlocksFoundSoFar += fileBlockCount - this.stateManager.reportBlockIndexingProgress(cumulativeBlocksIndexed, cumulativeBlocksFoundSoFar) - } + this.stateManager.setSystemState("Indexing", "Checking for new or modified files...") - const handleBlocksIndexed = (indexedCount: number) => { - if (this._cancelRequested) return // kilocode_change - cumulativeBlocksIndexed += indexedCount - this.stateManager.reportBlockIndexingProgress(cumulativeBlocksIndexed, cumulativeBlocksFoundSoFar) - } + // Mark as incomplete at the start of incremental scan + await this.vectorStore.markIndexingIncomplete() - const result = await this.scanner.scanDirectory( - this.workspacePath, - (batchError: Error) => { - console.error( - `[CodeIndexOrchestrator] Error during initial scan batch: ${batchError.message}`, - batchError, - ) - batchErrors.push(batchError) - }, - handleBlocksIndexed, - handleFileParsed, - ) + let cumulativeBlocksIndexed = 0 + let cumulativeBlocksFoundSoFar = 0 + let batchErrors: Error[] = [] - if (!result) { - throw new Error("Scan failed, is scanner initialized?") - } + const handleFileParsed = (fileBlockCount: number) => { + cumulativeBlocksFoundSoFar += fileBlockCount + this.stateManager.reportBlockIndexingProgress(cumulativeBlocksIndexed, cumulativeBlocksFoundSoFar) + } - // kilocode_change start - if (this._cancelRequested || this.scanner.isCancelled) { - this._isProcessing = false - if (this.stateManager.state !== "Error") { - this.stateManager.setSystemState("Standby", 
t("embeddings:orchestrator.indexingCancelled")) + const handleBlocksIndexed = (indexedCount: number) => { + cumulativeBlocksIndexed += indexedCount + this.stateManager.reportBlockIndexingProgress(cumulativeBlocksIndexed, cumulativeBlocksFoundSoFar) } - return - } - // kilocode_change end - const { stats } = result + // Run incremental scan - scanner will skip unchanged files using cache + const result = await this.scanner.scanDirectory( + this.workspacePath, + (batchError: Error) => { + console.error( + `[CodeIndexOrchestrator] Error during incremental scan batch: ${batchError.message}`, + batchError, + ) + batchErrors.push(batchError) + }, + handleBlocksIndexed, + handleFileParsed, + ) + + if (!result) { + throw new Error("Incremental scan failed, is scanner initialized?") + } - // Check if any blocks were actually indexed successfully - // If no blocks were indexed but blocks were found, it means all batches failed - if (cumulativeBlocksIndexed === 0 && cumulativeBlocksFoundSoFar > 0) { - if (batchErrors.length > 0) { - // Use the first batch error as it's likely representative of the main issue - const firstError = batchErrors[0] - throw new Error(`Indexing failed: ${firstError.message}`) + // kilocode_change start + if (this._cancelRequested || this.scanner.isCancelled) { + this._isProcessing = false + if (this.stateManager.state !== "Error") { + this.stateManager.setSystemState("Standby", t("embeddings:orchestrator.indexingCancelled")) + } + return + } + // kilocode_change end + + // If new files were found and indexed, log the results + if (cumulativeBlocksFoundSoFar > 0) { + console.log( + `[CodeIndexOrchestrator] Incremental scan completed: ${cumulativeBlocksIndexed} blocks indexed from new/changed files`, + ) } else { - throw new Error(t("embeddings:orchestrator.indexingFailedNoBlocks")) + console.log("[CodeIndexOrchestrator] No new or changed files found") + } + + await this._startWatcher() + + // Mark indexing as complete after successful incremental 
scan + await this.vectorStore.markIndexingComplete() + + this.stateManager.setSystemState("Indexed", t("embeddings:orchestrator.fileWatcherStarted")) + } else { + // No existing data or collection was just created - do a full scan + this.stateManager.setSystemState("Indexing", "Services ready. Starting workspace scan...") + + // Mark as incomplete at the start of full scan + await this.vectorStore.markIndexingIncomplete() + + let cumulativeBlocksIndexed = 0 + let cumulativeBlocksFoundSoFar = 0 + let batchErrors: Error[] = [] + + const handleFileParsed = (fileBlockCount: number) => { + cumulativeBlocksFoundSoFar += fileBlockCount + this.stateManager.reportBlockIndexingProgress(cumulativeBlocksIndexed, cumulativeBlocksFoundSoFar) } - } - // Check for partial failures - if a significant portion of blocks failed - const failureRate = (cumulativeBlocksFoundSoFar - cumulativeBlocksIndexed) / cumulativeBlocksFoundSoFar - if (batchErrors.length > 0 && failureRate > 0.1) { - // More than 10% of blocks failed to index - const firstError = batchErrors[0] - throw new Error( - `Indexing partially failed: Only ${cumulativeBlocksIndexed} of ${cumulativeBlocksFoundSoFar} blocks were indexed. 
${firstError.message}`, + const handleBlocksIndexed = (indexedCount: number) => { + cumulativeBlocksIndexed += indexedCount + this.stateManager.reportBlockIndexingProgress(cumulativeBlocksIndexed, cumulativeBlocksFoundSoFar) + } + + const result = await this.scanner.scanDirectory( + this.workspacePath, + (batchError: Error) => { + console.error( + `[CodeIndexOrchestrator] Error during initial scan batch: ${batchError.message}`, + batchError, + ) + batchErrors.push(batchError) + }, + handleBlocksIndexed, + handleFileParsed, ) - } - // CRITICAL: If there were ANY batch errors and NO blocks were successfully indexed, - // this is a complete failure regardless of the failure rate calculation - if (batchErrors.length > 0 && cumulativeBlocksIndexed === 0) { - const firstError = batchErrors[0] - throw new Error(`Indexing failed completely: ${firstError.message}`) - } + if (!result) { + throw new Error("Scan failed, is scanner initialized?") + } - // Final sanity check: If we found blocks but indexed none and somehow no errors were reported, - // this is still a failure - if (cumulativeBlocksFoundSoFar > 0 && cumulativeBlocksIndexed === 0) { - throw new Error(t("embeddings:orchestrator.indexingFailedCritical")) - } + const { stats } = result + + // Check if any blocks were actually indexed successfully + // If no blocks were indexed but blocks were found, it means all batches failed + if (cumulativeBlocksIndexed === 0 && cumulativeBlocksFoundSoFar > 0) { + if (batchErrors.length > 0) { + // Use the first batch error as it's likely representative of the main issue + const firstError = batchErrors[0] + throw new Error(`Indexing failed: ${firstError.message}`) + } else { + throw new Error(t("embeddings:orchestrator.indexingFailedNoBlocks")) + } + } + + // Check for partial failures - if a significant portion of blocks failed + const failureRate = (cumulativeBlocksFoundSoFar - cumulativeBlocksIndexed) / cumulativeBlocksFoundSoFar + if (batchErrors.length > 0 && failureRate > 
0.1) { + // More than 10% of blocks failed to index + const firstError = batchErrors[0] + throw new Error( + `Indexing partially failed: Only ${cumulativeBlocksIndexed} of ${cumulativeBlocksFoundSoFar} blocks were indexed. ${firstError.message}`, + ) + } + + // CRITICAL: If there were ANY batch errors and NO blocks were successfully indexed, + // this is a complete failure regardless of the failure rate calculation + if (batchErrors.length > 0 && cumulativeBlocksIndexed === 0) { + const firstError = batchErrors[0] + throw new Error(`Indexing failed completely: ${firstError.message}`) + } + + // Final sanity check: If we found blocks but indexed none and somehow no errors were reported, + // this is still a failure + if (cumulativeBlocksFoundSoFar > 0 && cumulativeBlocksIndexed === 0) { + throw new Error(t("embeddings:orchestrator.indexingFailedCritical")) + } - await this._startWatcher() + await this._startWatcher() - this.stateManager.setSystemState("Indexed", t("embeddings:orchestrator.fileWatcherStarted")) + // Mark indexing as complete after successful full scan + await this.vectorStore.markIndexingComplete() + + this.stateManager.setSystemState("Indexed", t("embeddings:orchestrator.fileWatcherStarted")) + } } catch (error: any) { console.error("[CodeIndexOrchestrator] Error during indexing:", error) TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, { @@ -232,18 +309,33 @@ export class CodeIndexOrchestrator { stack: error instanceof Error ? error.stack : undefined, location: "startIndexing", }) - try { - await this.vectorStore.clearCollection() - } catch (cleanupError) { - console.error("[CodeIndexOrchestrator] Failed to clean up after error:", cleanupError) - TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, { - error: cleanupError instanceof Error ? cleanupError.message : String(cleanupError), - stack: cleanupError instanceof Error ? 
cleanupError.stack : undefined, - location: "startIndexing.cleanup", - }) + if (indexingStarted) { + try { + await this.vectorStore.clearCollection() + } catch (cleanupError) { + console.error("[CodeIndexOrchestrator] Failed to clean up after error:", cleanupError) + TelemetryService.instance.captureEvent(TelemetryEventName.CODE_INDEX_ERROR, { + error: cleanupError instanceof Error ? cleanupError.message : String(cleanupError), + stack: cleanupError instanceof Error ? cleanupError.stack : undefined, + location: "startIndexing.cleanup", + }) + } } - await this.cacheManager.clearCacheFile() + // Only clear cache if indexing had started (Qdrant connection succeeded) + // If we never connected to Qdrant, preserve cache for incremental scan when it comes back + if (indexingStarted) { + // Indexing started but failed mid-way - clear cache to avoid cache-Qdrant mismatch + await this.cacheManager.clearCacheFile() + console.log( + "[CodeIndexOrchestrator] Indexing failed after starting. Clearing cache to avoid inconsistency.", + ) + } else { + // Never connected to Qdrant - preserve cache for future incremental scan + console.log( + "[CodeIndexOrchestrator] Failed to connect to Qdrant. 
Preserving cache for future incremental scan.", + ) + } this.stateManager.setSystemState( "Error", diff --git a/src/services/code-index/service-factory.ts b/src/services/code-index/service-factory.ts index 6d69e1f0b6c..56ee1cff9f9 100644 --- a/src/services/code-index/service-factory.ts +++ b/src/services/code-index/service-factory.ts @@ -5,6 +5,7 @@ import { OpenAICompatibleEmbedder } from "./embedders/openai-compatible" import { GeminiEmbedder } from "./embedders/gemini" import { MistralEmbedder } from "./embedders/mistral" import { VercelAiGatewayEmbedder } from "./embedders/vercel-ai-gateway" +import { OpenRouterEmbedder } from "./embedders/openrouter" import { EmbedderProvider, getDefaultModelId, getModelDimension } from "../../shared/embeddingModels" import { QdrantVectorStore } from "./vector-store/qdrant-client" import { codeParser, DirectoryScanner, FileWatcher } from "./processors" @@ -79,6 +80,11 @@ export class CodeIndexServiceFactory { throw new Error(t("embeddings:serviceFactory.vercelAiGatewayConfigMissing")) } return new VercelAiGatewayEmbedder(config.vercelAiGatewayOptions.apiKey, config.modelId) + } else if (provider === "openrouter") { + if (!config.openRouterOptions?.apiKey) { + throw new Error(t("embeddings:serviceFactory.openRouterConfigMissing")) + } + return new OpenRouterEmbedder(config.openRouterOptions.apiKey, config.modelId) } throw new Error( diff --git a/src/services/code-index/vector-store/__tests__/qdrant-client.spec.ts b/src/services/code-index/vector-store/__tests__/qdrant-client.spec.ts index bd74bb63b89..19b68836499 100644 --- a/src/services/code-index/vector-store/__tests__/qdrant-client.spec.ts +++ b/src/services/code-index/vector-store/__tests__/qdrant-client.spec.ts @@ -542,14 +542,18 @@ describe("QdrantVectorStore", () => { }) expect(mockQdrantClientInstance.deleteCollection).not.toHaveBeenCalled() - // Verify payload index creation + // Verify payload index creation - 'type' field first, then pathSegments + 
expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledWith(expectedCollectionName, { + field_name: "type", + field_schema: "keyword", + }) for (let i = 0; i <= 4; i++) { expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledWith(expectedCollectionName, { field_name: `pathSegments.${i}`, field_schema: "keyword", }) } - expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(5) + expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(6) }) it("should not create a new collection if one exists with matching vectorSize and return false", async () => { // Mock getCollection to return existing collection info with matching vector size @@ -572,14 +576,18 @@ describe("QdrantVectorStore", () => { expect(mockQdrantClientInstance.createCollection).not.toHaveBeenCalled() expect(mockQdrantClientInstance.deleteCollection).not.toHaveBeenCalled() - // Verify payload index creation still happens + // Verify payload index creation still happens - 'type' field first, then pathSegments + expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledWith(expectedCollectionName, { + field_name: "type", + field_schema: "keyword", + }) for (let i = 0; i <= 4; i++) { expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledWith(expectedCollectionName, { field_name: `pathSegments.${i}`, field_schema: "keyword", }) } - expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(5) + expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(6) }) it("should recreate collection if it exists but vectorSize mismatches and return true", async () => { const differentVectorSize = 768 @@ -625,14 +633,18 @@ describe("QdrantVectorStore", () => { }, }) - // Verify payload index creation + // Verify payload index creation - 'type' field first, then pathSegments + expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledWith(expectedCollectionName, { + field_name: "type", + 
field_schema: "keyword", + }) for (let i = 0; i <= 4; i++) { expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledWith(expectedCollectionName, { field_name: `pathSegments.${i}`, field_schema: "keyword", }) } - expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(5) + expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(6) ;(console.warn as any).mockRestore() // Restore console.warn }) it("should log warning for non-404 errors but still create collection", async () => { @@ -646,7 +658,7 @@ describe("QdrantVectorStore", () => { expect(mockQdrantClientInstance.getCollection).toHaveBeenCalledTimes(1) expect(mockQdrantClientInstance.createCollection).toHaveBeenCalledTimes(1) expect(mockQdrantClientInstance.deleteCollection).not.toHaveBeenCalled() - expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(5) + expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(6) expect(console.warn).toHaveBeenCalledWith( expect.stringContaining(`Warning during getCollectionInfo for "${expectedCollectionName}"`), genericError.message, @@ -693,11 +705,16 @@ describe("QdrantVectorStore", () => { expect(result).toBe(true) expect(mockQdrantClientInstance.createCollection).toHaveBeenCalledTimes(1) - // Verify all payload index creations were attempted - expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(5) + // Verify all payload index creations were attempted (6: type + 5 pathSegments) + expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(6) - // Verify warnings were logged for each failed index - expect(console.warn).toHaveBeenCalledTimes(5) + // Verify warnings were logged for each failed index (now 6) + expect(console.warn).toHaveBeenCalledTimes(6) + // Verify warning for 'type' index + expect(console.warn).toHaveBeenCalledWith( + expect.stringContaining(`Could not create payload index for type`), + indexError.message, + ) for (let i = 0; i <= 4; 
i++) { expect(console.warn).toHaveBeenCalledWith( expect.stringContaining(`Could not create payload index for pathSegments.${i}`), @@ -826,7 +843,7 @@ describe("QdrantVectorStore", () => { expect(mockQdrantClientInstance.getCollection).toHaveBeenCalledTimes(2) expect(mockQdrantClientInstance.deleteCollection).toHaveBeenCalledTimes(1) expect(mockQdrantClientInstance.createCollection).toHaveBeenCalledTimes(1) - expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(5) + expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(6) ;(console.warn as any).mockRestore() }) @@ -923,7 +940,7 @@ describe("QdrantVectorStore", () => { on_disk: true, }, }) - expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(5) + expect(mockQdrantClientInstance.createPayloadIndex).toHaveBeenCalledTimes(6) ;(console.warn as any).mockRestore() }) @@ -1260,9 +1277,9 @@ describe("QdrantVectorStore", () => { const results = await vectorStore.search(queryVector) expect(mockQdrantClientInstance.query).toHaveBeenCalledTimes(1) - expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + const callArgs = mockQdrantClientInstance.query.mock.calls[0][1] + expect(callArgs).toMatchObject({ query: queryVector, - filter: undefined, score_threshold: DEFAULT_SEARCH_MIN_SCORE, limit: DEFAULT_MAX_SEARCH_RESULTS, params: { @@ -1273,6 +1290,9 @@ describe("QdrantVectorStore", () => { include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], }, }) + expect(callArgs.filter).toEqual({ + must_not: [{ key: "type", match: { value: "metadata" } }], + }) expect(results).toEqual(mockQdrantResults.points) }) @@ -1300,29 +1320,20 @@ describe("QdrantVectorStore", () => { const results = await vectorStore.search(queryVector, directoryPrefix) - expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + const callArgs2 = mockQdrantClientInstance.query.mock.calls[0][1] + 
expect(callArgs2).toMatchObject({ query: queryVector, - filter: { - must: [ - { - key: "pathSegments.0", - match: { value: "src" }, - }, - { - key: "pathSegments.1", - match: { value: "components" }, - }, - ], - }, score_threshold: DEFAULT_SEARCH_MIN_SCORE, limit: DEFAULT_MAX_SEARCH_RESULTS, - params: { - hnsw_ef: 128, - exact: false, - }, - with_payload: { - include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], - }, + params: { hnsw_ef: 128, exact: false }, + with_payload: { include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"] }, + }) + expect(callArgs2.filter).toEqual({ + must: [ + { key: "pathSegments.0", match: { value: "src" } }, + { key: "pathSegments.1", match: { value: "components" } }, + ], + must_not: [{ key: "type", match: { value: "metadata" } }], }) expect(results).toEqual(mockQdrantResults.points) @@ -1337,9 +1348,9 @@ describe("QdrantVectorStore", () => { await vectorStore.search(queryVector, undefined, customMinScore) - expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + const callArgs3 = mockQdrantClientInstance.query.mock.calls[0][1] + expect(callArgs3).toMatchObject({ query: queryVector, - filter: undefined, score_threshold: customMinScore, limit: DEFAULT_MAX_SEARCH_RESULTS, params: { @@ -1350,6 +1361,9 @@ describe("QdrantVectorStore", () => { include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], }, }) + expect(callArgs3.filter).toEqual({ + must_not: [{ key: "type", match: { value: "metadata" } }], + }) }) it("should use custom maxResults when provided", async () => { @@ -1361,9 +1375,9 @@ describe("QdrantVectorStore", () => { await vectorStore.search(queryVector, undefined, undefined, customMaxResults) - expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + const callArgs4 = mockQdrantClientInstance.query.mock.calls[0][1] + expect(callArgs4).toMatchObject({ query: queryVector, - filter: undefined, 
score_threshold: DEFAULT_SEARCH_MIN_SCORE, limit: customMaxResults, params: { @@ -1374,6 +1388,9 @@ describe("QdrantVectorStore", () => { include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], }, }) + expect(callArgs4.filter).toEqual({ + must_not: [{ key: "type", match: { value: "metadata" } }], + }) }) it("should filter out results with invalid payloads", async () => { @@ -1489,28 +1506,9 @@ describe("QdrantVectorStore", () => { await vectorStore.search(queryVector, directoryPrefix) - expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + const callArgs5 = mockQdrantClientInstance.query.mock.calls[0][1] + expect(callArgs5).toMatchObject({ query: queryVector, - filter: { - must: [ - { - key: "pathSegments.0", - match: { value: "src" }, - }, - { - key: "pathSegments.1", - match: { value: "components" }, - }, - { - key: "pathSegments.2", - match: { value: "ui" }, - }, - { - key: "pathSegments.3", - match: { value: "forms" }, - }, - ], - }, score_threshold: DEFAULT_SEARCH_MIN_SCORE, limit: DEFAULT_MAX_SEARCH_RESULTS, params: { @@ -1521,6 +1519,15 @@ describe("QdrantVectorStore", () => { include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], }, }) + expect(callArgs5.filter).toEqual({ + must: [ + { key: "pathSegments.0", match: { value: "src" } }, + { key: "pathSegments.1", match: { value: "components" } }, + { key: "pathSegments.2", match: { value: "ui" } }, + { key: "pathSegments.3", match: { value: "forms" } }, + ], + must_not: [{ key: "type", match: { value: "metadata" } }], + }) }) it("should handle error scenarios when qdrantClient.query fails", async () => { @@ -1573,9 +1580,9 @@ describe("QdrantVectorStore", () => { const results = await vectorStore.search(queryVector, directoryPrefix) - expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + const callArgs7 = mockQdrantClientInstance.query.mock.calls[0][1] + expect(callArgs7).toMatchObject({ query: 
queryVector, - filter: undefined, // Should be undefined for current directory score_threshold: DEFAULT_SEARCH_MIN_SCORE, limit: DEFAULT_MAX_SEARCH_RESULTS, params: { @@ -1586,6 +1593,9 @@ describe("QdrantVectorStore", () => { include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], }, }) + expect(callArgs7.filter).toEqual({ + must_not: [{ key: "type", match: { value: "metadata" } }], + }) expect(results).toEqual(mockQdrantResults.points) }) @@ -1599,9 +1609,9 @@ describe("QdrantVectorStore", () => { await vectorStore.search(queryVector, directoryPrefix) - expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + const callArgs6 = mockQdrantClientInstance.query.mock.calls[0][1] + expect(callArgs6).toMatchObject({ query: queryVector, - filter: undefined, // Should be undefined for current directory score_threshold: DEFAULT_SEARCH_MIN_SCORE, limit: DEFAULT_MAX_SEARCH_RESULTS, params: { @@ -1612,6 +1622,9 @@ describe("QdrantVectorStore", () => { include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], }, }) + expect(callArgs6.filter).toEqual({ + must_not: [{ key: "type", match: { value: "metadata" } }], + }) }) it("should not apply filter when directoryPrefix is empty string", async () => { @@ -1623,9 +1636,9 @@ describe("QdrantVectorStore", () => { await vectorStore.search(queryVector, directoryPrefix) - expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + const callArgs8 = mockQdrantClientInstance.query.mock.calls[0][1] + expect(callArgs8).toMatchObject({ query: queryVector, - filter: undefined, // Should be undefined for empty string score_threshold: DEFAULT_SEARCH_MIN_SCORE, limit: DEFAULT_MAX_SEARCH_RESULTS, params: { @@ -1636,6 +1649,9 @@ describe("QdrantVectorStore", () => { include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], }, }) + expect(callArgs8.filter).toEqual({ + must_not: [{ key: "type", match: { value: "metadata" } }], + }) }) 
it("should not apply filter when directoryPrefix is '.\\' (Windows style)", async () => { @@ -1647,9 +1663,9 @@ describe("QdrantVectorStore", () => { await vectorStore.search(queryVector, directoryPrefix) - expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + const callArgs9 = mockQdrantClientInstance.query.mock.calls[0][1] + expect(callArgs9).toMatchObject({ query: queryVector, - filter: undefined, // Should be undefined for Windows current directory score_threshold: DEFAULT_SEARCH_MIN_SCORE, limit: DEFAULT_MAX_SEARCH_RESULTS, params: { @@ -1660,6 +1676,9 @@ describe("QdrantVectorStore", () => { include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], }, }) + expect(callArgs9.filter).toEqual({ + must_not: [{ key: "type", match: { value: "metadata" } }], + }) }) it("should not apply filter when directoryPrefix has trailing slashes", async () => { @@ -1671,9 +1690,9 @@ describe("QdrantVectorStore", () => { await vectorStore.search(queryVector, directoryPrefix) - expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + const callArgs10 = mockQdrantClientInstance.query.mock.calls[0][1] + expect(callArgs10).toMatchObject({ query: queryVector, - filter: undefined, // Should be undefined after normalizing trailing slashes score_threshold: DEFAULT_SEARCH_MIN_SCORE, limit: DEFAULT_MAX_SEARCH_RESULTS, params: { @@ -1684,6 +1703,9 @@ describe("QdrantVectorStore", () => { include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], }, }) + expect(callArgs10.filter).toEqual({ + must_not: [{ key: "type", match: { value: "metadata" } }], + }) }) it("should still apply filter for relative paths like './src'", async () => { @@ -1695,16 +1717,9 @@ describe("QdrantVectorStore", () => { await vectorStore.search(queryVector, directoryPrefix) - expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + const callArgs11 = 
mockQdrantClientInstance.query.mock.calls[0][1] + expect(callArgs11).toMatchObject({ query: queryVector, - filter: { - must: [ - { - key: "pathSegments.0", - match: { value: "src" }, - }, - ], - }, // Should normalize "./src" to "src" score_threshold: DEFAULT_SEARCH_MIN_SCORE, limit: DEFAULT_MAX_SEARCH_RESULTS, params: { @@ -1715,6 +1730,15 @@ describe("QdrantVectorStore", () => { include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], }, }) + expect(callArgs11.filter).toEqual({ + must: [ + { + key: "pathSegments.0", + match: { value: "src" }, + }, + ], + must_not: [{ key: "type", match: { value: "metadata" } }], + }) // Should normalize "./src" to "src" }) it("should still apply filter for regular directory paths", async () => { @@ -1726,16 +1750,9 @@ describe("QdrantVectorStore", () => { await vectorStore.search(queryVector, directoryPrefix) - expect(mockQdrantClientInstance.query).toHaveBeenCalledWith(expectedCollectionName, { + const callArgs12 = mockQdrantClientInstance.query.mock.calls[0][1] + expect(callArgs12).toMatchObject({ query: queryVector, - filter: { - must: [ - { - key: "pathSegments.0", - match: { value: "src" }, - }, - ], - }, // Should still create filter for regular paths score_threshold: DEFAULT_SEARCH_MIN_SCORE, limit: DEFAULT_MAX_SEARCH_RESULTS, params: { @@ -1746,6 +1763,15 @@ describe("QdrantVectorStore", () => { include: ["filePath", "codeChunk", "startLine", "endLine", "pathSegments"], }, }) + expect(callArgs12.filter).toEqual({ + must: [ + { + key: "pathSegments.0", + match: { value: "src" }, + }, + ], + must_not: [{ key: "type", match: { value: "metadata" } }], + }) // Should still create filter for regular paths }) }) }) diff --git a/src/services/code-index/vector-store/qdrant-client.ts b/src/services/code-index/vector-store/qdrant-client.ts index e561c71ee6b..4e7af609fed 100644 --- a/src/services/code-index/vector-store/qdrant-client.ts +++ b/src/services/code-index/vector-store/qdrant-client.ts @@ -1,10 +1,10 @@ 
import { QdrantClient, Schemas } from "@qdrant/js-client-rest" import { createHash } from "crypto" import * as path from "path" -import { getWorkspacePath } from "../../../utils/path" +import { v5 as uuidv5 } from "uuid" import { IVectorStore } from "../interfaces/vector-store" import { Payload, VectorStoreSearchResult } from "../interfaces" -import { DEFAULT_MAX_SEARCH_RESULTS, DEFAULT_SEARCH_MIN_SCORE } from "../constants" +import { DEFAULT_MAX_SEARCH_RESULTS, DEFAULT_SEARCH_MIN_SCORE, QDRANT_CODE_BLOCK_NAMESPACE } from "../constants" import { t } from "../../../i18n" /** @@ -296,6 +296,23 @@ export class QdrantVectorStore implements IVectorStore { * Creates payload indexes for the collection, handling errors gracefully. */ private async _createPayloadIndexes(): Promise { + // Create index for the 'type' field to enable metadata filtering + try { + await this.client.createPayloadIndex(this.collectionName, { + field_name: "type", + field_schema: "keyword", + }) + } catch (indexError: any) { + const errorMessage = (indexError?.message || "").toLowerCase() + if (!errorMessage.includes("already exists")) { + console.warn( + `[QdrantVectorStore] Could not create payload index for type on ${this.collectionName}. 
Details:`, + indexError?.message || indexError, + ) + } + } + + // Create indexes for pathSegments fields for (let i = 0; i <= 4; i++) { try { await this.client.createPayloadIndex(this.collectionName, { @@ -386,7 +403,12 @@ export class QdrantVectorStore implements IVectorStore { maxResults?: number, ): Promise { try { - let filter = undefined + let filter: + | { + must: Array<{ key: string; match: { value: string } }> + must_not?: Array<{ key: string; match: { value: string } }> + } + | undefined = undefined if (directoryPrefix) { // Check if the path represents current directory @@ -412,9 +434,18 @@ export class QdrantVectorStore implements IVectorStore { } } + // Always exclude metadata points at query-time to avoid wasting top-k + const metadataExclusion = { + must_not: [{ key: "type", match: { value: "metadata" } }], + } + + const mergedFilter = filter + ? { ...filter, must_not: [...(filter.must_not || []), ...metadataExclusion.must_not] } + : metadataExclusion + const searchRequest = { query: queryVector, - filter, + filter: mergedFilter, score_threshold: minScore ?? DEFAULT_SEARCH_MIN_SCORE, limit: maxResults ?? DEFAULT_MAX_SEARCH_RESULTS, params: { @@ -548,4 +579,106 @@ export class QdrantVectorStore implements IVectorStore { const collectionInfo = await this.getCollectionInfo() return collectionInfo !== null } + + /** + * Checks if the collection exists and has indexed points + * @returns Promise resolving to boolean indicating if the collection exists and has points + */ + async hasIndexedData(): Promise { + try { + const collectionInfo = await this.getCollectionInfo() + if (!collectionInfo) { + return false + } + // Check if the collection has any points indexed + const pointsCount = collectionInfo.points_count ?? 
0 + if (pointsCount === 0) { + return false + } + + // Check if the indexing completion marker exists + // Use a deterministic UUID generated from a constant string + const metadataId = uuidv5("__indexing_metadata__", QDRANT_CODE_BLOCK_NAMESPACE) + const metadataPoints = await this.client.retrieve(this.collectionName, { + ids: [metadataId], + }) + + // If marker exists, use it to determine completion status + if (metadataPoints.length > 0) { + return metadataPoints[0].payload?.indexing_complete === true + } + + // Backward compatibility: No marker exists (old index or pre-marker version) + // Fall back to old logic - assume complete if collection has points + console.log( + "[QdrantVectorStore] No indexing metadata marker found. Using backward compatibility mode (checking points_count > 0).", + ) + return pointsCount > 0 + } catch (error) { + console.warn("[QdrantVectorStore] Failed to check if collection has data:", error) + return false + } + } + + /** + * Marks the indexing process as complete by storing metadata + * Should be called after a successful full workspace scan or incremental scan + */ + async markIndexingComplete(): Promise { + try { + // Create a metadata point with a deterministic UUID to mark indexing as complete + // Use uuidv5 to generate a consistent UUID from a constant string + const metadataId = uuidv5("__indexing_metadata__", QDRANT_CODE_BLOCK_NAMESPACE) + + await this.client.upsert(this.collectionName, { + points: [ + { + id: metadataId, + vector: new Array(this.vectorSize).fill(0), + payload: { + type: "metadata", + indexing_complete: true, + completed_at: Date.now(), + }, + }, + ], + wait: true, + }) + console.log("[QdrantVectorStore] Marked indexing as complete") + } catch (error) { + console.error("[QdrantVectorStore] Failed to mark indexing as complete:", error) + throw error + } + } + + /** + * Marks the indexing process as incomplete by storing metadata + * Should be called at the start of indexing to indicate work in progress + */ 
+ async markIndexingIncomplete(): Promise { + try { + // Create a metadata point with a deterministic UUID to mark indexing as incomplete + // Use uuidv5 to generate a consistent UUID from a constant string + const metadataId = uuidv5("__indexing_metadata__", QDRANT_CODE_BLOCK_NAMESPACE) + + await this.client.upsert(this.collectionName, { + points: [ + { + id: metadataId, + vector: new Array(this.vectorSize).fill(0), + payload: { + type: "metadata", + indexing_complete: false, + started_at: Date.now(), + }, + }, + ], + wait: true, + }) + console.log("[QdrantVectorStore] Marked indexing as incomplete (in progress)") + } catch (error) { + console.error("[QdrantVectorStore] Failed to mark indexing as incomplete:", error) + throw error + } + } } diff --git a/src/services/mcp/McpHub.ts b/src/services/mcp/McpHub.ts index 741dc9efd2a..2bc504d7b25 100644 --- a/src/services/mcp/McpHub.ts +++ b/src/services/mcp/McpHub.ts @@ -33,6 +33,7 @@ import { fileExistsAtPath } from "../../utils/fs" import { arePathsEqual, getWorkspacePath } from "../../utils/path" import { injectVariables } from "../../utils/config" import { NotificationService } from "./kilocode/NotificationService" +import { safeWriteJson } from "../../utils/safeWriteJson" // Discriminated union for connection states export type ConnectedMcpConnection = { @@ -153,6 +154,8 @@ export class McpHub { readonly kiloNotificationService = new NotificationService() private refCount: number = 0 // Reference counter for active clients private configChangeDebounceTimers: Map = new Map() + private isProgrammaticUpdate: boolean = false + private flagResetTimer?: NodeJS.Timeout constructor(provider: ClineProvider) { this.providerRef = new WeakRef(provider) @@ -280,6 +283,11 @@ export class McpHub { * Debounced wrapper for handling config file changes */ private debounceConfigChange(filePath: string, source: "global" | "project"): void { + // Skip processing if this is a programmatic update to prevent unnecessary server restarts + 
if (this.isProgrammaticUpdate) { + return + } + const key = `${source}-${filePath}` // Clear existing timer if any @@ -1425,13 +1433,16 @@ export class McpHub { this.removeFileWatchersForServer(serverName) await this.deleteConnection(serverName, serverSource) // Re-add as a disabled connection - await this.connectToServer(serverName, JSON.parse(connection.server.config), serverSource) + // Re-read config from file to get updated disabled state + const updatedConfig = await this.readServerConfigFromFile(serverName, serverSource) + await this.connectToServer(serverName, updatedConfig, serverSource) } else if (!disabled && connection.server.status === "disconnected") { // If enabling a disabled server, connect it - const config = JSON.parse(connection.server.config) + // Re-read config from file to get updated disabled state + const updatedConfig = await this.readServerConfigFromFile(serverName, serverSource) await this.deleteConnection(serverName, serverSource) // When re-enabling, file watchers will be set up in connectToServer - await this.connectToServer(serverName, config, serverSource) + await this.connectToServer(serverName, updatedConfig, serverSource) } else if (connection.server.status === "connected") { // Only refresh capabilities if connected await this.fetchAvailableServerCapabilities(serverName, serverSource) // kilocode_change: logic moved into method @@ -1448,6 +1459,57 @@ export class McpHub { } } + /** + * Helper method to read a server's configuration from the appropriate settings file + * @param serverName The name of the server to read + * @param source Whether to read from the global or project config + * @returns The validated server configuration + */ + private async readServerConfigFromFile( + serverName: string, + source: "global" | "project" = "global", + ): Promise> { + // Determine which config file to read + let configPath: string + if (source === "project") { + const projectMcpPath = await this.getProjectMcpPath() + if (!projectMcpPath) 
{ + throw new Error("Project MCP configuration file not found") + } + configPath = projectMcpPath + } else { + configPath = await this.getMcpSettingsFilePath() + } + + // Ensure the settings file exists and is accessible + try { + await fs.access(configPath) + } catch (error) { + console.error("Settings file not accessible:", error) + throw new Error("Settings file not accessible") + } + + // Read and parse the config file + const content = await fs.readFile(configPath, "utf-8") + const config = JSON.parse(content) + + // Validate the config structure + if (!config || typeof config !== "object") { + throw new Error("Invalid config structure") + } + + if (!config.mcpServers || typeof config.mcpServers !== "object") { + throw new Error("No mcpServers section in config") + } + + if (!config.mcpServers[serverName]) { + throw new Error(`Server ${serverName} not found in config`) + } + + // Validate and return the server config + return this.validateServerConfig(config.mcpServers[serverName], serverName) + } + /** * Helper method to update a server's configuration in the appropriate settings file * @param serverName The name of the server to update @@ -1514,7 +1576,20 @@ export class McpHub { mcpServers: config.mcpServers, } - await fs.writeFile(configPath, JSON.stringify(updatedConfig, null, 2)) + // Set flag to prevent file watcher from triggering server restart + if (this.flagResetTimer) { + clearTimeout(this.flagResetTimer) + } + this.isProgrammaticUpdate = true + try { + await safeWriteJson(configPath, updatedConfig) + } finally { + // Reset flag after watcher debounce period (non-blocking) + this.flagResetTimer = setTimeout(() => { + this.isProgrammaticUpdate = false + this.flagResetTimer = undefined + }, 600) + } } public async updateServerTimeout( @@ -1592,7 +1667,7 @@ export class McpHub { mcpServers: config.mcpServers, } - await fs.writeFile(configPath, JSON.stringify(updatedConfig, null, 2)) + await safeWriteJson(configPath, updatedConfig) // Update server 
connections with the correct source await this.updateServerConnections(config.mcpServers, serverSource) @@ -1737,7 +1812,20 @@ export class McpHub { targetList.splice(toolIndex, 1) } - await fs.writeFile(normalizedPath, JSON.stringify(config, null, 2)) + // Set flag to prevent file watcher from triggering server restart + if (this.flagResetTimer) { + clearTimeout(this.flagResetTimer) + } + this.isProgrammaticUpdate = true + try { + await safeWriteJson(normalizedPath, config) + } finally { + // Reset flag after watcher debounce period (non-blocking) + this.flagResetTimer = setTimeout(() => { + this.isProgrammaticUpdate = false + this.flagResetTimer = undefined + }, 600) + } if (connection) { connection.server.tools = await this.fetchToolsList(serverName, source) @@ -1847,6 +1935,13 @@ export class McpHub { } this.configChangeDebounceTimers.clear() + // Clear flag reset timer and reset programmatic update flag + if (this.flagResetTimer) { + clearTimeout(this.flagResetTimer) + this.flagResetTimer = undefined + } + this.isProgrammaticUpdate = false + this.removeAllFileWatchers() for (const connection of this.connections) { try { diff --git a/src/services/ripgrep/index.ts b/src/services/ripgrep/index.ts index d384b27c91c..5dd800ac6f7 100644 --- a/src/services/ripgrep/index.ts +++ b/src/services/ripgrep/index.ts @@ -150,7 +150,15 @@ export async function regexSearchFiles( throw new Error("Could not find ripgrep binary") } - const args = ["--json", "-e", regex, "--glob", filePattern || "*", "--context", "1", "--no-messages", directoryPath] + const args = ["--json", "-e", regex] + + // Only add --glob if a specific file pattern is provided + // Using --glob "*" overrides .gitignore behavior, so we omit it when no pattern is specified + if (filePattern) { + args.push("--glob", filePattern) + } + + args.push("--context", "1", "--no-messages", directoryPath) let output: string try { diff --git a/src/services/search/__tests__/file-search.spec.ts 
b/src/services/search/__tests__/file-search.spec.ts new file mode 100644 index 00000000000..94b140e888f --- /dev/null +++ b/src/services/search/__tests__/file-search.spec.ts @@ -0,0 +1,88 @@ +import { describe, it, expect, vi } from "vitest" +import * as vscode from "vscode" + +// Mock vscode +vi.mock("vscode", () => ({ + workspace: { + getConfiguration: vi.fn(), + }, + env: { + appRoot: "/mock/app/root", + }, +})) + +// Mock getBinPath +vi.mock("../ripgrep", () => ({ + getBinPath: vi.fn(async () => null), // Return null to skip actual ripgrep execution +})) + +// Mock child_process +vi.mock("child_process", () => ({ + spawn: vi.fn(), +})) + +describe("file-search", () => { + describe("configuration integration", () => { + it("should read VSCode search configuration settings", async () => { + const mockSearchConfig = { + get: vi.fn((key: string) => { + if (key === "useIgnoreFiles") return false + if (key === "useGlobalIgnoreFiles") return false + if (key === "useParentIgnoreFiles") return false + return undefined + }), + } + const mockRooConfig = { + get: vi.fn(() => 10000), + } + + ;(vscode.workspace.getConfiguration as any).mockImplementation((section: string) => { + if (section === "search") return mockSearchConfig + if (section === "roo-cline") return mockRooConfig + return { get: vi.fn() } + }) + + // Import the module - this will call getConfiguration during import + await import("../file-search") + + // Verify that configuration is accessible + expect(vscode.workspace.getConfiguration).toBeDefined() + }) + + it("should read maximumIndexedFilesForFileSearch configuration", async () => { + const mockRooConfig = { + get: vi.fn((key: string, defaultValue: number) => { + if (key === "maximumIndexedFilesForFileSearch") return 50000 + return defaultValue + }), + } + + ;(vscode.workspace.getConfiguration as any).mockImplementation((section: string) => { + if (section === "roo-cline") return mockRooConfig + return { get: vi.fn() } + }) + + // The configuration should 
be readable + const config = vscode.workspace.getConfiguration("roo-cline") + const limit = config.get("maximumIndexedFilesForFileSearch", 10000) + + expect(limit).toBe(50000) + }) + + it("should use default limit when configuration is not provided", () => { + const mockRooConfig = { + get: vi.fn((key: string, defaultValue: number) => defaultValue), + } + + ;(vscode.workspace.getConfiguration as any).mockImplementation((section: string) => { + if (section === "roo-cline") return mockRooConfig + return { get: vi.fn() } + }) + + const config = vscode.workspace.getConfiguration("roo-cline") + const limit = config.get("maximumIndexedFilesForFileSearch", 10000) + + expect(limit).toBe(10000) + }) + }) +}) diff --git a/src/services/search/file-search.ts b/src/services/search/file-search.ts index a25dd4068f9..02b82512a54 100644 --- a/src/services/search/file-search.ts +++ b/src/services/search/file-search.ts @@ -85,14 +85,44 @@ export async function executeRipgrep({ }) } +/** + * Get extra ripgrep arguments based on VSCode search configuration + */ +function getRipgrepSearchOptions(): string[] { + const config = vscode.workspace.getConfiguration("search") + const extraArgs: string[] = [] + + // Respect VSCode's search.useIgnoreFiles setting + if (config.get("useIgnoreFiles") === false) { + extraArgs.push("--no-ignore") + } + + // Respect VSCode's search.useGlobalIgnoreFiles setting + if (config.get("useGlobalIgnoreFiles") === false) { + extraArgs.push("--no-ignore-global") + } + + // Respect VSCode's search.useParentIgnoreFiles setting + if (config.get("useParentIgnoreFiles") === false) { + extraArgs.push("--no-ignore-parent") + } + + return extraArgs +} + export async function executeRipgrepForFiles( workspacePath: string, - limit: number = 5000, + limit?: number, ): Promise<{ path: string; type: "file" | "folder"; label?: string }[]> { + // Get limit from configuration if not provided + const effectiveLimit = + limit ?? 
vscode.workspace.getConfiguration("roo-cline").get("maximumIndexedFilesForFileSearch", 10000) + const args = [ "--files", "--follow", "--hidden", + ...getRipgrepSearchOptions(), "-g", "!**/node_modules/**", "-g", @@ -104,7 +134,7 @@ export async function executeRipgrepForFiles( workspacePath, ] - return executeRipgrep({ args, workspacePath, limit }) + return executeRipgrep({ args, workspacePath, limit: effectiveLimit }) } export async function searchWorkspaceFiles( @@ -113,8 +143,8 @@ export async function searchWorkspaceFiles( limit: number = 20, ): Promise<{ path: string; type: "file" | "folder"; label?: string }[]> { try { - // Get all files and directories (from our modified function) - const allItems = await executeRipgrepForFiles(workspacePath, 5000) + // Get all files and directories (uses configured limit) + const allItems = await executeRipgrepForFiles(workspacePath) // If no query, just return the top items if (!query.trim()) { diff --git a/src/shared/ExtensionMessage.ts b/src/shared/ExtensionMessage.ts index cc2b8573f76..1af6604a888 100644 --- a/src/shared/ExtensionMessage.ts +++ b/src/shared/ExtensionMessage.ts @@ -105,6 +105,7 @@ export interface ExtensionMessage { | "checkRulesDirectoryResult" | "deleteCustomModeCheck" | "currentCheckpointUpdated" + | "checkpointInitWarning" | "showHumanRelayDialog" | "humanRelayResponse" | "humanRelayCancel" @@ -170,6 +171,11 @@ export interface ExtensionMessage { | TasksByIdResponsePayload | TaskHistoryResponsePayload // kilocode_change end + // Checkpoint warning message + checkpointWarning?: { + type: "WAIT_TIMEOUT" | "INIT_TIMEOUT" + timeout: number + } action?: | "chatButtonClicked" | "mcpButtonClicked" @@ -379,6 +385,8 @@ export type ExtensionState = Pick< | "openRouterImageGenerationSelectedModel" | "includeTaskHistoryInEnhance" | "reasoningBlockCollapsed" + | "includeCurrentTime" + | "includeCurrentCost" > & { version: string clineMessages: ClineMessage[] @@ -400,6 +408,7 @@ export type ExtensionState = Pick< 
requestDelaySeconds: number enableCheckpoints: boolean + checkpointTimeout: number // Timeout for checkpoint initialization in seconds (default: 15) maxOpenTabsContext: number // Maximum number of VSCode open tabs to include in context (0-500) maxWorkspaceFiles: number // Maximum number of files to include in current working directory details (0-500) showRooIgnoredFiles: boolean // Whether to show .kilocodeignore'd files in listings @@ -474,7 +483,6 @@ export interface ClineSayTool { | "switchMode" | "newTask" | "finishTask" - | "searchAndReplace" | "insertContent" | "generateImage" | "imageGenerated" @@ -489,12 +497,6 @@ export interface ClineSayTool { isOutsideWorkspace?: boolean isProtected?: boolean additionalFileCount?: number // Number of additional files in the same read_file request - search?: string - replace?: string - useRegex?: boolean - ignoreCase?: boolean - startLine?: number - endLine?: number lineNumber?: number query?: string batchFiles?: Array<{ diff --git a/src/shared/WebviewMessage.ts b/src/shared/WebviewMessage.ts index 93c3c70f3fe..154b005e1fe 100644 --- a/src/shared/WebviewMessage.ts +++ b/src/shared/WebviewMessage.ts @@ -93,6 +93,7 @@ export interface WebviewMessage { | "requestOpenAiModels" | "requestOllamaModels" | "requestLmStudioModels" + | "requestRooModels" | "requestVsCodeLmModels" | "requestHuggingFaceModels" | "openImage" @@ -123,6 +124,7 @@ export interface WebviewMessage { | "soundVolume" | "diffEnabled" | "enableCheckpoints" + | "checkpointTimeout" | "browserViewportSize" | "screenshotQuality" | "remoteBrowserHost" @@ -208,6 +210,8 @@ export interface WebviewMessage { | "allowVeryLargeReads" // kilocode_change | "includeDiagnosticMessages" | "maxDiagnosticMessages" + | "includeCurrentTime" + | "includeCurrentCost" | "searchFiles" | "setHistoryPreviewCollapsed" | "showFeedbackOptions" // kilocode_change @@ -388,6 +392,7 @@ export interface WebviewMessage { | "gemini" | "mistral" | "vercel-ai-gateway" + | "openrouter" 
codebaseIndexEmbedderBaseUrl?: string codebaseIndexEmbedderModelId: string codebaseIndexEmbedderModelDimension?: number // Generic dimension for all providers @@ -402,6 +407,7 @@ export interface WebviewMessage { codebaseIndexGeminiApiKey?: string codebaseIndexMistralApiKey?: string codebaseIndexVercelAiGatewayApiKey?: string + codebaseIndexOpenRouterApiKey?: string } } @@ -475,10 +481,10 @@ export interface TaskHistoryResponsePayload { // kilocode_change end export const checkoutDiffPayloadSchema = z.object({ - ts: z.number(), + ts: z.number().optional(), previousCommitHash: z.string().optional(), commitHash: z.string(), - mode: z.enum(["full", "checkpoint"]), + mode: z.enum(["full", "checkpoint", "from-init", "to-current"]), }) export type CheckpointDiffPayload = z.infer diff --git a/src/shared/__tests__/modes.spec.ts b/src/shared/__tests__/modes.spec.ts index 1aeb468361d..e443a30bd7e 100644 --- a/src/shared/__tests__/modes.spec.ts +++ b/src/shared/__tests__/modes.spec.ts @@ -246,16 +246,7 @@ describe("isToolAllowedForMode", () => { expect(isToolAllowedForMode("use_mcp_tool", "architect", [])).toBe(true) }) - it("applies restrictions to all edit tools including search_and_replace and insert_content", () => { - // Test search_and_replace with matching file - expect( - isToolAllowedForMode("search_and_replace", "architect", [], undefined, { - path: "test.md", - search: "old text", - replace: "new text", - }), - ).toBe(true) - + it("applies restrictions to insert_content edit tool", () => { // Test insert_content with matching file expect( isToolAllowedForMode("insert_content", "architect", [], undefined, { @@ -265,22 +256,6 @@ describe("isToolAllowedForMode", () => { }), ).toBe(true) - // Test search_and_replace with non-matching file - should throw error - expect(() => - isToolAllowedForMode("search_and_replace", "architect", [], undefined, { - path: "test.py", - search: "old text", - replace: "new text", - }), - ).toThrow(FileRestrictionError) - expect(() => - 
isToolAllowedForMode("search_and_replace", "architect", [], undefined, { - path: "test.py", - search: "old text", - replace: "new text", - }), - ).toThrow(/Markdown files only/) - // Test insert_content with non-matching file - should throw error expect(() => isToolAllowedForMode("insert_content", "architect", [], undefined, { diff --git a/src/shared/api.ts b/src/shared/api.ts index f792847ca8f..279f72c7af0 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -159,7 +159,7 @@ const dynamicProviderExtras = { "vercel-ai-gateway": {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type huggingface: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type litellm: {} as { apiKey: string; baseUrl: string }, - "kilocode-openrouter": {} as { kilocodeToken?: string; kilocodeOrganizationId?: string }, // kilocode_change + kilocode: {} as { kilocodeToken?: string; kilocodeOrganizationId?: string }, // kilocode_change deepinfra: {} as { apiKey?: string; baseUrl?: string }, "io-intelligence": {} as { apiKey: string }, requesty: {} as { apiKey?: string; baseUrl?: string }, @@ -168,8 +168,9 @@ const dynamicProviderExtras = { ollama: {} as { numCtx?: number }, // kilocode_change lmstudio: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type ovhcloud: {} as { apiKey?: string }, // kilocode_change - chutes: {} as { apiKey?: string }, // kilocode_change inception: {} as { apiKey?: string; baseUrl?: string }, // kilocode_change + roo: {} as { apiKey?: string; baseUrl?: string }, + chutes: {} as { apiKey?: string }, } as const satisfies Record // Build the dynamic options union from the map, intersected with CommonFetchParams diff --git a/src/shared/cost.ts b/src/shared/cost.ts index a628756b0db..fea686d8aed 100644 --- a/src/shared/cost.ts +++ b/src/shared/cost.ts @@ -1,18 +1,31 @@ import type { ModelInfo } from "@roo-code/types" +export interface ApiCostResult { + totalInputTokens: number + totalOutputTokens: number + 
totalCost: number +} + function calculateApiCostInternal( modelInfo: ModelInfo, inputTokens: number, outputTokens: number, cacheCreationInputTokens: number, cacheReadInputTokens: number, -): number { + totalInputTokens: number, + totalOutputTokens: number, +): ApiCostResult { const cacheWritesCost = ((modelInfo.cacheWritesPrice || 0) / 1_000_000) * cacheCreationInputTokens const cacheReadsCost = ((modelInfo.cacheReadsPrice || 0) / 1_000_000) * cacheReadInputTokens const baseInputCost = ((modelInfo.inputPrice || 0) / 1_000_000) * inputTokens const outputCost = ((modelInfo.outputPrice || 0) / 1_000_000) * outputTokens const totalCost = cacheWritesCost + cacheReadsCost + baseInputCost + outputCost - return totalCost + + return { + totalInputTokens, + totalOutputTokens, + totalCost, + } } // For Anthropic compliant usage, the input tokens count does NOT include the @@ -23,13 +36,22 @@ export function calculateApiCostAnthropic( outputTokens: number, cacheCreationInputTokens?: number, cacheReadInputTokens?: number, -): number { +): ApiCostResult { + const cacheCreation = cacheCreationInputTokens || 0 + const cacheRead = cacheReadInputTokens || 0 + + // For Anthropic: inputTokens does NOT include cached tokens + // Total input = base input + cache creation + cache reads + const totalInputTokens = inputTokens + cacheCreation + cacheRead + return calculateApiCostInternal( modelInfo, inputTokens, outputTokens, - cacheCreationInputTokens || 0, - cacheReadInputTokens || 0, + cacheCreation, + cacheRead, + totalInputTokens, + outputTokens, ) } @@ -40,17 +62,21 @@ export function calculateApiCostOpenAI( outputTokens: number, cacheCreationInputTokens?: number, cacheReadInputTokens?: number, -): number { +): ApiCostResult { const cacheCreationInputTokensNum = cacheCreationInputTokens || 0 const cacheReadInputTokensNum = cacheReadInputTokens || 0 const nonCachedInputTokens = Math.max(0, inputTokens - cacheCreationInputTokensNum - cacheReadInputTokensNum) + // For OpenAI: inputTokens 
ALREADY includes all tokens (cached + non-cached) + // So we pass the original inputTokens as the total return calculateApiCostInternal( modelInfo, nonCachedInputTokens, outputTokens, cacheCreationInputTokensNum, cacheReadInputTokensNum, + inputTokens, + outputTokens, ) } diff --git a/src/shared/embeddingModels.ts b/src/shared/embeddingModels.ts index 80c51a6b455..8c2f8fd44c7 100644 --- a/src/shared/embeddingModels.ts +++ b/src/shared/embeddingModels.ts @@ -2,7 +2,14 @@ * Defines profiles for different embedding models, including their dimensions. */ -export type EmbedderProvider = "openai" | "ollama" | "openai-compatible" | "gemini" | "mistral" | "vercel-ai-gateway" // Add other providers as needed +export type EmbedderProvider = + | "openai" + | "ollama" + | "openai-compatible" + | "gemini" + | "mistral" + | "vercel-ai-gateway" + | "openrouter" // Add other providers as needed export interface EmbeddingModelProfile { dimension: number @@ -70,6 +77,19 @@ export const EMBEDDING_MODEL_PROFILES: EmbeddingModelProfiles = { "mistral/codestral-embed": { dimension: 1536, scoreThreshold: 0.4 }, "mistral/mistral-embed": { dimension: 1024, scoreThreshold: 0.4 }, }, + openrouter: { + // OpenAI models via OpenRouter + "openai/text-embedding-3-small": { dimension: 1536, scoreThreshold: 0.4 }, + "openai/text-embedding-3-large": { dimension: 3072, scoreThreshold: 0.4 }, + "openai/text-embedding-ada-002": { dimension: 1536, scoreThreshold: 0.4 }, + // Google models via OpenRouter + "google/gemini-embedding-001": { dimension: 3072, scoreThreshold: 0.4 }, + // Mistral models via OpenRouter + "mistralai/mistral-embed-2312": { dimension: 1024, scoreThreshold: 0.4 }, + "mistralai/codestral-embed-2505": { dimension: 3072, scoreThreshold: 0.4 }, + // Qwen models via OpenRouter + "qwen/qwen3-embedding-8b": { dimension: 4096, scoreThreshold: 0.4 }, + }, } /** @@ -163,6 +183,9 @@ export function getDefaultModelId(provider: EmbedderProvider): string { case "vercel-ai-gateway": return 
"openai/text-embedding-3-large" + case "openrouter": + return "openai/text-embedding-3-large" + default: // Fallback for unknown providers console.warn(`Unknown provider for default model ID: ${provider}. Falling back to OpenAI default.`) diff --git a/src/shared/getApiMetrics.ts b/src/shared/getApiMetrics.ts index 066d44c8081..93884309daf 100644 --- a/src/shared/getApiMetrics.ts +++ b/src/shared/getApiMetrics.ts @@ -94,15 +94,12 @@ export function getApiMetrics(messages: ClineMessage[]) { if (message.type === "say" && message.say === "api_req_started" && message.text) { try { const parsedText: ParsedApiReqStartedTextType = JSON.parse(message.text) - const { tokensIn, tokensOut, cacheWrites, cacheReads, apiProtocol } = parsedText - - // Calculate context tokens based on API protocol. - if (apiProtocol === "anthropic") { - result.contextTokens = (tokensIn || 0) + (tokensOut || 0) + (cacheWrites || 0) + (cacheReads || 0) - } else { - // For OpenAI (or when protocol is not specified). - result.contextTokens = (tokensIn || 0) + (tokensOut || 0) - } + const { tokensIn, tokensOut } = parsedText + + // Since tokensIn now stores TOTAL input tokens (including cache tokens), + // we no longer need to add cacheWrites and cacheReads separately. + // This applies to both Anthropic and OpenAI protocols. 
+ result.contextTokens = (tokensIn || 0) + (tokensOut || 0) } catch (error) { console.error("Error parsing JSON:", error) continue diff --git a/src/shared/tools.ts b/src/shared/tools.ts index 8f994e8b251..bb3bc18dfc3 100644 --- a/src/shared/tools.ts +++ b/src/shared/tools.ts @@ -67,11 +67,10 @@ export const toolParamNames = [ "code_edit", "files", // kilocode_change end + "query", "args", "start_line", "end_line", - "query", - "args", "todos", "prompt", "image", @@ -179,12 +178,6 @@ export interface RunSlashCommandToolUse extends ToolUse { params: Partial, "command" | "args">> } -export interface SearchAndReplaceToolUse extends ToolUse { - name: "search_and_replace" - params: Required, "path" | "search" | "replace">> & - Partial, "use_regex" | "ignore_case" | "start_line" | "end_line">> -} - // kilocode_change start: Morph fast apply export interface EditFileToolUse extends ToolUse { name: "edit_file" @@ -221,7 +214,6 @@ export const TOOL_DISPLAY_NAMES: Record = { switch_mode: "switch modes", new_task: "create new task", insert_content: "insert content", - search_and_replace: "search and replace", new_rule: "create new rule", report_bug: "report bug", // kilocode_change condense: "condense the current context window", // kilocode_change @@ -249,7 +241,6 @@ export const TOOL_GROUPS: Record = { "edit_file", // kilocode_change: Morph fast apply "write_to_file", "insert_content", - "search_and_replace", "new_rule", // kilocode_change "generate_image", ], diff --git a/src/shared/utils/requesty.ts b/src/shared/utils/requesty.ts index b5d73e629f8..556a17cdb8d 100644 --- a/src/shared/utils/requesty.ts +++ b/src/shared/utils/requesty.ts @@ -1,4 +1,4 @@ -const REQUESTY_BASE_URL = "https://router.requesty.ai/v1" +export const REQUESTY_BASE_URL = "https://router.requesty.ai/v1" type URLType = "router" | "app" | "api" diff --git a/src/utils/__tests__/cost.spec.ts b/src/utils/__tests__/cost.spec.ts index 10ae279e48d..83d26871369 100644 --- a/src/utils/__tests__/cost.spec.ts +++ 
b/src/utils/__tests__/cost.spec.ts @@ -17,43 +17,51 @@ describe("Cost Utility", () => { } it("should calculate basic input/output costs correctly", () => { - const cost = calculateApiCostAnthropic(mockModelInfo, 1000, 500) + const result = calculateApiCostAnthropic(mockModelInfo, 1000, 500) // Input cost: (3.0 / 1_000_000) * 1000 = 0.003 // Output cost: (15.0 / 1_000_000) * 500 = 0.0075 // Total: 0.003 + 0.0075 = 0.0105 - expect(cost).toBe(0.0105) + expect(result.totalCost).toBe(0.0105) + expect(result.totalInputTokens).toBe(1000) + expect(result.totalOutputTokens).toBe(500) }) it("should handle cache writes cost", () => { - const cost = calculateApiCostAnthropic(mockModelInfo, 1000, 500, 2000) + const result = calculateApiCostAnthropic(mockModelInfo, 1000, 500, 2000) // Input cost: (3.0 / 1_000_000) * 1000 = 0.003 // Output cost: (15.0 / 1_000_000) * 500 = 0.0075 // Cache writes: (3.75 / 1_000_000) * 2000 = 0.0075 // Total: 0.003 + 0.0075 + 0.0075 = 0.018 - expect(cost).toBeCloseTo(0.018, 6) + expect(result.totalCost).toBeCloseTo(0.018, 6) + expect(result.totalInputTokens).toBe(3000) // 1000 + 2000 + expect(result.totalOutputTokens).toBe(500) }) it("should handle cache reads cost", () => { - const cost = calculateApiCostAnthropic(mockModelInfo, 1000, 500, undefined, 3000) + const result = calculateApiCostAnthropic(mockModelInfo, 1000, 500, undefined, 3000) // Input cost: (3.0 / 1_000_000) * 1000 = 0.003 // Output cost: (15.0 / 1_000_000) * 500 = 0.0075 // Cache reads: (0.3 / 1_000_000) * 3000 = 0.0009 // Total: 0.003 + 0.0075 + 0.0009 = 0.0114 - expect(cost).toBe(0.0114) + expect(result.totalCost).toBe(0.0114) + expect(result.totalInputTokens).toBe(4000) // 1000 + 3000 + expect(result.totalOutputTokens).toBe(500) }) it("should handle all cost components together", () => { - const cost = calculateApiCostAnthropic(mockModelInfo, 1000, 500, 2000, 3000) + const result = calculateApiCostAnthropic(mockModelInfo, 1000, 500, 2000, 3000) // Input cost: (3.0 / 1_000_000) * 
1000 = 0.003 // Output cost: (15.0 / 1_000_000) * 500 = 0.0075 // Cache writes: (3.75 / 1_000_000) * 2000 = 0.0075 // Cache reads: (0.3 / 1_000_000) * 3000 = 0.0009 // Total: 0.003 + 0.0075 + 0.0075 + 0.0009 = 0.0189 - expect(cost).toBe(0.0189) + expect(result.totalCost).toBe(0.0189) + expect(result.totalInputTokens).toBe(6000) // 1000 + 2000 + 3000 + expect(result.totalOutputTokens).toBe(500) }) it("should handle missing prices gracefully", () => { @@ -63,22 +71,28 @@ describe("Cost Utility", () => { supportsPromptCache: true, } - const cost = calculateApiCostAnthropic(modelWithoutPrices, 1000, 500, 2000, 3000) - expect(cost).toBe(0) + const result = calculateApiCostAnthropic(modelWithoutPrices, 1000, 500, 2000, 3000) + expect(result.totalCost).toBe(0) + expect(result.totalInputTokens).toBe(6000) // 1000 + 2000 + 3000 + expect(result.totalOutputTokens).toBe(500) }) it("should handle zero tokens", () => { - const cost = calculateApiCostAnthropic(mockModelInfo, 0, 0, 0, 0) - expect(cost).toBe(0) + const result = calculateApiCostAnthropic(mockModelInfo, 0, 0, 0, 0) + expect(result.totalCost).toBe(0) + expect(result.totalInputTokens).toBe(0) + expect(result.totalOutputTokens).toBe(0) }) it("should handle undefined cache values", () => { - const cost = calculateApiCostAnthropic(mockModelInfo, 1000, 500) + const result = calculateApiCostAnthropic(mockModelInfo, 1000, 500) // Input cost: (3.0 / 1_000_000) * 1000 = 0.003 // Output cost: (15.0 / 1_000_000) * 500 = 0.0075 // Total: 0.003 + 0.0075 = 0.0105 - expect(cost).toBe(0.0105) + expect(result.totalCost).toBe(0.0105) + expect(result.totalInputTokens).toBe(1000) + expect(result.totalOutputTokens).toBe(500) }) it("should handle missing cache prices", () => { @@ -88,13 +102,15 @@ describe("Cost Utility", () => { cacheReadsPrice: undefined, } - const cost = calculateApiCostAnthropic(modelWithoutCachePrices, 1000, 500, 2000, 3000) + const result = calculateApiCostAnthropic(modelWithoutCachePrices, 1000, 500, 2000, 3000) // 
Should only include input and output costs // Input cost: (3.0 / 1_000_000) * 1000 = 0.003 // Output cost: (15.0 / 1_000_000) * 500 = 0.0075 // Total: 0.003 + 0.0075 = 0.0105 - expect(cost).toBe(0.0105) + expect(result.totalCost).toBe(0.0105) + expect(result.totalInputTokens).toBe(6000) // 1000 + 2000 + 3000 + expect(result.totalOutputTokens).toBe(500) }) }) @@ -110,43 +126,51 @@ describe("Cost Utility", () => { } it("should calculate basic input/output costs correctly", () => { - const cost = calculateApiCostOpenAI(mockModelInfo, 1000, 500) + const result = calculateApiCostOpenAI(mockModelInfo, 1000, 500) // Input cost: (3.0 / 1_000_000) * 1000 = 0.003 // Output cost: (15.0 / 1_000_000) * 500 = 0.0075 // Total: 0.003 + 0.0075 = 0.0105 - expect(cost).toBe(0.0105) + expect(result.totalCost).toBe(0.0105) + expect(result.totalInputTokens).toBe(1000) + expect(result.totalOutputTokens).toBe(500) }) it("should handle cache writes cost", () => { - const cost = calculateApiCostOpenAI(mockModelInfo, 3000, 500, 2000) + const result = calculateApiCostOpenAI(mockModelInfo, 3000, 500, 2000) // Input cost: (3.0 / 1_000_000) * (3000 - 2000) = 0.003 // Output cost: (15.0 / 1_000_000) * 500 = 0.0075 // Cache writes: (3.75 / 1_000_000) * 2000 = 0.0075 // Total: 0.003 + 0.0075 + 0.0075 = 0.018 - expect(cost).toBeCloseTo(0.018, 6) + expect(result.totalCost).toBeCloseTo(0.018, 6) + expect(result.totalInputTokens).toBe(3000) // Total already includes cache + expect(result.totalOutputTokens).toBe(500) }) it("should handle cache reads cost", () => { - const cost = calculateApiCostOpenAI(mockModelInfo, 4000, 500, undefined, 3000) + const result = calculateApiCostOpenAI(mockModelInfo, 4000, 500, undefined, 3000) // Input cost: (3.0 / 1_000_000) * (4000 - 3000) = 0.003 // Output cost: (15.0 / 1_000_000) * 500 = 0.0075 // Cache reads: (0.3 / 1_000_000) * 3000 = 0.0009 // Total: 0.003 + 0.0075 + 0.0009 = 0.0114 - expect(cost).toBe(0.0114) + expect(result.totalCost).toBe(0.0114) + 
expect(result.totalInputTokens).toBe(4000) // Total already includes cache + expect(result.totalOutputTokens).toBe(500) }) it("should handle all cost components together", () => { - const cost = calculateApiCostOpenAI(mockModelInfo, 6000, 500, 2000, 3000) + const result = calculateApiCostOpenAI(mockModelInfo, 6000, 500, 2000, 3000) // Input cost: (3.0 / 1_000_000) * (6000 - 2000 - 3000) = 0.003 // Output cost: (15.0 / 1_000_000) * 500 = 0.0075 // Cache writes: (3.75 / 1_000_000) * 2000 = 0.0075 // Cache reads: (0.3 / 1_000_000) * 3000 = 0.0009 // Total: 0.003 + 0.0075 + 0.0075 + 0.0009 = 0.0189 - expect(cost).toBe(0.0189) + expect(result.totalCost).toBe(0.0189) + expect(result.totalInputTokens).toBe(6000) // Total already includes cache + expect(result.totalOutputTokens).toBe(500) }) it("should handle missing prices gracefully", () => { @@ -156,22 +180,28 @@ describe("Cost Utility", () => { supportsPromptCache: true, } - const cost = calculateApiCostOpenAI(modelWithoutPrices, 1000, 500, 2000, 3000) - expect(cost).toBe(0) + const result = calculateApiCostOpenAI(modelWithoutPrices, 1000, 500, 2000, 3000) + expect(result.totalCost).toBe(0) + expect(result.totalInputTokens).toBe(1000) // Total already includes cache + expect(result.totalOutputTokens).toBe(500) }) it("should handle zero tokens", () => { - const cost = calculateApiCostOpenAI(mockModelInfo, 0, 0, 0, 0) - expect(cost).toBe(0) + const result = calculateApiCostOpenAI(mockModelInfo, 0, 0, 0, 0) + expect(result.totalCost).toBe(0) + expect(result.totalInputTokens).toBe(0) + expect(result.totalOutputTokens).toBe(0) }) it("should handle undefined cache values", () => { - const cost = calculateApiCostOpenAI(mockModelInfo, 1000, 500) + const result = calculateApiCostOpenAI(mockModelInfo, 1000, 500) // Input cost: (3.0 / 1_000_000) * 1000 = 0.003 // Output cost: (15.0 / 1_000_000) * 500 = 0.0075 // Total: 0.003 + 0.0075 = 0.0105 - expect(cost).toBe(0.0105) + expect(result.totalCost).toBe(0.0105) + 
expect(result.totalInputTokens).toBe(1000) + expect(result.totalOutputTokens).toBe(500) }) it("should handle missing cache prices", () => { @@ -181,13 +211,15 @@ describe("Cost Utility", () => { cacheReadsPrice: undefined, } - const cost = calculateApiCostOpenAI(modelWithoutCachePrices, 6000, 500, 2000, 3000) + const result = calculateApiCostOpenAI(modelWithoutCachePrices, 6000, 500, 2000, 3000) // Should only include input and output costs // Input cost: (3.0 / 1_000_000) * (6000 - 2000 - 3000) = 0.003 // Output cost: (15.0 / 1_000_000) * 500 = 0.0075 // Total: 0.003 + 0.0075 = 0.0105 - expect(cost).toBe(0.0105) + expect(result.totalCost).toBe(0.0105) + expect(result.totalInputTokens).toBe(6000) // Total already includes cache + expect(result.totalOutputTokens).toBe(500) }) }) }) diff --git a/src/utils/globalContext.ts b/src/utils/globalContext.ts index 882501850d5..1711c2a31e9 100644 --- a/src/utils/globalContext.ts +++ b/src/utils/globalContext.ts @@ -1,13 +1,7 @@ -import { mkdir } from "fs/promises" -import { join } from "path" import { ExtensionContext } from "vscode" - -export async function getGlobalFsPath(context: ExtensionContext): Promise { - return context.globalStorageUri.fsPath -} +import { getSettingsDirectoryPath } from "./storage" export async function ensureSettingsDirectoryExists(context: ExtensionContext): Promise { - const settingsDir = join(context.globalStorageUri.fsPath, "settings") - await mkdir(settingsDir, { recursive: true }) - return settingsDir + // getSettingsDirectoryPath already handles the custom storage path setting + return await getSettingsDirectoryPath(context.globalStorageUri.fsPath) } diff --git a/src/utils/migrateSettings.ts b/src/utils/migrateSettings.ts index 0ddd5536109..5d2625a7070 100644 --- a/src/utils/migrateSettings.ts +++ b/src/utils/migrateSettings.ts @@ -3,6 +3,7 @@ import * as path from "path" import * as fs from "fs/promises" import { fileExistsAtPath } from "./fs" import { GlobalFileNames } from 
"../shared/globalFileNames" +import { getSettingsDirectoryPath } from "./storage" import * as yaml from "yaml" const deprecatedCustomModesJSONFilename = "custom_modes.json" @@ -26,7 +27,7 @@ export async function migrateSettings( ] try { - const settingsDir = path.join(context.globalStorageUri.fsPath, "settings") + const settingsDir = await getSettingsDirectoryPath(context.globalStorageUri.fsPath) // Check if settings directory exists first if (!(await fileExistsAtPath(settingsDir))) { diff --git a/webview-ui/src/components/chat/Announcement.tsx b/webview-ui/src/components/chat/Announcement.tsx index 1e0a3578d9f..f1312e82430 100644 --- a/webview-ui/src/components/chat/Announcement.tsx +++ b/webview-ui/src/components/chat/Announcement.tsx @@ -4,7 +4,6 @@ import { VSCodeLink } from "@vscode/webview-ui-toolkit/react" import { Package } from "@roo/package" import { useAppTranslation } from "@src/i18n/TranslationContext" -import { useExtensionState } from "@src/context/ExtensionStateContext" import { vscode } from "@src/utils/vscode" import { Dialog, DialogContent, DialogHeader, DialogTitle } from "@src/components/ui" import { Button } from "@src/components/ui" @@ -25,7 +24,6 @@ interface AnnouncementProps { const Announcement = ({ hideAnnouncement }: AnnouncementProps) => { const { t } = useAppTranslation() const [open, setOpen] = useState(true) - const { cloudIsAuthenticated } = useExtensionState() return ( { {t("chat:announcement.title", { version: Package.version })}
    -
    - , - }} - /> + {/* Regular Release Highlights */} +
    +

    {t("chat:announcement.release.heading")}

    +
      +
    • {t("chat:announcement.release.openRouterEmbeddings")}
    • +
    • {t("chat:announcement.release.chutesDynamic")}
    • +
    • {t("chat:announcement.release.queuedMessagesFix")}
    • +
    -

    - {t("chat:announcement.stealthModel.note")} -

    + {/* Horizontal Rule */} +
    + + {/* Cloud Agents Section */} +
    +

    {t("chat:announcement.cloudAgents.heading")}

    -
    - {!cloudIsAuthenticated ? ( +
    + , + }} + /> +
    + +

    + {t("chat:announcement.cloudAgents.prFixerDescription")} +

    + +
    - ) : ( - <> -

    - , - }} - /> -

    - - - )} +
    @@ -103,6 +96,16 @@ const Announcement = ({ hideAnnouncement }: AnnouncementProps) => { }} />
    + + {/* Careers Section */} +
    + , + }} + /> +
    @@ -111,10 +114,10 @@ const Announcement = ({ hideAnnouncement }: AnnouncementProps) => { const XLink = () => ( { e.preventDefault() - vscode.postMessage({ type: "openExternal", url: "https://x.com/roo_code" }) + vscode.postMessage({ type: "openExternal", url: "https://x.com/roocode" }) }}> X @@ -142,5 +145,16 @@ const RedditLink = () => ( ) +const CareersLink = ({ children }: { children?: React.ReactNode }) => ( + { + e.preventDefault() + vscode.postMessage({ type: "openExternal", url: "https://careers.roocode.com" }) + }}> + {children} + +) + export default memo(Announcement) // kilocode_change: file unused, no need to touch anything diff --git a/webview-ui/src/components/chat/AutoApproveDropdown.tsx b/webview-ui/src/components/chat/AutoApproveDropdown.tsx index 08e4a90f6bb..1fbe8cc0db9 100644 --- a/webview-ui/src/components/chat/AutoApproveDropdown.tsx +++ b/webview-ui/src/components/chat/AutoApproveDropdown.tsx @@ -173,6 +173,7 @@ export const AutoApproveDropdown = ({ disabled = false, triggerClassName = "" }: "inline-flex items-center gap-1.5 relative whitespace-nowrap px-1.5 py-1 text-xs", "bg-transparent border border-[rgba(255,255,255,0.08)] rounded-md text-vscode-foreground", "transition-all duration-150 focus:outline-none focus-visible:ring-1 focus-visible:ring-vscode-focusBorder focus-visible:ring-inset", + "max-[300px]:shrink-0", disabled ? "opacity-50 cursor-not-allowed" : "opacity-90 hover:opacity-100 hover:bg-[rgba(255,255,255,0.03)] hover:border-[rgba(255,255,255,0.15)] cursor-pointer", @@ -184,13 +185,20 @@ export const AutoApproveDropdown = ({ disabled = false, triggerClassName = "" }: )} - + {!effectiveAutoApprovalEnabled ? t("chat:autoApprove.triggerLabelOff") : enabledCount === totalCount ? t("chat:autoApprove.triggerLabelAll") : t("chat:autoApprove.triggerLabel", { count: enabledCount })} + + {!effectiveAutoApprovalEnabled + ? t("chat:autoApprove.triggerLabelOffShort") + : enabledCount === totalCount + ? 
t("chat:autoApprove.triggerLabelAll") + : enabledCount} + void isFollowUpAnswered?: boolean editable?: boolean + hasCheckpoint?: boolean } // eslint-disable-next-line @typescript-eslint/no-empty-object-type @@ -494,39 +495,6 @@ export const ChatRowContent = ({
    ) - case "searchAndReplace": - return ( - <> -
    - {tool.isProtected ? ( - - ) : ( - toolIcon("replace") - )} - - {tool.isProtected && message.type === "ask" - ? t("chat:fileOperations.wantsToEditProtected") - : message.type === "ask" - ? t("chat:fileOperations.wantsToSearchReplace") - : t("chat:fileOperations.didSearchReplace")} - -
    -
    - -
    - - ) case "codebaseSearch": { return (
    diff --git a/webview-ui/src/components/chat/ChatView.tsx b/webview-ui/src/components/chat/ChatView.tsx index e1853061b85..8dec746302b 100644 --- a/webview-ui/src/components/chat/ChatView.tsx +++ b/webview-ui/src/components/chat/ChatView.tsx @@ -205,7 +205,9 @@ const ChatViewComponent: React.ForwardRefRenderFunction("") const [wasStreaming, setWasStreaming] = useState(false) - const [showCheckpointWarning, setShowCheckpointWarning] = useState(false) + const [checkpointWarning, setCheckpointWarning] = useState< + { type: "WAIT_TIMEOUT" | "INIT_TIMEOUT"; timeout: number } | undefined + >(undefined) const [isCondensing, setIsCondensing] = useState(false) const [showAnnouncementModal, setShowAnnouncementModal] = useState(false) const everVisibleMessagesTsRef = useRef>( @@ -630,7 +632,11 @@ const ChatViewComponent: React.ForwardRefRenderFunction 0) { - if (sendingDisabled) { + // Queue message if: + // - Task is busy (sendingDisabled) + // - API request in progress (isStreaming) + // - Queue has items (preserve message order during drain) + if (sendingDisabled || isStreaming || messageQueue.length > 0) { try { console.log("queueMessage", text, images) vscode.postMessage({ type: "queueMessage", text, images }) @@ -686,7 +692,7 @@ const ChatViewComponent: React.ForwardRefRenderFunction { - // Only show the warning when there's a task but no visible messages yet - if (task && modifiedMessages.length === 0 && !isStreaming && !isHidden) { - const timer = setTimeout(() => { - setShowCheckpointWarning(true) - }, 5000) // 5 seconds - - return () => clearTimeout(timer) - } else { - setShowCheckpointWarning(false) - } - }, [task, modifiedMessages.length, isStreaming, isHidden]) - - // Effect to hide the checkpoint warning when messages appear + // Effect to clear checkpoint warning when messages appear or task changes useEffect(() => { - if (modifiedMessages.length > 0 || isStreaming || isHidden) { - setShowCheckpointWarning(false) + if (isHidden || !task) { + 
setCheckpointWarning(undefined) } - }, [modifiedMessages.length, isStreaming, isHidden]) + }, [modifiedMessages.length, isStreaming, isHidden, task]) const placeholderText = task ? t("chat:typeMessage") : t("chat:typeTask") @@ -1627,6 +1618,7 @@ const ChatViewComponent: React.ForwardRefRenderFunction ) } + const hasCheckpoint = modifiedMessages.some((message) => message.say === "checkpoint_saved") // regular message return ( @@ -1663,6 +1655,7 @@ const ChatViewComponent: React.ForwardRefRenderFunction ) }, @@ -1963,9 +1956,9 @@ const ChatViewComponent: React.ForwardRefRenderFunction )} - {showCheckpointWarning && ( + {checkpointWarning && (
    - +
    )} diff --git a/webview-ui/src/components/chat/CheckpointWarning.tsx b/webview-ui/src/components/chat/CheckpointWarning.tsx index 1b6977d2817..81675f4993a 100644 --- a/webview-ui/src/components/chat/CheckpointWarning.tsx +++ b/webview-ui/src/components/chat/CheckpointWarning.tsx @@ -1,32 +1,45 @@ -import { Trans } from "react-i18next" import { VSCodeLink } from "@vscode/webview-ui-toolkit/react" +import { Trans } from "react-i18next" + +interface CheckpointWarningProps { + warning: { + type: "WAIT_TIMEOUT" | "INIT_TIMEOUT" + timeout: number + } +} + +export const CheckpointWarning = ({ warning }: CheckpointWarningProps) => { + const settingsLink = ( + { + e.preventDefault() + window.postMessage( + { + type: "action", + action: "settingsButtonClicked", + values: { section: "checkpoints" }, + }, + "*", + ) + }} + className="inline" + /> + ) + + // Map warning type to i18n key + const i18nKey = + warning.type === "WAIT_TIMEOUT" ? "errors.wait_checkpoint_long_time" : "errors.init_checkpoint_fail_long_time" -export const CheckpointWarning = () => { return (
    { - e.preventDefault() - window.postMessage( - { - type: "action", - action: "settingsButtonClicked", - values: { section: "checkpoints" }, - }, - "*", - ) - }} - className="inline px-0.5" - /> - ), - }} + i18nKey={i18nKey} + ns="common" + values={{ timeout: warning.timeout }} + components={{ settingsLink }} />
    diff --git a/webview-ui/src/components/chat/CodeIndexPopover.tsx b/webview-ui/src/components/chat/CodeIndexPopover.tsx index 62164b14608..5a289303b76 100644 --- a/webview-ui/src/components/chat/CodeIndexPopover.tsx +++ b/webview-ui/src/components/chat/CodeIndexPopover.tsx @@ -73,6 +73,7 @@ interface LocalCodeIndexSettings { codebaseIndexGeminiApiKey?: string codebaseIndexMistralApiKey?: string codebaseIndexVercelAiGatewayApiKey?: string + codebaseIndexOpenRouterApiKey?: string } // Validation schema for codebase index settings @@ -149,6 +150,16 @@ const createValidationSchema = (provider: EmbedderProvider, t: any) => { .min(1, t("settings:codeIndex.validation.modelSelectionRequired")), }) + case "openrouter": + return baseSchema.extend({ + codebaseIndexOpenRouterApiKey: z + .string() + .min(1, t("settings:codeIndex.validation.openRouterApiKeyRequired")), + codebaseIndexEmbedderModelId: z + .string() + .min(1, t("settings:codeIndex.validation.modelSelectionRequired")), + }) + default: return baseSchema } @@ -194,6 +205,7 @@ export const CodeIndexPopover: React.FC = ({ codebaseIndexGeminiApiKey: "", codebaseIndexMistralApiKey: "", codebaseIndexVercelAiGatewayApiKey: "", + codebaseIndexOpenRouterApiKey: "", }) // Initial settings state - stores the settings when popover opens @@ -229,6 +241,7 @@ export const CodeIndexPopover: React.FC = ({ codebaseIndexGeminiApiKey: "", codebaseIndexMistralApiKey: "", codebaseIndexVercelAiGatewayApiKey: "", + codebaseIndexOpenRouterApiKey: "", } setInitialSettings(settings) setCurrentSettings(settings) @@ -345,6 +358,14 @@ export const CodeIndexPopover: React.FC = ({ ? SECRET_PLACEHOLDER : "" } + if ( + !prev.codebaseIndexOpenRouterApiKey || + prev.codebaseIndexOpenRouterApiKey === SECRET_PLACEHOLDER + ) { + updated.codebaseIndexOpenRouterApiKey = secretStatus.hasOpenRouterApiKey + ? 
SECRET_PLACEHOLDER + : "" + } return updated } @@ -418,7 +439,8 @@ export const CodeIndexPopover: React.FC = ({ key === "codebaseIndexOpenAiCompatibleApiKey" || key === "codebaseIndexGeminiApiKey" || key === "codebaseIndexMistralApiKey" || - key === "codebaseIndexVercelAiGatewayApiKey" + key === "codebaseIndexVercelAiGatewayApiKey" || + key === "codebaseIndexOpenRouterApiKey" ) { dataToValidate[key] = "placeholder-valid" } @@ -680,6 +702,9 @@ export const CodeIndexPopover: React.FC = ({ {t("settings:codeIndex.vercelAiGatewayProvider")} + + {t("settings:codeIndex.openRouterProvider")} +
    @@ -1142,6 +1167,71 @@ export const CodeIndexPopover: React.FC = ({ )} + {currentSettings.codebaseIndexEmbedderProvider === "openrouter" && ( + <> +
    + + + updateSetting("codebaseIndexOpenRouterApiKey", e.target.value) + } + placeholder={t("settings:codeIndex.openRouterApiKeyPlaceholder")} + className={cn("w-full", { + "border-red-500": formErrors.codebaseIndexOpenRouterApiKey, + })} + /> + {formErrors.codebaseIndexOpenRouterApiKey && ( +

    + {formErrors.codebaseIndexOpenRouterApiKey} +

    + )} +
    + +
    + + + updateSetting("codebaseIndexEmbedderModelId", e.target.value) + } + className={cn("w-full", { + "border-red-500": formErrors.codebaseIndexEmbedderModelId, + })}> + + {t("settings:codeIndex.selectModel")} + + {getAvailableModels().map((modelId) => { + const model = + codebaseIndexModels?.[ + currentSettings.codebaseIndexEmbedderProvider + ]?.[modelId] + return ( + + {modelId}{" "} + {model + ? t("settings:codeIndex.modelDimensions", { + dimension: model.dimension, + }) + : ""} + + ) + })} + + {formErrors.codebaseIndexEmbedderModelId && ( +

    + {formErrors.codebaseIndexEmbedderModelId} +

    + )} +
    + + )} + {/* Qdrant Settings */}
    + + {enableCheckpoints && ( +
    + +
    + { + setCachedStateField("checkpointTimeout", value) + }} + className="flex-1" + data-testid="checkpoint-timeout-slider" + /> + + {checkpointTimeout ?? DEFAULT_CHECKPOINT_TIMEOUT_SECONDS} + +
    +
    + {t("settings:checkpoints.timeout.description")} +
    +
    + )} {/* kilocode_change start - Auto-Purge Settings Section */} diff --git a/webview-ui/src/components/settings/ContextManagementSettings.tsx b/webview-ui/src/components/settings/ContextManagementSettings.tsx index fdf4f0de53c..c24edcfc24e 100644 --- a/webview-ui/src/components/settings/ContextManagementSettings.tsx +++ b/webview-ui/src/components/settings/ContextManagementSettings.tsx @@ -28,6 +28,8 @@ type ContextManagementSettingsProps = HTMLAttributes & { includeDiagnosticMessages?: boolean maxDiagnosticMessages?: number writeDelayMs: number + includeCurrentTime?: boolean + includeCurrentCost?: boolean setCachedStateField: SetCachedStateField< | "autoCondenseContext" | "autoCondenseContextPercent" @@ -43,6 +45,8 @@ type ContextManagementSettingsProps = HTMLAttributes & { | "includeDiagnosticMessages" | "maxDiagnosticMessages" | "writeDelayMs" + | "includeCurrentTime" + | "includeCurrentCost" > } @@ -63,6 +67,8 @@ export const ContextManagementSettings = ({ includeDiagnosticMessages, maxDiagnosticMessages, writeDelayMs, + includeCurrentTime, + includeCurrentCost, className, ...props }: ContextManagementSettingsProps) => { @@ -373,6 +379,34 @@ export const ContextManagementSettings = ({ {t("settings:contextManagement.diagnostics.delayAfterWrite.description")}
    + +
    + setCachedStateField("includeCurrentTime", e.target.checked)} + data-testid="include-current-time-checkbox"> + + +
    + {t("settings:contextManagement.includeCurrentTime.description")} +
    +
    + +
    + setCachedStateField("includeCurrentCost", e.target.checked)} + data-testid="include-current-cost-checkbox"> + + +
    + {t("settings:contextManagement.includeCurrentCost.description")} +
    +
    , - /* kilocode_change: supports computer use => supports browser - ,*/ interface ModelPickerProps { diff --git a/webview-ui/src/components/settings/SettingsView.tsx b/webview-ui/src/components/settings/SettingsView.tsx index 8c3c6a86116..a2accd2c6bd 100644 --- a/webview-ui/src/components/settings/SettingsView.tsx +++ b/webview-ui/src/components/settings/SettingsView.tsx @@ -179,6 +179,7 @@ const SettingsView = forwardRef(({ onDone, t browserToolEnabled, browserViewportSize, enableCheckpoints, + checkpointTimeout, diffEnabled, experiments, morphApiKey, // kilocode_change @@ -245,6 +246,8 @@ const SettingsView = forwardRef(({ onDone, t kiloCodeImageApiKey, openRouterImageGenerationSelectedModel, reasoningBlockCollapsed, + includeCurrentTime, + includeCurrentCost, } = cachedState const apiConfiguration = useMemo(() => cachedState.apiConfiguration ?? {}, [cachedState.apiConfiguration]) @@ -456,6 +459,7 @@ const SettingsView = forwardRef(({ onDone, t vscode.postMessage({ type: "soundVolume", value: soundVolume }) vscode.postMessage({ type: "diffEnabled", bool: diffEnabled }) vscode.postMessage({ type: "enableCheckpoints", bool: enableCheckpoints }) + vscode.postMessage({ type: "checkpointTimeout", value: checkpointTimeout }) vscode.postMessage({ type: "browserViewportSize", text: browserViewportSize }) vscode.postMessage({ type: "remoteBrowserHost", text: remoteBrowserHost }) vscode.postMessage({ type: "remoteBrowserEnabled", bool: remoteBrowserEnabled }) @@ -505,6 +509,8 @@ const SettingsView = forwardRef(({ onDone, t vscode.postMessage({ type: "updateSupportPrompt", values: customSupportPrompts || {} }) vscode.postMessage({ type: "includeTaskHistoryInEnhance", bool: includeTaskHistoryInEnhance ?? true }) vscode.postMessage({ type: "setReasoningBlockCollapsed", bool: reasoningBlockCollapsed ?? true }) + vscode.postMessage({ type: "includeCurrentTime", bool: includeCurrentTime ?? true }) + vscode.postMessage({ type: "includeCurrentCost", bool: includeCurrentCost ?? 
true }) vscode.postMessage({ type: "upsertApiConfiguration", text: currentApiConfigName, apiConfiguration }) vscode.postMessage({ type: "telemetrySetting", text: telemetrySetting }) vscode.postMessage({ type: "profileThresholds", values: profileThresholds }) @@ -893,6 +899,7 @@ const SettingsView = forwardRef(({ onDone, t {activeTab === "checkpoints" && ( (({ onDone, t includeDiagnosticMessages={includeDiagnosticMessages} maxDiagnosticMessages={maxDiagnosticMessages} writeDelayMs={writeDelayMs} + includeCurrentTime={includeCurrentTime} + includeCurrentCost={includeCurrentCost} setCachedStateField={setCachedStateField} /> )} diff --git a/webview-ui/src/components/settings/SimpleThinkingBudget.tsx b/webview-ui/src/components/settings/SimpleThinkingBudget.tsx new file mode 100644 index 00000000000..60b163738dd --- /dev/null +++ b/webview-ui/src/components/settings/SimpleThinkingBudget.tsx @@ -0,0 +1,120 @@ +import { useEffect } from "react" + +import { type ProviderSettings, type ModelInfo, type ReasoningEffort, reasoningEfforts } from "@roo-code/types" + +import { useAppTranslation } from "@src/i18n/TranslationContext" +import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@src/components/ui" + +interface SimpleThinkingBudgetProps { + apiConfiguration: ProviderSettings + setApiConfigurationField: ( + field: K, + value: ProviderSettings[K], + isUserAction?: boolean, + ) => void + modelInfo?: ModelInfo +} + +// Extended type to include "none" option +type ReasoningEffortWithNone = ReasoningEffort | "none" + +export const SimpleThinkingBudget = ({ + apiConfiguration, + setApiConfigurationField, + modelInfo, +}: SimpleThinkingBudgetProps) => { + const { t } = useAppTranslation() + + // Check model capabilities + const isReasoningEffortSupported = !!modelInfo && modelInfo.supportsReasoningEffort + const isReasoningEffortRequired = !!modelInfo && modelInfo.requiredReasoningEffort + + // Build available reasoning efforts list + // Include "none" 
option unless reasoning effort is required + const baseEfforts = [...reasoningEfforts] as ReasoningEffort[] + const availableReasoningEfforts: ReadonlyArray = isReasoningEffortRequired + ? baseEfforts + : (["none", ...baseEfforts] as ReasoningEffortWithNone[]) + + // Default reasoning effort - use model's default if available, otherwise "medium" + const modelDefaultReasoningEffort = modelInfo?.reasoningEffort as ReasoningEffort | undefined + const defaultReasoningEffort: ReasoningEffortWithNone = isReasoningEffortRequired + ? modelDefaultReasoningEffort || "medium" + : "none" + + // Current reasoning effort - treat undefined/null as "none" + const currentReasoningEffort: ReasoningEffortWithNone = + (apiConfiguration.reasoningEffort as ReasoningEffort | undefined) || defaultReasoningEffort + + // Set default reasoning effort when model supports it and no value is set + useEffect(() => { + if (isReasoningEffortSupported && !apiConfiguration.reasoningEffort) { + // Only set a default if reasoning is required, otherwise leave as undefined (which maps to "none") + if (isReasoningEffortRequired && defaultReasoningEffort !== "none") { + setApiConfigurationField("reasoningEffort", defaultReasoningEffort as ReasoningEffort, false) + } + } + }, [ + isReasoningEffortSupported, + isReasoningEffortRequired, + apiConfiguration.reasoningEffort, + defaultReasoningEffort, + setApiConfigurationField, + ]) + + useEffect(() => { + if (!isReasoningEffortSupported) return + const shouldEnable = isReasoningEffortRequired || currentReasoningEffort !== "none" + if (shouldEnable && apiConfiguration.enableReasoningEffort !== true) { + setApiConfigurationField("enableReasoningEffort", true, false) + } + }, [ + isReasoningEffortSupported, + isReasoningEffortRequired, + currentReasoningEffort, + apiConfiguration.enableReasoningEffort, + setApiConfigurationField, + ]) + if (!modelInfo || !isReasoningEffortSupported) { + return null + } + + return ( +
    +
    + +
    + +
    + ) +} diff --git a/webview-ui/src/components/settings/TerminalSettings.tsx b/webview-ui/src/components/settings/TerminalSettings.tsx index b37dfadff5e..51089e0c903 100644 --- a/webview-ui/src/components/settings/TerminalSettings.tsx +++ b/webview-ui/src/components/settings/TerminalSettings.tsx @@ -232,7 +232,7 @@ export const TerminalSettings = ({
    setCachedStateField("terminalShellIntegrationDisabled", e.target.checked) }> @@ -244,7 +244,7 @@ export const TerminalSettings = ({ @@ -256,6 +256,34 @@ export const TerminalSettings = ({ {!terminalShellIntegrationDisabled && ( <> +
    + { + setInheritEnv(e.target.checked) + vscode.postMessage({ + type: "updateVSCodeSetting", + setting: "terminal.integrated.inheritEnv", + value: e.target.checked, + }) + }} + data-testid="terminal-inherit-env-checkbox"> + {t("settings:terminal.inheritEnv.label")} + +
    + + + {" "} + + +
    +
    +
    - {/* kilocode_change: supports computer use => supports browser -
    -
    - { - return { - ...(apiConfiguration?.openAiCustomModelInfo || openAiModelInfoSaneDefaults), - supportsComputerUse: checked, - } - })}> - {t("settings:providers.customModel.computerUse.label")} - - - - -
    -
    - {t("settings:providers.customModel.computerUse.description")} -
    -
    - */} -
    {t("settings:providers.apiKeyStorageNotice")}
    - {!apiConfiguration?.requestyApiKey && ( -
    - {t("settings:providers.getRequestyApiKey")} - - )} + + {t("settings:providers.getRequestyApiKey")} + void + routerModels?: RouterModels + cloudIsAuthenticated: boolean + organizationAllowList: OrganizationAllowList + modelValidationError?: string +} + +export const Roo = ({ + apiConfiguration, + setApiConfigurationField, + routerModels, + cloudIsAuthenticated, + organizationAllowList, + modelValidationError, +}: RooProps) => { + const { t } = useAppTranslation() + + return ( + <> + {cloudIsAuthenticated ? ( +
    + {t("settings:providers.roo.authenticatedMessage")} +
    + ) : ( +
    + vscode.postMessage({ type: "rooCloudSignIn" })} + className="w-fit"> + {t("settings:providers.roo.connectButton")} + +
    + )} + + + ) +} diff --git a/webview-ui/src/components/settings/providers/index.ts b/webview-ui/src/components/settings/providers/index.ts index 63badb5b523..1e49a43ae27 100644 --- a/webview-ui/src/components/settings/providers/index.ts +++ b/webview-ui/src/components/settings/providers/index.ts @@ -18,6 +18,7 @@ export { OpenAI } from "./OpenAI" export { OpenAICompatible } from "./OpenAICompatible" export { OpenRouter } from "./OpenRouter" export { QwenCode } from "./QwenCode" +export { Roo } from "./Roo" export { Requesty } from "./Requesty" export { SambaNova } from "./SambaNova" export { Unbound } from "./Unbound" @@ -37,3 +38,4 @@ export { Fireworks } from "./Fireworks" export { Featherless } from "./Featherless" export { VercelAiGateway } from "./VercelAiGateway" export { DeepInfra } from "./DeepInfra" +export { MiniMax } from "./MiniMax" diff --git a/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts b/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts index cc608d50296..45ba7422700 100644 --- a/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts +++ b/webview-ui/src/components/ui/hooks/__tests__/useSelectedModel.spec.ts @@ -151,7 +151,6 @@ describe("useSelectedModel", () => { contextWindow: 8192, supportsImages: false, supportsPromptCache: false, - supportsComputerUse: true, cacheWritesPrice: 0.1, cacheReadsPrice: 0.01, } @@ -199,7 +198,6 @@ describe("useSelectedModel", () => { // Fields from base model that provider doesn't have contextWindow: 8192, // From base (provider doesn't override) supportsPromptCache: false, // From base (provider doesn't override) - supportsComputerUse: true, // From base (provider doesn't have) cacheWritesPrice: 0.1, // From base (provider doesn't have) cacheReadsPrice: 0.01, // From base (provider doesn't have) @@ -262,7 +260,6 @@ describe("useSelectedModel", () => { maxTokens: 8192, contextWindow: 200_000, supportsImages: true, - supportsComputerUse: true, 
supportsPromptCache: true, inputPrice: 3.0, outputPrice: 15.0, @@ -301,7 +298,7 @@ describe("useSelectedModel", () => { }) describe("loading and error states", () => { - it("should return loading state when router models are loading", () => { + it("should NOT set loading when router models are loading but provider is static (anthropic)", () => { mockUseRouterModels.mockReturnValue({ data: undefined, isLoading: true, @@ -317,10 +314,11 @@ describe("useSelectedModel", () => { const wrapper = createWrapper() const { result } = renderHook(() => useSelectedModel(), { wrapper }) - expect(result.current.isLoading).toBe(true) + // With static provider default (anthropic), useSelectedModel gates router fetches, so loading should be false + expect(result.current.isLoading).toBe(false) }) - it("should return loading state when open router model providers are loading", () => { + it("should NOT set loading when openrouter provider metadata is loading but provider is static (anthropic)", () => { mockUseRouterModels.mockReturnValue({ data: { openrouter: {}, requesty: {}, glama: {}, unbound: {}, litellm: {}, "io-intelligence": {} }, isLoading: false, @@ -336,10 +334,11 @@ describe("useSelectedModel", () => { const wrapper = createWrapper() const { result } = renderHook(() => useSelectedModel(), { wrapper }) - expect(result.current.isLoading).toBe(true) + // With static provider default (anthropic), openrouter providers are irrelevant, so loading should be false + expect(result.current.isLoading).toBe(false) }) - it("should return error state when either hook has an error", () => { + it("should NOT set error when hooks error but provider is static (anthropic)", () => { mockUseRouterModels.mockReturnValue({ data: undefined, isLoading: false, @@ -355,7 +354,8 @@ describe("useSelectedModel", () => { const wrapper = createWrapper() const { result } = renderHook(() => useSelectedModel(), { wrapper }) - expect(result.current.isError).toBe(true) + // Error from gated routerModels should 
not bubble for static provider default + expect(result.current.isError).toBe(false) }) }) @@ -419,7 +419,6 @@ describe("useSelectedModel", () => { // Verify it inherits other properties from anthropic models expect(result.current.info?.maxTokens).toBe(64_000) expect(result.current.info?.contextWindow).toBe(200_000) - expect(result.current.info?.supportsComputerUse).toBe(true) }) it("should use default claude-code model when no modelId is specified", () => { diff --git a/webview-ui/src/components/ui/hooks/useOpenRouterModelProviders.ts b/webview-ui/src/components/ui/hooks/useOpenRouterModelProviders.ts index 9e21d268f5f..ee998386833 100644 --- a/webview-ui/src/components/ui/hooks/useOpenRouterModelProviders.ts +++ b/webview-ui/src/components/ui/hooks/useOpenRouterModelProviders.ts @@ -91,33 +91,6 @@ async function getOpenRouterProvidersForModel(modelId: string, baseUrl?: string, label: providerName, } - // TODO: This is wrong. We need to fetch the model info from - // OpenRouter instead of hardcoding it here. The endpoints payload - // doesn't include this unfortunately, so we need to get it from the - // main models endpoint. - switch (true) { - case modelId.startsWith("anthropic/claude-3.7-sonnet"): - modelInfo.supportsComputerUse = true - modelInfo.supportsPromptCache = true - modelInfo.cacheWritesPrice = 3.75 - modelInfo.cacheReadsPrice = 0.3 - modelInfo.maxTokens = modelId === "anthropic/claude-3.7-sonnet:thinking" ? 
64_000 : 8192 - break - case modelId.startsWith("anthropic/claude-3.5-sonnet-20240620"): - modelInfo.supportsPromptCache = true - modelInfo.cacheWritesPrice = 3.75 - modelInfo.cacheReadsPrice = 0.3 - modelInfo.maxTokens = 8192 - break - // kilocode_change start - //default: - // modelInfo.supportsPromptCache = true - // modelInfo.cacheWritesPrice = 0.3 - // modelInfo.cacheReadsPrice = 0.03 - // break - // kilocode_change end - } - models[providerName] = modelInfo } } catch (error) { diff --git a/webview-ui/src/components/ui/hooks/useRouterModels.ts b/webview-ui/src/components/ui/hooks/useRouterModels.ts index 4d069dfeede..e800c08f21d 100644 --- a/webview-ui/src/components/ui/hooks/useRouterModels.ts +++ b/webview-ui/src/components/ui/hooks/useRouterModels.ts @@ -5,7 +5,12 @@ import { ExtensionMessage } from "@roo/ExtensionMessage" import { vscode } from "@src/utils/vscode" -const getRouterModels = async () => +type UseRouterModelsOptions = { + provider?: string // single provider filter (e.g. 
"roo") + enabled?: boolean // gate fetching entirely +} + +const getRouterModels = async (provider?: string) => new Promise((resolve, reject) => { const cleanup = () => { window.removeEventListener("message", handler) @@ -20,6 +25,14 @@ const getRouterModels = async () => const message: ExtensionMessage = event.data if (message.type === "routerModels") { + const msgProvider = message?.values?.provider as string | undefined + + // Verify response matches request + if (provider !== msgProvider) { + // Not our response; ignore and wait for the matching one + return + } + clearTimeout(timeout) cleanup() @@ -32,7 +45,11 @@ const getRouterModels = async () => } window.addEventListener("message", handler) - vscode.postMessage({ type: "requestRouterModels" }) + if (provider) { + vscode.postMessage({ type: "requestRouterModels", values: { provider } }) + } else { + vscode.postMessage({ type: "requestRouterModels" }) + } }) // kilocode_change start @@ -48,7 +65,13 @@ type RouterModelsQueryKey = { chutesApiKey?: string // Requesty, Unbound, etc should perhaps also be here, but they already have their own hacks for reloading } - -export const useRouterModels = (queryKey: RouterModelsQueryKey) => - useQuery({ queryKey: ["routerModels", queryKey], queryFn: () => getRouterModels() }) // kilocode_change end + +export const useRouterModels = (queryKey: RouterModelsQueryKey, opts: UseRouterModelsOptions = {}) => { + const provider = opts.provider || undefined + return useQuery({ + queryKey: ["routerModels", provider || "all", queryKey], + queryFn: () => getRouterModels(provider), + enabled: opts.enabled !== false, + }) +} diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts index ae254a16ba7..ceaa11880c3 100644 --- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts +++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts @@ -12,6 +12,8 @@ import { deepSeekModels, moonshotDefaultModelId, moonshotModels, 
+ minimaxDefaultModelId, + minimaxModels, geminiDefaultModelId, geminiModels, // kilocode_change start @@ -33,7 +35,6 @@ import { xaiModels, groqModels, groqDefaultModelId, - // chutesModels, // kilocode_change chutesDefaultModelId, vscodeLlmModels, vscodeLlmDefaultModelId, @@ -59,12 +60,12 @@ import { ioIntelligenceDefaultModelId, ioIntelligenceModels, rooDefaultModelId, - rooModels, qwenCodeDefaultModelId, qwenCodeModels, vercelAiGatewayDefaultModelId, BEDROCK_1M_CONTEXT_MODEL_IDS, deepInfraDefaultModelId, + isDynamicProvider, } from "@roo-code/types" import type { ModelRecord, RouterModels } from "@roo/api" @@ -97,33 +98,52 @@ export const useSelectedModel = (apiConfiguration?: ProviderSettings) => { const { kilocodeDefaultModel, virtualQuotaActiveModel } = useExtensionState() const lmStudioModelId = provider === "lmstudio" ? apiConfiguration?.lmStudioModelId : undefined const ollamaModelId = provider === "ollama" ? apiConfiguration?.ollamaModelId : undefined - - const routerModels = useRouterModels({ - openRouterBaseUrl: apiConfiguration?.openRouterBaseUrl, - openRouterApiKey: apiConfiguration?.apiKey, - kilocodeOrganizationId: apiConfiguration?.kilocodeOrganizationId, - geminiApiKey: apiConfiguration?.geminiApiKey, - googleGeminiBaseUrl: apiConfiguration?.googleGeminiBaseUrl, - }) - const openRouterModelProviders = useModelProviders(kilocodeDefaultModel, apiConfiguration) // kilocode_change end + + // Only fetch router models for dynamic providers + const shouldFetchRouterModels = isDynamicProvider(provider) + const routerModels = useRouterModels( + //kilocode_change start + { + openRouterBaseUrl: apiConfiguration?.openRouterBaseUrl, + openRouterApiKey: apiConfiguration?.apiKey, + kilocodeOrganizationId: apiConfiguration?.kilocodeOrganizationId, + geminiApiKey: apiConfiguration?.geminiApiKey, + googleGeminiBaseUrl: apiConfiguration?.googleGeminiBaseUrl, + }, + // kilocode_change end + { + provider: shouldFetchRouterModels ? 
provider : undefined, + enabled: shouldFetchRouterModels, + }, + ) + + const openRouterModelProviders = useModelProviders(kilocodeDefaultModel, apiConfiguration) // kilocode_change const lmStudioModels = useLmStudioModels(lmStudioModelId) const ollamaModels = useOllamaModels(ollamaModelId) + // Compute readiness only for the data actually needed for the selected provider + const needRouterModels = shouldFetchRouterModels + const needOpenRouterProviders = provider === "openrouter" + const needLmStudio = typeof lmStudioModelId !== "undefined" + const needOllama = typeof ollamaModelId !== "undefined" + + const isReady = + (!needLmStudio || typeof lmStudioModels.data !== "undefined") && + (!needOllama || typeof ollamaModels.data !== "undefined") && + (!needRouterModels || typeof routerModels.data !== "undefined") && + (!needOpenRouterProviders || typeof openRouterModelProviders.data !== "undefined") + const { id, info } = - apiConfiguration && - (typeof lmStudioModelId === "undefined" || typeof lmStudioModels.data !== "undefined") && - (typeof ollamaModelId === "undefined" || typeof ollamaModels.data !== "undefined") && - typeof routerModels.data !== "undefined" && - typeof openRouterModelProviders.data !== "undefined" + apiConfiguration && isReady ? 
getSelectedModel({ provider, apiConfiguration, - routerModels: routerModels.data, - openRouterModelProviders: openRouterModelProviders.data, - lmStudioModels: lmStudioModels.data, + routerModels: (routerModels.data || {}) as RouterModels, + openRouterModelProviders: (openRouterModelProviders.data || {}) as Record, + lmStudioModels: (lmStudioModels.data || undefined) as ModelRecord | undefined, kilocodeDefaultModel, - ollamaModels: ollamaModels.data, + ollamaModels: (ollamaModels.data || undefined) as ModelRecord | undefined, virtualQuotaActiveModel, // kilocode_change: Pass virtual quota active model }) : { id: anthropicDefaultModelId, info: undefined } @@ -133,13 +153,15 @@ export const useSelectedModel = (apiConfiguration?: ProviderSettings) => { id, info, isLoading: - routerModels.isLoading || - openRouterModelProviders.isLoading || - (apiConfiguration?.lmStudioModelId && lmStudioModels!.isLoading), + (needRouterModels && routerModels.isLoading) || + (needOpenRouterProviders && openRouterModelProviders.isLoading) || + (needLmStudio && lmStudioModels!.isLoading) || + (needOllama && ollamaModels!.isLoading), isError: - routerModels.isError || - openRouterModelProviders.isError || - (apiConfiguration?.lmStudioModelId && lmStudioModels!.isError), + (needRouterModels && routerModels.isError) || + (needOpenRouterProviders && openRouterModelProviders.isError) || + (needLmStudio && lmStudioModels!.isError) || + (needOllama && ollamaModels!.isError), } } @@ -229,7 +251,7 @@ function getSelectedModel({ } case "chutes": { const id = apiConfiguration.apiModelId ?? chutesDefaultModelId - const info = routerModels.chutes[id] // kilocode_change + const info = routerModels.chutes[id] return { id, info } } case "bedrock": { @@ -284,6 +306,11 @@ function getSelectedModel({ const info = moonshotModels[id as keyof typeof moonshotModels] return { id, info } } + case "minimax": { + const id = apiConfiguration.apiModelId ?? 
minimaxDefaultModelId + const info = minimaxModels[id as keyof typeof minimaxModels] + return { id, info } + } case "zai": { const isChina = apiConfiguration.zaiApiLine === "china_coding" const models = isChina ? mainlandZAiModels : internationalZAiModels @@ -342,9 +369,9 @@ function getSelectedModel({ // kilocode_change begin case "kilocode": { // Use the fetched models from routerModels - if (routerModels["kilocode-openrouter"] && apiConfiguration.kilocodeModel) { + if (routerModels["kilocode"] && apiConfiguration.kilocodeModel) { // Find the model in the fetched models - const modelEntries = Object.entries(routerModels["kilocode-openrouter"]) + const modelEntries = Object.entries(routerModels["kilocode"]) const selectedModelId = apiConfiguration.kilocodeModel.toLowerCase() @@ -370,7 +397,7 @@ function getSelectedModel({ const invalidOrDefaultModel = apiConfiguration.kilocodeModel ?? kilocodeDefaultModel return { id: invalidOrDefaultModel, - info: routerModels["kilocode-openrouter"][invalidOrDefaultModel], + info: routerModels["kilocode"][invalidOrDefaultModel], } } case "gemini-cli": { @@ -434,21 +461,10 @@ function getSelectedModel({ return { id, info } } case "roo": { - const requestedId = apiConfiguration.apiModelId - - // Check if the requested model exists in rooModels - if (requestedId && rooModels[requestedId as keyof typeof rooModels]) { - return { - id: requestedId, - info: rooModels[requestedId as keyof typeof rooModels], - } - } - - // Fallback to default model if requested model doesn't exist or is not specified - return { - id: rooDefaultModelId, - info: rooModels[rooDefaultModelId as keyof typeof rooModels], - } + // Roo is a dynamic provider - models are loaded from API + const id = apiConfiguration.apiModelId ?? rooDefaultModelId + const info = routerModels.roo[id] + return { id, info } } case "qwen-code": { const id = apiConfiguration.apiModelId ?? 
qwenCodeDefaultModelId @@ -476,13 +492,7 @@ function getSelectedModel({ // case "human-relay": // case "fake-ai": default: { - provider satisfies - | "anthropic" - | "gemini-cli" - | "qwen-code" - | "human-relay" - | "fake-ai" - | "kilocode-openrouter" + provider satisfies "anthropic" | "gemini-cli" | "qwen-code" | "human-relay" | "fake-ai" | "kilocode" const id = apiConfiguration.apiModelId ?? anthropicDefaultModelId const baseInfo = anthropicModels[id as keyof typeof anthropicModels] diff --git a/webview-ui/src/context/ExtensionStateContext.tsx b/webview-ui/src/context/ExtensionStateContext.tsx index 4b11b976b99..e275f1bb8ae 100644 --- a/webview-ui/src/context/ExtensionStateContext.tsx +++ b/webview-ui/src/context/ExtensionStateContext.tsx @@ -13,6 +13,7 @@ import { type OrganizationAllowList, type CloudOrganizationMembership, ORGANIZATION_ALLOW_ALL, + DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, } from "@roo-code/types" import { ExtensionMessage, ExtensionState, MarketplaceInstalledMetadata, Command } from "@roo/ExtensionMessage" @@ -130,6 +131,8 @@ export interface ExtensionStateContextType extends ExtensionState { setTtsSpeed: (value: number) => void setDiffEnabled: (value: boolean) => void setEnableCheckpoints: (value: boolean) => void + checkpointTimeout: number + setCheckpointTimeout: (value: number) => void setBrowserViewportSize: (value: string) => void setFuzzyMatchThreshold: (value: number) => void setWriteDelayMs: (value: number) => void @@ -205,6 +208,10 @@ export interface ExtensionStateContextType extends ExtensionState { setMaxDiagnosticMessages: (value: number) => void includeTaskHistoryInEnhance?: boolean setIncludeTaskHistoryInEnhance: (value: boolean) => void + includeCurrentTime?: boolean + setIncludeCurrentTime: (value: boolean) => void + includeCurrentCost?: boolean + setIncludeCurrentCost: (value: boolean) => void } export const ExtensionStateContext = createContext(undefined) @@ -245,6 +252,7 @@ export const ExtensionStateContextProvider: 
React.FC<{ children: React.ReactNode ttsSpeed: 1.0, diffEnabled: false, enableCheckpoints: true, + checkpointTimeout: DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, // Default to 15 seconds fuzzyMatchThreshold: 1.0, language: "en", // Default language code writeDelayMs: 1000, @@ -334,6 +342,8 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode autoPurgeLastRunTimestamp: undefined, // kilocode_change end openRouterImageGenerationSelectedModel: "", + includeCurrentTime: true, + includeCurrentCost: true, }) const [didHydrateState, setDidHydrateState] = useState(false) @@ -360,6 +370,9 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode global: {}, }) const [includeTaskHistoryInEnhance, setIncludeTaskHistoryInEnhance] = useState(true) + const [prevCloudIsAuthenticated, setPrevCloudIsAuthenticated] = useState(false) + const [includeCurrentTime, setIncludeCurrentTime] = useState(true) + const [includeCurrentCost, setIncludeCurrentCost] = useState(true) const setListApiConfigMeta = useCallback( (value: ProviderSettingsEntry[]) => setState((prevState) => ({ ...prevState, listApiConfigMeta: value })), @@ -397,6 +410,14 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode if ((newState as any).includeTaskHistoryInEnhance !== undefined) { setIncludeTaskHistoryInEnhance((newState as any).includeTaskHistoryInEnhance) } + // Update includeCurrentTime if present in state message + if ((newState as any).includeCurrentTime !== undefined) { + setIncludeCurrentTime((newState as any).includeCurrentTime) + } + // Update includeCurrentCost if present in state message + if ((newState as any).includeCurrentCost !== undefined) { + setIncludeCurrentCost((newState as any).includeCurrentCost) + } // Handle marketplace data if present in state message if (newState.marketplaceItems !== undefined) { setMarketplaceItems(newState.marketplaceItems) @@ -506,6 +527,17 @@ export const ExtensionStateContextProvider: 
React.FC<{ children: React.ReactNode vscode.postMessage({ type: "webviewDidLaunch" }) }, []) + // Watch for authentication state changes and refresh Roo models + useEffect(() => { + const currentAuth = state.cloudIsAuthenticated ?? false + const currentProvider = state.apiConfiguration?.apiProvider + if (!prevCloudIsAuthenticated && currentAuth && currentProvider === "roo") { + // User just authenticated and Roo is the active provider - refresh Roo models + vscode.postMessage({ type: "requestRooModels" }) + } + setPrevCloudIsAuthenticated(currentAuth) + }, [state.cloudIsAuthenticated, prevCloudIsAuthenticated, state.apiConfiguration?.apiProvider]) + const contextValue: ExtensionStateContextType = { ...state, reasoningBlockCollapsed: state.reasoningBlockCollapsed ?? true, @@ -570,6 +602,7 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode setTtsSpeed: (value) => setState((prevState) => ({ ...prevState, ttsSpeed: value })), setDiffEnabled: (value) => setState((prevState) => ({ ...prevState, diffEnabled: value })), setEnableCheckpoints: (value) => setState((prevState) => ({ ...prevState, enableCheckpoints: value })), + setCheckpointTimeout: (value) => setState((prevState) => ({ ...prevState, checkpointTimeout: value })), setBrowserViewportSize: (value: string) => setState((prevState) => ({ ...prevState, browserViewportSize: value })), setFuzzyMatchThreshold: (value) => setState((prevState) => ({ ...prevState, fuzzyMatchThreshold: value })), @@ -695,6 +728,10 @@ export const ExtensionStateContextProvider: React.FC<{ children: React.ReactNode // kilocode_change end includeTaskHistoryInEnhance, setIncludeTaskHistoryInEnhance, + includeCurrentTime, + setIncludeCurrentTime, + includeCurrentCost, + setIncludeCurrentCost, } return {children} diff --git a/webview-ui/src/context/__tests__/ExtensionStateContext.roo-auth-gate.spec.tsx b/webview-ui/src/context/__tests__/ExtensionStateContext.roo-auth-gate.spec.tsx new file mode 100644 index 
00000000000..d62adf26e93 --- /dev/null +++ b/webview-ui/src/context/__tests__/ExtensionStateContext.roo-auth-gate.spec.tsx @@ -0,0 +1,75 @@ +import { render, waitFor } from "@/utils/test-utils" +import React from "react" + +vi.mock("@src/utils/vscode", () => ({ + vscode: { + postMessage: vi.fn(), + }, +})) + +import { ExtensionStateContextProvider } from "@src/context/ExtensionStateContext" +import { vscode } from "@src/utils/vscode" + +describe("ExtensionStateContext Roo auth gate", () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + function postStateMessage(state: any) { + window.dispatchEvent( + new MessageEvent("message", { + data: { + type: "state", + state, + }, + }), + ) + } + + it("does not post requestRooModels when auth flips and provider !== 'roo'", async () => { + render( + +
    + , + ) + + // Flip auth to true with a non-roo provider (anthropic) + postStateMessage({ + cloudIsAuthenticated: true, + apiConfiguration: { apiProvider: "anthropic" }, + }) + + // Should NOT fire auth-driven Roo refresh + await waitFor(() => { + const calls = (vscode.postMessage as any).mock.calls as any[][] + const hasRequest = calls.some((c) => c[0]?.type === "requestRooModels") + expect(hasRequest).toBe(false) + }) + }) + + it("posts requestRooModels when auth flips and provider === 'roo'", async () => { + render( + +
    + , + ) + + // Ensure prev false (explicit) + postStateMessage({ + cloudIsAuthenticated: false, + apiConfiguration: { apiProvider: "roo" }, + }) + + vi.clearAllMocks() + + // Flip to true with provider roo - should trigger + postStateMessage({ + cloudIsAuthenticated: true, + apiConfiguration: { apiProvider: "roo" }, + }) + + await waitFor(() => { + expect(vscode.postMessage).toHaveBeenCalledWith({ type: "requestRooModels" }) + }) + }) +}) diff --git a/webview-ui/src/context/__tests__/ExtensionStateContext.spec.tsx b/webview-ui/src/context/__tests__/ExtensionStateContext.spec.tsx index 26a576822bf..b6618e6e9c7 100644 --- a/webview-ui/src/context/__tests__/ExtensionStateContext.spec.tsx +++ b/webview-ui/src/context/__tests__/ExtensionStateContext.spec.tsx @@ -4,6 +4,7 @@ import { ProviderSettings, ExperimentId, openRouterDefaultModelId, // kilocode_change + DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, } from "@roo-code/types" import { ExtensionState } from "@roo/ExtensionMessage" @@ -261,12 +262,14 @@ describe("mergeExtensionState", () => { remoteControlEnabled: false, taskSyncEnabled: false, featureRoomoteControlEnabled: false, + checkpointTimeout: DEFAULT_CHECKPOINT_TIMEOUT_SECONDS, // Add the checkpoint timeout property } const prevState: ExtensionState = { ...baseState, apiConfiguration: { modelMaxTokens: 1234, modelMaxThinkingTokens: 123 }, experiments: {} as Record, + checkpointTimeout: DEFAULT_CHECKPOINT_TIMEOUT_SECONDS - 5, } const newState: ExtensionState = { @@ -281,6 +284,7 @@ describe("mergeExtensionState", () => { imageGeneration: false, runSlashCommand: false, } as Record, + checkpointTimeout: DEFAULT_CHECKPOINT_TIMEOUT_SECONDS + 5, } const result = mergeExtensionState(prevState, newState) diff --git a/webview-ui/src/i18n/locales/ar/chat.json b/webview-ui/src/i18n/locales/ar/chat.json index d8a6180e89d..5b381eeda6e 100644 --- a/webview-ui/src/i18n/locales/ar/chat.json +++ b/webview-ui/src/i18n/locales/ar/chat.json @@ -163,6 +163,8 @@ "initializingWarning": 
"لا يزال يتم إنشاء نقطة الحفظ... إذا طولت، تقدر تطفّي نقاط الحفظ من الإعدادات وتعيد تشغيل المهمة.", "menu": { "viewDiff": "عرض الفرق", + "viewDiffFromInit": "عرض كل التغييرات", + "viewDiffWithCurrent": "عرض التغييرات منذ هذه النقطة", "restore": "استرجاع نقطة الحفظ", "restoreFiles": "استرجاع الملفات", "restoreFilesDescription": "يعيد ملفات مشروعك لنسخة محفوظة عند هذي النقطة.", @@ -170,7 +172,8 @@ "confirm": "تأكيد", "cancel": "إلغاء", "cannotUndo": "هذا الإجراء ما تقدر تتراجع عنه.", - "restoreFilesAndTaskDescription": "يعيد ملفات مشروعك لنسخة محفوظة عند هذي النقطة ويحذف كل الرسائل بعدها." + "restoreFilesAndTaskDescription": "يعيد ملفات مشروعك لنسخة محفوظة عند هذي النقطة ويحذف كل الرسائل بعدها.", + "more": "خيارات إضافية" }, "current": "الحالية" }, @@ -295,6 +298,7 @@ "selectOptionsFirst": "اختر خيار واحد على الأقل أدناه لتفعيل الموافقة التلقائية", "toggleAriaLabel": "تبديل الموافقة التلقائية", "disabledAriaLabel": "الموافقة التلقائية معطّلة - اختر الخيارات أولاً", + "triggerLabelOffShort": "مطفأة", "tooltip": "الموافقة التلقائية على طلبات استخدام الأدوات", "all": "الكل", "triggerLabel_zero": "الموافقة التلقائية: مطفأة", @@ -315,6 +319,19 @@ "connectButton": "اتصل بـ Roo Code Cloud", "selectModel": "اختر roo/sonic من موفر Roo Code Cloud في
    الإعدادات لتبدأ" }, + "release": { + "heading": "الجديد في الإضافة:", + "openRouterEmbeddings": "دعم نماذج التضمين من OpenRouter", + "chutesDynamic": "Chutes الآن يحمّل أحدث النماذج ديناميكياً", + "queuedMessagesFix": "إصلاحات لمشكلة فقدان الرسائل في الطابور" + }, + "cloudAgents": { + "heading": "الجديد في السحابة:", + "prFixer": "نقدّم وكيل PR Fixer السحابي لاستكمال PR Reviewer.", + "prFixerDescription": "PR Fixer يطبّق تغييرات عالية الجودة على طلبات السحب مباشرة من GitHub. فعّله عبر تعليق في PR وسيقرأ كامل سجل التعليقات لفهم السياق والاتفاقات والمقايضات - ثم ينفّذ الإصلاح المناسب.", + "tryPrFixerButton": "جرّب PR Fixer" + }, + "careers": "أيضاً، نحن نوظّف!", "description": "Kilo Code {{version}} يجلب ميزات وتحسينات كبيرة بناءً على ملاحظاتكم.", "whatsNew": "الجديد", "feature1": "إطلاق سوق Kilo Code: السوق صار متاح! اكتشف وثبّت الأنماط و MCPs بسهولة.", diff --git a/webview-ui/src/i18n/locales/ar/common.json b/webview-ui/src/i18n/locales/ar/common.json index 8126ca73e93..636c3eef95d 100644 --- a/webview-ui/src/i18n/locales/ar/common.json +++ b/webview-ui/src/i18n/locales/ar/common.json @@ -118,5 +118,9 @@ "months_ago": "منذ {{count}} شهر", "year_ago": "منذ سنة", "years_ago": "منذ {{count}} سنة" + }, + "errors": { + "wait_checkpoint_long_time": "انتظرنا {{timeout}} ثانية لتهيئة نقطة الحفظ. إذا كنت لا تحتاج ميزة نقاط الحفظ، يرجى إيقافها في إعدادات نقاط الحفظ.", + "init_checkpoint_fail_long_time": "استغرقت تهيئة نقطة الحفظ أكثر من {{timeout}} ثانية، لذلك تم تعطيل نقاط الحفظ لهذه المهمة. يمكنك تعطيل نقاط الحفظ أو تمديد وقت الانتظار في إعدادات نقاط الحفظ." 
} } diff --git a/webview-ui/src/i18n/locales/ar/mcp.json b/webview-ui/src/i18n/locales/ar/mcp.json index e9f23d05716..a0844ce8232 100644 --- a/webview-ui/src/i18n/locales/ar/mcp.json +++ b/webview-ui/src/i18n/locales/ar/mcp.json @@ -26,12 +26,12 @@ "tabs": { "tools": "الأدوات", "resources": "الموارد", - "errors": "الأخطاء" + "logs": "السجلات" }, "emptyState": { "noTools": "ما فيه أدوات", "noResources": "ما فيه موارد", - "noErrors": "ما فيه أخطاء" + "noLogs": "ما فيه سجلات بعد" }, "networkTimeout": { "label": "مهلة الشبكة", diff --git a/webview-ui/src/i18n/locales/ar/settings.json b/webview-ui/src/i18n/locales/ar/settings.json index 8b24eda8b48..45677650065 100644 --- a/webview-ui/src/i18n/locales/ar/settings.json +++ b/webview-ui/src/i18n/locales/ar/settings.json @@ -71,6 +71,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "مفتاح API", "vercelAiGatewayApiKeyPlaceholder": "أدخل مفتاح Vercel AI Gateway API", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "مفتاح OpenRouter API", + "openRouterApiKeyPlaceholder": "أدخل مفتاح OpenRouter API", "openaiCompatibleProvider": "متوافق مع OpenAI", "openAiKeyLabel": "مفتاح OpenAI API", "openAiKeyPlaceholder": "أدخل مفتاح OpenAI API", @@ -137,6 +140,7 @@ "geminiApiKeyRequired": "مفتاح Gemini API مطلوب", "mistralApiKeyRequired": "مفتاح Mistral API مطلوب", "vercelAiGatewayApiKeyRequired": "مفتاح Vercel AI Gateway API مطلوب", + "openRouterApiKeyRequired": "مفتاح OpenRouter API مطلوب", "ollamaBaseUrlRequired": "رابط Ollama الأساسي مطلوب", "baseUrlRequired": "الرابط الأساسي مطلوب", "modelDimensionMinValue": "بُعد النموذج يجب أن يكون أكبر من 0" @@ -311,11 +315,14 @@ "getFireworksApiKey": "احصل على مفتاح Fireworks", "syntheticApiKey": "مفتاح Synthetic API", "getSyntheticApiKey": "احصل على مفتاح Synthetic API", - "zaiApiKey": "مفتاح Z.AI", - "getZaiApiKey": "احصل على مفتاح Z.AI", "moonshotApiKey": "مفتاح Moonshot", "getMoonshotApiKey": "احصل على مفتاح Moonshot", "moonshotBaseUrl": "نقطة 
دخول Moonshot", + "minimaxApiKey": "مفتاح MiniMax API", + "getMiniMaxApiKey": "احصل على مفتاح MiniMax API", + "minimaxBaseUrl": "نقطة دخول MiniMax", + "zaiApiKey": "مفتاح Z.AI", + "getZaiApiKey": "احصل على مفتاح Z.AI", "zaiEntrypoint": "نقطة دخول Z AI", "zaiEntrypointDescription": "يرجى اختيار نقطة دخول API المناسبة حسب موقعك. إذا كنت في الصين، اختر open.bigmodel.cn. وإلا، اختر api.z.ai.", "geminiApiKey": "مفتاح Gemini", @@ -498,11 +505,12 @@ "warning": "⚠️ تعيين القيمة إلى 0 يسمح بإعادة المحاولة غير المحدودة مما قد يستهلك استخدام API كبير" }, "reasoningEffort": { - "label": "جهد الاستنتاج", + "label": "جهد الاستنتاج للنموذج", + "none": "لا شيء", "minimal": "الحد الأدنى (الأسرع)", - "high": "عالي", + "low": "منخفض", "medium": "متوسط", - "low": "منخفض" + "high": "عالي" }, "verbosity": { "label": "مستوى التفصيل في المخرجات", @@ -599,6 +607,10 @@ } }, "checkpoints": { + "timeout": { + "label": "مهلة تهيئة نقطة الحفظ (ثواني)", + "description": "أقصى وقت انتظار لتهيئة خدمة نقاط الحفظ. الافتراضي 15 ثانية. النطاق: 10-60 ثانية." + }, "enable": { "label": "تفعيل نقاط الحفظ التلقائية", "description": "ينشئ نقاط حفظ آليًا أثناء التنفيذ. <0>تعرف أكثر" @@ -737,6 +749,14 @@ "inheritDescription": "هذا الملف يرث النسبة العامة ({{threshold}}%)", "usesGlobal": "(يستخدم {{threshold}}% عام)" }, + "includeCurrentTime": { + "label": "تضمين الوقت الحالي في السياق", + "description": "عند التفعيل، سيتم تضمين الوقت الحالي ومعلومات المنطقة الزمنية في موجه النظام. عطّل هذا إذا كانت النماذج تتوقف عن العمل بسبب مخاوف الوقت." + }, + "includeCurrentCost": { + "label": "تضمين التكلفة الحالية في السياق", + "description": "عند التفعيل، سيتم تضمين تكلفة استخدام API الحالية في موجه النظام. عطّل هذا إذا كانت النماذج تتوقف عن العمل بسبب مخاوف التكلفة." + }, "diagnostics": { "includeMessages": { "label": "تضمين رسائل التشخيص تلقائيًا في السياق", @@ -847,10 +867,6 @@ "name": "استخدم استراتيجية diff الموحدة التجريبية", "description": "قد تقلل من الإعادات لكنها مخاطرة." 
}, - "SEARCH_AND_REPLACE": { - "name": "أداة بحث واستبدال تجريبية", - "description": "تمكّن استبدال متعدد في طلب واحد." - }, "INSERT_BLOCK": { "name": "أداة إدراج محتوى تجريبية", "description": "تدرج محتوى في أسطر محددة بدون diff." diff --git a/webview-ui/src/i18n/locales/ca/chat.json b/webview-ui/src/i18n/locales/ca/chat.json index 9ae2199a187..96d9076c19c 100644 --- a/webview-ui/src/i18n/locales/ca/chat.json +++ b/webview-ui/src/i18n/locales/ca/chat.json @@ -156,6 +156,9 @@ "initializingWarning": "Encara s'està inicialitzant el punt de control... Si això triga massa, pots desactivar els punts de control a la configuració i reiniciar la teva tasca.", "menu": { "viewDiff": "Veure diferències", + "more": "Més opcions", + "viewDiffFromInit": "Veure tots els canvis", + "viewDiffWithCurrent": "Veure els canvis des d'aquest punt de control", "restore": "Restaurar punt de control", "restoreFiles": "Restaurar arxius", "restoreFilesDescription": "Restaura els arxius del teu projecte a una instantània presa en aquest punt.", @@ -272,6 +275,7 @@ "toggleAriaLabel": "Commuta l'aprovació automàtica", "disabledAriaLabel": "Aprovació automàtica desactivada - selecciona primer les opcions", "triggerLabelOff": "Aprovació automàtica desactivada", + "triggerLabelOffShort": "Off", "triggerLabel_zero": "0 aprovacions automàtiques", "triggerLabel_one": "1 aprovació automàtica", "triggerLabel_other": "{{count}} aprovacions automàtiques", @@ -301,6 +305,19 @@ "selectModel": "Selecciona roo/code-supernova del proveïdor Roo Code Cloud a Configuració per començar.", "goToSettingsButton": "Anar a Configuració" }, + "release": { + "heading": "Novetats a l'extensió:", + "openRouterEmbeddings": "Suport per a models d'incrustació d'OpenRouter", + "chutesDynamic": "Chutes ara carrega els últims models de forma dinàmica", + "queuedMessagesFix": "Correccions per a missatges en cua que es perden" + }, + "cloudAgents": { + "heading": "Novetats al núvol:", + "prFixer": "Presentem l'agent al núvol PR 
Fixer per complementar el Revisor de PR.", + "prFixerDescription": "El PR Fixer de Roo Code aplica canvis d'alta qualitat a les teves PR directament des de GitHub. Invoca'l mitjançant un comentari a la PR i llegirà tot l'historial de comentaris per entendre el context, els acords i els compromisos, i després implementarà la solució correcta.", + "tryPrFixerButton": "Prova el PR Fixer" + }, + "careers": "A més, estem contractant!", "socialLinks": "Uneix-te a nosaltres a X, Discord, o r/RooCode 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/ca/common.json b/webview-ui/src/i18n/locales/ca/common.json index 9e0bbdc8ca7..d135d2624df 100644 --- a/webview-ui/src/i18n/locales/ca/common.json +++ b/webview-ui/src/i18n/locales/ca/common.json @@ -114,5 +114,9 @@ "months_ago": "fa {{count}} mesos", "year_ago": "fa un any", "years_ago": "fa {{count}} anys" + }, + "errors": { + "wait_checkpoint_long_time": "Has esperat {{timeout}} segons per inicialitzar el punt de control. Si no necessites aquesta funció, desactiva-la a la configuració del punt de control.", + "init_checkpoint_fail_long_time": "La inicialització del punt de control ha trigat més de {{timeout}} segons, per això els punts de control estan desactivats per a aquesta tasca. Pots desactivar els punts de control o augmentar el temps d'espera a la configuració del punt de control." 
} } diff --git a/webview-ui/src/i18n/locales/ca/mcp.json b/webview-ui/src/i18n/locales/ca/mcp.json index 3cc7ef040a1..295b0ab0922 100644 --- a/webview-ui/src/i18n/locales/ca/mcp.json +++ b/webview-ui/src/i18n/locales/ca/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "Eines", "resources": "Recursos", - "errors": "Errors" + "logs": "Registres" }, "emptyState": { "noTools": "No s'han trobat eines", "noResources": "No s'han trobat recursos", - "noErrors": "No s'han trobat errors" + "noLogs": "Encara no hi ha registres" }, "networkTimeout": { "label": "Temps d'espera de xarxa", diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json index 011595ce668..9fec116f028 100644 --- a/webview-ui/src/i18n/locales/ca/settings.json +++ b/webview-ui/src/i18n/locales/ca/settings.json @@ -60,6 +60,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "Clau API", "vercelAiGatewayApiKeyPlaceholder": "Introduïu la vostra clau API de Vercel AI Gateway", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "Clau de l'API d'OpenRouter", + "openRouterApiKeyPlaceholder": "Introduïu la vostra clau de l'API d'OpenRouter", "openaiCompatibleProvider": "Compatible amb OpenAI", "openAiKeyLabel": "Clau API OpenAI", "openAiKeyPlaceholder": "Introduïu la vostra clau API OpenAI", @@ -126,7 +129,8 @@ "vercelAiGatewayApiKeyRequired": "Es requereix la clau API de Vercel AI Gateway", "ollamaBaseUrlRequired": "Cal una URL base d'Ollama", "baseUrlRequired": "Cal una URL base", - "modelDimensionMinValue": "La dimensió del model ha de ser superior a 0" + "modelDimensionMinValue": "La dimensió del model ha de ser superior a 0", + "openRouterApiKeyRequired": "Es requereix la clau API d'OpenRouter" }, "advancedConfigLabel": "Configuració avançada", "searchMinScoreLabel": "Llindar de puntuació de cerca", @@ -309,6 +313,9 @@ "getZaiApiKey": "Obtenir clau API de Z AI", "zaiEntrypoint": "Punt d'entrada de Z AI", 
"zaiEntrypointDescription": "Si us plau, seleccioneu el punt d'entrada de l'API apropiat segons la vostra ubicació. Si sou a la Xina, trieu open.bigmodel.cn. Altrament, trieu api.z.ai.", + "minimaxApiKey": "Clau API de MiniMax", + "getMiniMaxApiKey": "Obtenir clau API de MiniMax", + "minimaxBaseUrl": "Punt d'entrada de MiniMax", "geminiApiKey": "Clau API de Gemini", "getGroqApiKey": "Obtenir clau API de Groq", "groqApiKey": "Clau API de Groq", @@ -362,7 +369,7 @@ "enablePromptCachingTitle": "Habilitar l'emmagatzematge en caché de prompts per millorar el rendiment i reduir els costos per als models compatibles.", "cacheUsageNote": "Nota: Si no veieu l'ús de la caché, proveu de seleccionar un model diferent i després tornar a seleccionar el model desitjat.", "vscodeLmModel": "Model de llenguatge", - "vscodeLmWarning": "Nota: Aquesta és una integració molt experimental i el suport del proveïdor variarà. Si rebeu un error sobre un model no compatible, és un problema del proveïdor.", + "vscodeLmWarning": "Nota: Els models accessibles a través de l’API VS Code Language Model poden estar encapsulats o ajustats pel proveïdor; per tant, el comportament pot diferir de l’ús directe del mateix model des d’un proveïdor o enrutador típic. Per utilitzar un model del desplegable «Language Model», primer canvia a aquest model i després fes clic a «Acceptar» a l’avís de Copilot Chat; en cas contrari pots veure un error com 400 «The requested model is not supported».", "geminiParameters": { "urlContext": { "title": "Activa el context d'URL", @@ -439,7 +446,7 @@ }, "computerUse": { "label": "Ús de l'ordinador", - "description": "Aquest model és capaç d'interactuar amb un navegador? (com Claude 3.7 Sonnet)" + "description": "Aquest model és capaç d'interactuar amb un navegador?" 
}, "promptCache": { "label": "Emmagatzematge en caché de prompts", @@ -481,6 +488,7 @@ }, "reasoningEffort": { "label": "Esforç de raonament del model", + "none": "Cap", "minimal": "Mínim (el més ràpid)", "high": "Alt", "medium": "Mitjà", @@ -553,6 +561,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Temps d'espera per inicialitzar el punt de control (segons)", + "description": "Temps màxim d'espera per inicialitzar el servei de punts de control. El valor per defecte és 15 segons. Rang: 10-60 segons." + }, "enable": { "label": "Habilitar punts de control automàtics", "description": "Quan està habilitat, Kilo Code crearà automàticament punts de control durant l'execució de tasques, facilitant la revisió de canvis o la reversió a estats anteriors. <0>Més informació" @@ -680,6 +692,14 @@ "label": "Mida total màxima d'imatges", "mb": "MB", "description": "Límit de mida acumulativa màxima (en MB) per a totes les imatges processades en una sola operació read_file. Quan es llegeixen múltiples imatges, la mida de cada imatge s'afegeix al total. Si incloure una altra imatge excediria aquest límit, serà omesa." + }, + "includeCurrentTime": { + "label": "Inclou l'hora actual en el context", + "description": "Quan està activat, l'hora actual i la informació del fus horari s'inclouran a la indicació del sistema. Desactiveu-ho si els models deixen de funcionar per problemes amb l'hora." + }, + "includeCurrentCost": { + "label": "Inclou el cost actual en el context", + "description": "Quan està activat, el cost d'ús actual de l'API s'inclourà a la indicació del sistema. Desactiveu-ho si els models deixen de funcionar per problemes amb el cost." } }, "terminal": { @@ -688,56 +708,56 @@ "description": "Configuració bàsica del terminal" }, "advanced": { - "label": "Configuració del terminal: Avançada", - "description": "Les següents opcions poden requerir reiniciar el terminal per aplicar la configuració." 
+ "label": "Configuració del terminal: Avançat", + "description": "Aquests paràmetres només s'apliquen quan 'Utilitza terminal en línia' està desactivat. Només afecten el terminal de VS Code i poden requerir reiniciar l'IDE." }, "outputLineLimit": { - "label": "Límit de sortida de terminal", - "description": "Nombre màxim de línies a incloure a la sortida del terminal en executar comandes. Quan s'excedeix, s'eliminaran línies del mig, estalviant token. <0>Més informació" + "label": "Límit de sortida del terminal", + "description": "Conserva les primeres i últimes línies i descarta les del mig per mantenir-se sota el límit. Redueix per estalviar tokens; augmenta per donar a Roo més detalls del mig. Roo veu un marcador on s'ha omès el contingut.<0>Aprèn-ne més" }, "outputCharacterLimit": { "label": "Límit de caràcters del terminal", - "description": "Nombre màxim de caràcters a incloure en la sortida del terminal en executar ordres. Aquest límit té precedència sobre el límit de línies per evitar problemes de memòria amb línies extremadament llargues. Quan se superi, la sortida es truncarà. <0>Més informació" + "description": "Anul·la el límit de línies per evitar problemes de memòria imposant un límit dur a la mida de sortida. Si se supera, manté l'inici i el final i mostra un marcador a Roo on s'ha omès el contingut. <0>Aprèn-ne més" }, "shellIntegrationTimeout": { - "label": "Temps d'espera d'integració de shell del terminal", - "description": "Temps màxim d'espera per a la inicialització de la integració de shell abans d'executar comandes. Per a usuaris amb temps d'inici de shell llargs, aquest valor pot necessitar ser augmentat si veieu errors \"Shell Integration Unavailable\" al terminal. <0>Més informació" + "label": "Temps d'espera d'integració del shell del terminal", + "description": "Quant de temps esperar la integració del shell de VS Code abans d'executar comandes. 
Augmenta si el teu shell s'inicia lentament o veus errors 'Integració del Shell No Disponible'. <0>Aprèn-ne més" }, "shellIntegrationDisabled": { - "label": "Desactiva la integració de l'intèrpret d'ordres del terminal", - "description": "Activa això si les ordres del terminal no funcionen correctament o si veus errors de 'Shell Integration Unavailable'. Això utilitza un mètode més senzill per executar ordres, evitant algunes funcions avançades del terminal. <0>Més informació" + "label": "Utilitza terminal en línia (recomanat)", + "description": "Executa comandes al terminal en línia (xat) per evitar perfils/integració del shell per a execucions més ràpides i fiables. Quan està desactivat, Roo usa el terminal de VS Code amb el teu perfil de shell, prompts i connectors. <0>Aprèn-ne més" }, "commandDelay": { "label": "Retard de comanda del terminal", - "description": "Retard en mil·lisegons a afegir després de l'execució de la comanda. La configuració predeterminada de 0 desactiva completament el retard. Això pot ajudar a assegurar que la sortida de la comanda es capturi completament en terminals amb problemes de temporització. En la majoria de terminals s'implementa establint `PROMPT_COMMAND='sleep N'` i Powershell afegeix `start-sleep` al final de cada comanda. Originalment era una solució per al error VSCode#237208 i pot no ser necessari. <0>Més informació" + "description": "Afegeix una pausa breu després de cada comanda perquè el terminal de VS Code pugui buidar tota la sortida (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). Usa només si veus que falta sortida final; altrament deixa a 0. <0>Aprèn-ne més" }, "compressProgressBar": { - "label": "Comprimir sortida de barra de progrés", - "description": "Quan està habilitat, processa la sortida del terminal amb retorns de carro (\\r) per simular com un terminal real mostraria el contingut. 
Això elimina els estats intermedis de les barres de progrés, mantenint només l'estat final, la qual cosa conserva espai de context per a informació més rellevant. <0>Més informació" + "label": "Comprimeix sortida de barra de progrés", + "description": "Col·lapsa barres de progrés/spinners perquè només es mantingui l'estat final (estalvia tokens). <0>Aprèn-ne més" }, "powershellCounter": { - "label": "Habilita la solució temporal del comptador PowerShell", - "description": "Quan està habilitat, afegeix un comptador a les comandes PowerShell per assegurar l'execució correcta de les comandes. Això ajuda amb els terminals PowerShell que poden tenir problemes amb la captura de sortida. <0>Més informació" + "label": "Activa solució de comptador de PowerShell", + "description": "Activa quan falta o es duplica la sortida de PowerShell; afegeix un petit comptador a cada comanda per estabilitzar la sortida. Mantén desactivat si la sortida ja es veu correcta. <0>Aprèn-ne més" }, "zshClearEolMark": { - "label": "Neteja la marca EOL de ZSH", - "description": "Quan està habilitat, neteja la marca de final de línia de ZSH establint PROMPT_EOL_MARK=''. Això evita problemes amb la interpretació de la sortida de comandes quan acaba amb caràcters especials com '%'. <0>Més informació" + "label": "Neteja marca EOL de ZSH", + "description": "Activa quan vegis % extraviats al final de línies o l'anàlisi sembli incorrecta; omet la marca de final de línia (%) de Zsh. <0>Aprèn-ne més" }, "zshOhMy": { - "label": "Habilita la integració Oh My Zsh", - "description": "Quan està habilitat, estableix ITERM_SHELL_INTEGRATION_INSTALLED=Yes per habilitar les característiques d'integració del shell Oh My Zsh. Aplicar aquesta configuració pot requerir reiniciar l'IDE. <0>Més informació" + "label": "Activa integració amb Oh My Zsh", + "description": "Activa quan el teu tema/connectors d'Oh My Zsh esperin integració del shell; estableix ITERM_SHELL_INTEGRATION_INSTALLED=Yes. 
Desactiva per evitar establir aquesta variable. <0>Aprèn-ne més" }, "zshP10k": { - "label": "Habilita la integració Powerlevel10k", - "description": "Quan està habilitat, estableix POWERLEVEL9K_TERM_SHELL_INTEGRATION=true per habilitar les característiques d'integració del shell Powerlevel10k. <0>Més informació" + "label": "Activa integració amb Powerlevel10k", + "description": "Activa quan usis integració del shell de Powerlevel10k. <0>Aprèn-ne més" }, "zdotdir": { - "label": "Habilitar gestió de ZDOTDIR", - "description": "Quan està habilitat, crea un directori temporal per a ZDOTDIR per gestionar correctament la integració del shell zsh. Això assegura que la integració del shell de VSCode funcioni correctament amb zsh mentre es preserva la teva configuració de zsh. <0>Més informació" + "label": "Activa gestió de ZDOTDIR", + "description": "Activa quan la integració del shell de zsh falli o entri en conflicte amb els teus dotfiles. <0>Aprèn-ne més" }, "inheritEnv": { "label": "Hereta variables d'entorn", - "description": "Quan està habilitat, el terminal hereta les variables d'entorn del procés pare de VSCode, com ara la configuració d'integració del shell definida al perfil d'usuari. Això commuta directament la configuració global de VSCode `terminal.integrated.inheritEnv`. <0>Més informació" + "description": "Activa per heretar variables d'entorn del procés pare de VS Code. <0>Aprèn-ne més" } }, "advancedSettings": { @@ -746,7 +766,7 @@ "advanced": { "diff": { "label": "Habilitar edició mitjançant diffs", - "description": "Quan està habilitat, Kilo Code podrà editar fitxers més ràpidament i rebutjarà automàticament escriptures completes de fitxers truncats. 
Funciona millor amb l'últim model Claude 4 Sonnet.", + "description": "Quan està habilitat, Kilo Code podrà editar fitxers més ràpidament i rebutjarà automàticament escriptures completes de fitxers truncats", "strategy": { "label": "Estratègia de diff", "options": { @@ -775,10 +795,6 @@ "name": "Utilitzar estratègia diff unificada experimental", "description": "Activar l'estratègia diff unificada experimental. Aquesta estratègia podria reduir el nombre de reintents causats per errors del model, però pot causar comportaments inesperats o edicions incorrectes. Activeu-la només si enteneu els riscos i esteu disposats a revisar acuradament tots els canvis." }, - "SEARCH_AND_REPLACE": { - "name": "Utilitzar eina de cerca i reemplaçament experimental", - "description": "Activar l'eina de cerca i reemplaçament experimental, permetent a Kilo Code reemplaçar múltiples instàncies d'un terme de cerca en una sola petició." - }, "INSERT_BLOCK": { "name": "Utilitzar eina d'inserció de contingut experimental", "description": "Activar l'eina d'inserció de contingut experimental, permetent a Kilo Code inserir contingut a números de línia específics sense necessitat de crear un diff." @@ -865,8 +881,6 @@ "modelInfo": { "supportsImages": "Suporta imatges", "noImages": "No suporta imatges", - "supportsComputerUse": "Suporta ús de l'ordinador", - "noComputerUse": "No suporta ús de l'ordinador", "supportsPromptCache": "Suporta emmagatzematge en caché de prompts", "noPromptCache": "No suporta emmagatzematge en caché de prompts", "contextWindow": "Finestra de context:", diff --git a/webview-ui/src/i18n/locales/cs/chat.json b/webview-ui/src/i18n/locales/cs/chat.json index 082e80ea113..49c8f6c7443 100644 --- a/webview-ui/src/i18n/locales/cs/chat.json +++ b/webview-ui/src/i18n/locales/cs/chat.json @@ -158,6 +158,8 @@ "initializingWarning": "Stále se inicializuje kontrolní bod... 
Pokud to trvá příliš dlouho, můžeš vypnout kontrolní body v nastavení a restartovat svůj úkol.", "menu": { "viewDiff": "Zobrazit rozdíly", + "viewDiffFromInit": "Zobrazit všechny změny", + "viewDiffWithCurrent": "Zobrazit změny od tohoto kontrolního bodu", "restore": "Obnovit kontrolní bod", "restoreFiles": "Obnovit soubory", "restoreFilesDescription": "Obnoví soubory tvého projektu zpět na snímek pořízený v tomto bodě.", @@ -165,7 +167,8 @@ "confirm": "Potvrdit", "cancel": "Zrušit", "cannotUndo": "Tuto akci nelze vrátit zpět.", - "restoreFilesAndTaskDescription": "Obnoví soubory tvého projektu zpět na snímek pořízený v tomto bodě a smaže všechny zprávy po tomto bodě." + "restoreFilesAndTaskDescription": "Obnoví soubory tvého projektu zpět na snímek pořízený v tomto bodě a smaže všechny zprávy po tomto bodě.", + "more": "Více možností" }, "current": "Aktuální" }, @@ -297,7 +300,8 @@ "triggerLabel_one": "Automatické schválení: 1", "triggerLabel_other": "Automatické schválení: {{count}}", "triggerLabelAll": "Automatické schválení: YOLO", - "triggerLabelOff": "Automatické schvalování vypnuto" + "triggerLabelOff": "Automatické schvalování vypnuto", + "triggerLabelOffShort": "Vypnuto" }, "modeSelector": { "title": "Režimy", @@ -309,24 +313,28 @@ "organizationModes": "Režimy organizace" }, "announcement": { - "title": "🎉 Vydán Kilo Code {{version}}", - "description": "Kilo Code {{version}} přináší hlavní nové funkce a vylepšení na základě tvé zpětné vazby.", - "whatsNew": "Co je nového", - "feature1": "Spuštění Kilo Code Marketplace: Tržiště je nyní živé! 
Objevuj a instaluj režimy a MCP snadněji než kdy dříve.", - "feature2": "Modely Gemini 2.5: Přidána podpora pro nové modely Gemini 2.5 Pro, Flash a Flash Lite.", - "feature3": "Podpora souborů Excel a další: Přidána podpora souborů Excel (.xlsx) a mnoho oprav chyb a vylepšení!", - "hideButton": "Skrýt oznámení", - "detailsDiscussLinks": "Získej více podrobností a diskutuj na Discord a Reddit 🚀", - "learnMore": "Dozvědět se více", - "visitCloudButton": "Navštívit Roo Code Cloud", - "socialLinks": "Následujte nás", + "title": "🎉 Vydán Roo Code {{version}}", "stealthModel": { - "feature": "Časově omezený ZDARMA stealth model - Bleskurychle rychlý rozumový model, který vyniká v agentním kódování s kontextovým oknem 262k, dostupný prostřednictvím Roo Code Cloud.", - "note": "(Poznámka: prompty a dokončení jsou zaznamenávány tvůrcem modelu za účelem vylepšení modelu)", + "feature": "Časově omezený ZDARMA stealth model - Code Supernova: Nyní vylepšen s kontextovým oknem 1M tokenů! Všestranný agentní model pro kódování, který podporuje obrazové vstupy, dostupný prostřednictvím Roo Code Cloud.", + "note": "(Poznámka: prompty a dokončení jsou zaznamenávány tvůrcem modelu a používány k vylepšení modelu)", "connectButton": "Připojit se k Roo Code Cloud", - "selectModel": "Vyber roo/sonic z poskytovatele Roo Code Cloud v
    Nastaveních pro začátek", + "selectModel": "Vyber roo/code-supernova z poskytovatele Roo Code Cloud v Nastavení pro začátek.", "goToSettingsButton": "Jít do Nastavení" - } + }, + "release": { + "heading": "Nové v rozšíření:", + "openRouterEmbeddings": "Podpora pro embedding modely OpenRouter", + "chutesDynamic": "Chutes nyní dynamicky načítá nejnovější modely", + "queuedMessagesFix": "Opravy pro ztracené zprávy ve frontě" + }, + "cloudAgents": { + "heading": "Nové v cloudu:", + "prFixer": "Představujeme cloudového agenta PR Fixer jako doplněk k PR Reviewer.", + "prFixerDescription": "PR Fixer aplikuje vysoce kvalitní změny na tvé PR přímo z GitHubu. Vyvolej ho pomocí komentáře k PR a přečte si celou historii komentářů, aby pochopil kontext, dohody a kompromisy - a pak implementuje správnou opravu.", + "tryPrFixerButton": "Vyzkoušet PR Fixer" + }, + "careers": "Také nabíráme!", + "socialLinks": "Připoj se k nám na X, Discord nebo r/RooCode 🚀" }, "reasoning": { "thinking": "Přemýšlím", diff --git a/webview-ui/src/i18n/locales/cs/common.json b/webview-ui/src/i18n/locales/cs/common.json index 2414c81576b..af317b5e802 100644 --- a/webview-ui/src/i18n/locales/cs/common.json +++ b/webview-ui/src/i18n/locales/cs/common.json @@ -118,5 +118,9 @@ "months_ago": "před {{count}} měsíci", "year_ago": "před rokem", "years_ago": "před {{count}} lety" + }, + "errors": { + "wait_checkpoint_long_time": "Čekalo se {{timeout}} sekund na inicializaci kontrolního bodu. Pokud funkci kontrolních bodů nepotřebuješ, vypni ji v nastavení kontrolních bodů.", + "init_checkpoint_fail_long_time": "Inicializace kontrolního bodu trvá déle než {{timeout}} sekund, proto jsou kontrolní body pro tento úkol zakázány. Můžeš zakázat kontrolní body nebo prodloužit čekací dobu v nastavení kontrolních bodů." 
} } diff --git a/webview-ui/src/i18n/locales/cs/mcp.json b/webview-ui/src/i18n/locales/cs/mcp.json index d2410190db3..60a607e4ea7 100644 --- a/webview-ui/src/i18n/locales/cs/mcp.json +++ b/webview-ui/src/i18n/locales/cs/mcp.json @@ -26,12 +26,12 @@ "tabs": { "tools": "Nástroje", "resources": "Zdroje", - "errors": "Chyby" + "logs": "Logy" }, "emptyState": { "noTools": "Nenalezeny žádné nástroje", "noResources": "Nenalezeny žádné zdroje", - "noErrors": "Nenalezeny žádné chyby" + "noLogs": "Zatím žádné logy" }, "networkTimeout": { "label": "Časový limit sítě", diff --git a/webview-ui/src/i18n/locales/cs/settings.json b/webview-ui/src/i18n/locales/cs/settings.json index 1fcaa73887a..1612f71a347 100644 --- a/webview-ui/src/i18n/locales/cs/settings.json +++ b/webview-ui/src/i18n/locales/cs/settings.json @@ -71,6 +71,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "Klíč API", "vercelAiGatewayApiKeyPlaceholder": "Zadejte svůj klíč API Vercel AI Gateway", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "Klíč API OpenRouter", + "openRouterApiKeyPlaceholder": "Zadejte svůj klíč API OpenRouter", "openaiCompatibleProvider": "Kompatibilní s OpenAI", "openAiKeyLabel": "Klíč API OpenAI", "openAiKeyPlaceholder": "Zadejte svůj klíč API OpenAI", @@ -137,6 +140,7 @@ "geminiApiKeyRequired": "Klíč API Gemini je povinný", "mistralApiKeyRequired": "Klíč API Mistral je povinný", "vercelAiGatewayApiKeyRequired": "Je vyžadován klíč API Vercel AI Gateway", + "openRouterApiKeyRequired": "Klíč API OpenRouter je povinný", "ollamaBaseUrlRequired": "Základní URL Ollama je povinné", "baseUrlRequired": "Základní URL je povinné", "modelDimensionMinValue": "Dimenze modelu musí být větší než 0" @@ -316,6 +320,9 @@ "moonshotApiKey": "Klíč API Moonshot", "getMoonshotApiKey": "Získat klíč API Moonshot", "moonshotBaseUrl": "Vstupní bod Moonshot", + "minimaxApiKey": "Klíč API MiniMax", + "getMiniMaxApiKey": "Získat klíč API MiniMax", + "minimaxBaseUrl": 
"Vstupní bod MiniMax", "zaiEntrypoint": "Vstupní bod Z AI", "zaiEntrypointDescription": "Vyberte prosím vhodný vstupní bod API podle vaší polohy. Pokud jste v Číně, vyberte open.bigmodel.cn. Jinak vyberte api.z.ai.", "geminiApiKey": "Klíč API Gemini", @@ -503,10 +510,11 @@ }, "reasoningEffort": { "label": "Úsilí modelu při uvažování", + "none": "Žádné", "minimal": "Minimální (nejrychlejší)", - "high": "Vysoké", + "low": "Nízké", "medium": "Střední", - "low": "Nízké" + "high": "Vysoké" }, "verbosity": { "label": "Podrobnost výstupu", @@ -599,6 +607,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Časový limit inicializace kontrolního bodu (sekundy)", + "description": "Maximální doba čekání na inicializaci služby kontrolních bodů. Výchozí je 15 sekund. Rozsah: 10-60 sekund." + }, "enable": { "label": "Povolit automatické kontrolní body", "description": "Pokud je povoleno, Kilo Code automaticky vytvoří kontrolní body během provádění úkolu, což usnadní kontrolu změn nebo návrat do předchozích stavů. <0>Dozvědět se více" @@ -718,6 +730,16 @@ "lines": "řádky", "always_full_read": "Vždy číst celý soubor" }, + "maxImageFileSize": { + "label": "Maximální velikost souboru obrázku", + "mb": "MB", + "description": "Maximální velikost (v MB) pro soubory obrázků, které může nástroj pro čtení souborů zpracovat." + }, + "maxTotalImageSize": { + "label": "Maximální celková velikost obrázků", + "mb": "MB", + "description": "Maximální kumulativní limit velikosti (v MB) pro všechny obrázky zpracované v jedné operaci read_file. Při čtení více obrázků se velikost každého obrázku přičte k celkové velikosti. Pokud by zahrnutí dalšího obrázku překročilo tento limit, bude přeskočen." + }, "condensingThreshold": { "label": "Prahová hodnota spouštění zhušťování", "selectProfile": "Nakonfigurovat prahovou hodnotu pro profil", @@ -743,15 +765,13 @@ "description": "Čas čekání po zápisu souborů před pokračováním (v milisekundách). 
Vyšší hodnoty mohou zlepšit přesnost, ale zpomalují detekci chyb." } }, - "maxImageFileSize": { - "label": "Maximální velikost souboru obrázku", - "mb": "MB", - "description": "Maximální velikost (v MB) pro soubory obrázků, které mohou být zahrnuty do požadavků. Větší soubory budou automaticky změněny na menší velikost." + "includeCurrentTime": { + "label": "Zahrnout aktuální čas do kontextu", + "description": "Pokud je povoleno, aktuální čas a informace o časovém pásmu budou zahrnuty do systémové výzvy. Zakažte toto, pokud modely přestávají pracovat kvůli obavám o čas." }, - "maxTotalImageSize": { - "label": "Maximální celková velikost obrázků", - "mb": "MB", - "description": "Maximální kumulativní limit velikosti (v MB) pro všechny obrázky v jednom požadavku. Při překročení budou obrázky změněny na menší velikost, aby se vešly do tohoto limitu." + "includeCurrentCost": { + "label": "Zahrnout aktuální náklady do kontextu", + "description": "Pokud je povoleno, aktuální náklady na využití API budou zahrnuty do systémové výzvy. Zakažte toto, pokud modely přestávají pracovat kvůli obavám o náklady." } }, "terminal": { @@ -872,10 +892,6 @@ "relace": "Relace Apply v3" } }, - "SEARCH_AND_REPLACE": { - "name": "Použít experimentální nástroj hledání a nahrazování", - "description": "Povolit experimentální nástroj hledání a nahrazování, který umožňuje Kilo Code nahradit více výskytů hledaného výrazu v jednom požadavku." - }, "INSERT_BLOCK": { "name": "Použít experimentální nástroj vkládání obsahu", "description": "Povolit experimentální nástroj vkládání obsahu, který umožňuje Kilo Code vkládat obsah na konkrétní čísla řádků bez nutnosti vytvářet diff." diff --git a/webview-ui/src/i18n/locales/de/chat.json b/webview-ui/src/i18n/locales/de/chat.json index 2b7ca7cae36..6d9c395b02e 100644 --- a/webview-ui/src/i18n/locales/de/chat.json +++ b/webview-ui/src/i18n/locales/de/chat.json @@ -156,6 +156,9 @@ "initializingWarning": "Checkpoint wird noch initialisiert... 
Falls dies zu lange dauert, kannst du Checkpoints in den Einstellungen deaktivieren und deine Aufgabe neu starten.", "menu": { "viewDiff": "Unterschiede anzeigen", + "more": "Weitere Optionen", + "viewDiffFromInit": "Alle Änderungen anzeigen", + "viewDiffWithCurrent": "Änderungen seit diesem Checkpoint anzeigen", "restore": "Checkpoint wiederherstellen", "restoreFiles": "Dateien wiederherstellen", "restoreFilesDescription": "Stellt die Dateien deines Projekts auf einen Snapshot zurück, der an diesem Punkt erstellt wurde.", @@ -272,6 +275,7 @@ "toggleAriaLabel": "Automatische Genehmigung umschalten", "disabledAriaLabel": "Automatische Genehmigung deaktiviert - wähle zuerst Optionen aus", "triggerLabelOff": "Automatische Genehmigung aus", + "triggerLabelOffShort": "Aus", "triggerLabel_zero": "0 automatisch genehmigt", "triggerLabel_one": "1 automatisch genehmigt", "triggerLabel_other": "{{count}} automatisch genehmigt", @@ -301,6 +305,19 @@ "selectModel": "Wähle roo/code-supernova vom Roo Code Cloud-Provider in den Einstellungen aus, um zu beginnen.", "goToSettingsButton": "Zu den Einstellungen" }, + "release": { + "heading": "Neu in der Extension:", + "openRouterEmbeddings": "Unterstützung für OpenRouter-Embedding-Modelle", + "chutesDynamic": "Chutes lädt die neuesten Modelle jetzt dynamisch", + "queuedMessagesFix": "Behebungen für verlorene Nachrichten in der Warteschlange" + }, + "cloudAgents": { + "heading": "Neu in der Cloud:", + "prFixer": "Wir stellen den PR Fixer Cloud-Agent vor, der den PR Reviewer ergänzt.", + "prFixerDescription": "Roo Codes PR Fixer wendet hochwertige Änderungen direkt auf deine PRs in GitHub an. 
Rufe ihn über einen PR-Kommentar auf und er liest den gesamten Kommentarverlauf, um Kontext, Vereinbarungen und Kompromisse zu verstehen — dann setzt er die richtige Lösung um.", + "tryPrFixerButton": "PR Fixer ausprobieren" + }, + "careers": "Außerdem, wir stellen ein!", "socialLinks": "Folge uns auf X, Discord oder r/RooCode 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/de/common.json b/webview-ui/src/i18n/locales/de/common.json index e20016c7394..86d13621834 100644 --- a/webview-ui/src/i18n/locales/de/common.json +++ b/webview-ui/src/i18n/locales/de/common.json @@ -114,5 +114,9 @@ "months_ago": "vor {{count}} Monaten", "year_ago": "vor einem Jahr", "years_ago": "vor {{count}} Jahren" + }, + "errors": { + "wait_checkpoint_long_time": "Du hast {{timeout}} Sekunden auf die Initialisierung des Checkpoints gewartet. Wenn du die Checkpoint-Funktion nicht brauchst, kannst du sie in den Checkpoint-Einstellungen ausschalten.", + "init_checkpoint_fail_long_time": "Die Initialisierung des Checkpoints dauert länger als {{timeout}} Sekunden, deshalb sind Checkpoints für diese Aufgabe deaktiviert. Du kannst Checkpoints ausschalten oder die Wartezeit in den Checkpoint-Einstellungen verlängern." 
} } diff --git a/webview-ui/src/i18n/locales/de/mcp.json b/webview-ui/src/i18n/locales/de/mcp.json index cc9ebeb08c4..4d020e8f29b 100644 --- a/webview-ui/src/i18n/locales/de/mcp.json +++ b/webview-ui/src/i18n/locales/de/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "Tools", "resources": "Ressourcen", - "errors": "Fehler" + "logs": "Protokolle" }, "emptyState": { "noTools": "Keine Tools gefunden", "noResources": "Keine Ressourcen gefunden", - "noErrors": "Keine Fehler gefunden" + "noLogs": "Noch keine Protokolle" }, "networkTimeout": { "label": "Netzwerk-Timeout", diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json index babc63d7beb..5c5e041e122 100644 --- a/webview-ui/src/i18n/locales/de/settings.json +++ b/webview-ui/src/i18n/locales/de/settings.json @@ -62,6 +62,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "API-Schlüssel", "vercelAiGatewayApiKeyPlaceholder": "Gib deinen Vercel AI Gateway API-Schlüssel ein", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "OpenRouter API-Schlüssel", + "openRouterApiKeyPlaceholder": "Gib deinen OpenRouter API-Schlüssel ein", "mistralProvider": "Mistral", "mistralApiKeyLabel": "API-Schlüssel:", "mistralApiKeyPlaceholder": "Gib deinen Mistral-API-Schlüssel ein", @@ -126,7 +129,8 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API-Schlüssel ist erforderlich", "ollamaBaseUrlRequired": "Ollama-Basis-URL ist erforderlich", "baseUrlRequired": "Basis-URL ist erforderlich", - "modelDimensionMinValue": "Modellabmessung muss größer als 0 sein" + "modelDimensionMinValue": "Modellabmessung muss größer als 0 sein", + "openRouterApiKeyRequired": "OpenRouter API-Schlüssel ist erforderlich" }, "advancedConfigLabel": "Erweiterte Konfiguration", "searchMinScoreLabel": "Suchergebnis-Schwellenwert", @@ -308,7 +312,10 @@ "zaiApiKey": "Z AI API-Schlüssel", "getZaiApiKey": "Z AI API-Schlüssel erhalten", "zaiEntrypoint": "Z AI 
Einstiegspunkt", - "zaiEntrypointDescription": "Bitte wähle den entsprechenden API-Einstiegspunkt basierend auf deinem Standort aus. Wenn du dich in China befindest, wähle open.bigmodel.cn. Andernfalls wähle api.z.ai.", + "zaiEntrypointDescription": "Bitte wähle den entsprechenden API-Einstiegspunkt basierend auf deinem Standort. Wenn du dich in China befindest, wähle open.bigmodel.cn. Andernfalls wähle api.z.ai.", + "minimaxApiKey": "MiniMax API-Schlüssel", + "getMiniMaxApiKey": "MiniMax API-Schlüssel erhalten", + "minimaxBaseUrl": "MiniMax-Einstiegspunkt", "geminiApiKey": "Gemini API-Schlüssel", "getGroqApiKey": "Groq API-Schlüssel erhalten", "groqApiKey": "Groq API-Schlüssel", @@ -362,7 +369,7 @@ "enablePromptCachingTitle": "Prompt-Caching aktivieren, um die Leistung zu verbessern und Kosten für unterstützte Modelle zu reduzieren.", "cacheUsageNote": "Hinweis: Wenn du keine Cache-Nutzung siehst, versuche ein anderes Modell auszuwählen und dann dein gewünschtes Modell erneut auszuwählen.", "vscodeLmModel": "Sprachmodell", - "vscodeLmWarning": "Hinweis: Dies ist eine sehr experimentelle Integration und die Anbieterunterstützung variiert. Wenn du einen Fehler über ein nicht unterstütztes Modell erhälst, liegt das Problem auf Anbieterseite.", + "vscodeLmWarning": "Hinweis: Über die VS Code Language Model API abgerufene Modelle können vom Anbieter ummantelt oder feinabgestimmt sein. Daher kann sich ihr Verhalten von der direkten Nutzung desselben Modells bei einem typischen Anbieter oder Router unterscheiden. 
Um ein Modell aus der Auswahlliste „Language Model“ zu verwenden, wechsle zunächst zu diesem Modell und klicke dann im Copilot‑Chat auf „Akzeptieren“; andernfalls kann ein Fehler wie 400 „The requested model is not supported“ auftreten.", "geminiParameters": { "urlContext": { "title": "URL-Kontext aktivieren", @@ -439,7 +446,7 @@ }, "computerUse": { "label": "Computer-Nutzung", - "description": "Ist dieses Modell in der Lage, mit einem Browser zu interagieren? (z.B. Claude 3.7 Sonnet)" + "description": "Ist dieses Modell in der Lage, mit einem Browser zu interagieren?" }, "promptCache": { "label": "Prompt-Caching", @@ -481,6 +488,7 @@ }, "reasoningEffort": { "label": "Modell-Denkaufwand", + "none": "Keine", "minimal": "Minimal (schnellste)", "high": "Hoch", "medium": "Mittel", @@ -553,6 +561,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Timeout für Checkpoint-Initialisierung (Sekunden)", + "description": "Maximale Wartezeit für die Initialisierung des Checkpoint-Dienstes. Standard ist 15 Sekunden. Bereich: 10-60 Sekunden." + }, "enable": { "label": "Automatische Kontrollpunkte aktivieren", "description": "Wenn aktiviert, erstellt Kilo Code automatisch Kontrollpunkte während der Aufgabenausführung, was die Überprüfung von Änderungen oder die Rückkehr zu früheren Zuständen erleichtert. <0>Mehr erfahren" @@ -637,8 +649,8 @@ "description": "Wenn aktiviert, werden Dateien, die mit Mustern in .kilocodeignore übereinstimmen, in Listen mit einem Schlosssymbol angezeigt. Wenn deaktiviert, werden diese Dateien vollständig aus Dateilisten und Suchen ausgeblendet." }, "maxConcurrentFileReads": { - "label": "Concurrent file reads limit", - "description": "Maximum number of files the 'read_file' tool can process concurrently. Higher values may speed up reading multiple small files but increase memory usage." + "label": "Limit für gleichzeitige Dateilesungen", + "description": "Maximale Anzahl von Dateien, die das 'read_file'-Tool gleichzeitig verarbeiten kann.
Höhere Werte können das Lesen mehrerer kleiner Dateien beschleunigen, erhöhen aber den Speicherverbrauch." }, "maxReadFile": { "label": "Schwellenwert für automatische Dateilesekürzung", @@ -679,7 +691,15 @@ "maxTotalImageSize": { "label": "Maximale Gesamtbildgröße", "mb": "MB", - "description": "Maximales kumulatives Größenlimit (in MB) für alle Bilder, die in einer einzelnen read_file-Operation verarbeitet werden. Beim Lesen mehrerer Bilder wird die Größe jedes Bildes zur Gesamtsumme addiert. Wenn das Einbeziehen eines weiteren Bildes dieses Limit überschreitten würde, wird es übersprungen." + "description": "Maximales kumulatives Größenlimit (in MB) für alle Bilder, die in einer einzelnen read_file-Operation verarbeitet werden. Beim Lesen mehrerer Bilder wird die Größe jedes Bildes zur Gesamtsumme addiert. Wenn das Einbeziehen eines weiteren Bildes dieses Limit überschreiten würde, wird es übersprungen." + }, + "includeCurrentTime": { + "label": "Aktuelle Uhrzeit in den Kontext einbeziehen", + "description": "Wenn aktiviert, werden die aktuelle Uhrzeit und Zeitzoneninformationen in den System-Prompt aufgenommen. Deaktiviere diese Option, wenn Modelle aufgrund von Zeitbedenken die Arbeit einstellen." + }, + "includeCurrentCost": { + "label": "Aktuelle Kosten in den Kontext einbeziehen", + "description": "Wenn aktiviert, werden die aktuellen API-Nutzungskosten in den System-Prompt aufgenommen. Deaktiviere diese Option, wenn Modelle aufgrund von Kostenbedenken die Arbeit einstellen." } }, "terminal": { @@ -689,55 +709,55 @@ }, "advanced": { "label": "Terminal-Einstellungen: Erweitert", - "description": "Die folgenden Optionen erfordern möglicherweise einen Terminal-Neustart, um die Einstellung zu übernehmen." + "description": "Diese Einstellungen gelten nur, wenn 'Inline-Terminal verwenden' deaktiviert ist. Sie betreffen nur das VS Code-Terminal und können einen IDE-Neustart erfordern." 
}, "outputLineLimit": { "label": "Terminal-Ausgabelimit", - "description": "Maximale Anzahl von Zeilen, die in der Terminal-Ausgabe bei der Ausführung von Befehlen enthalten sein sollen. Bei Überschreitung werden Zeilen aus der Mitte entfernt, wodurch Token gespart werden. <0>Mehr erfahren" + "description": "Behält erste und letzte Zeilen und verwirft die mittleren, um unter dem Limit zu bleiben. Niedriger für Token-Ersparnis; höher für mehr Details aus der Mitte für Kilo Code. Kilo Code sieht einen Platzhalter, wo Inhalt übersprungen wird. <0>Mehr erfahren" }, "outputCharacterLimit": { "label": "Terminal-Zeichenlimit", - "description": "Maximale Anzahl von Zeichen, die in die Terminalausgabe bei der Ausführung von Befehlen aufgenommen werden sollen. Dieses Limit hat Vorrang vor dem Zeilenlimit, um Speicherprobleme durch extrem lange Zeilen zu vermeiden. Bei Überschreitung wird die Ausgabe abgeschnitten. <0>Mehr erfahren" + "description": "Überschreibt das Zeilenlimit, um Speicherprobleme durch eine harte Obergrenze für die Ausgabegröße zu vermeiden. Bei Überschreitung behält es Anfang und Ende und zeigt Kilo Code einen Platzhalter, wo Inhalt übersprungen wird. <0>Mehr erfahren" }, "shellIntegrationTimeout": { - "label": "Terminal-Shell-Integrationszeit-Limit", - "description": "Maximale Wartezeit für die Shell-Integration, bevor Befehle ausgeführt werden. Für Benutzer mit langen Shell-Startzeiten musst du diesen Wert möglicherweise erhöhen, wenn du Fehler vom Typ \"Shell Integration Unavailable\" im Terminal siehst. <0>Mehr erfahren" + "label": "Terminal-Shell-Integrations-Timeout", + "description": "Wie lange auf VS Code Shell-Integration gewartet wird, bevor Befehle ausgeführt werden. Erhöhe den Wert, wenn deine Shell langsam startet oder du 'Shell-Integration nicht verfügbar'-Fehler siehst.
<0>Mehr erfahren" }, "shellIntegrationDisabled": { - "label": "Terminal-Shell-Integration deaktivieren", - "description": "Aktiviere dies, wenn Terminalbefehle nicht korrekt funktionieren oder du Fehler wie 'Shell Integration Unavailable' siehst. Dies verwendet eine einfachere Methode zur Ausführung von Befehlen und umgeht einige erweiterte Terminalfunktionen. <0>Mehr erfahren" + "label": "Inline-Terminal verwenden (empfohlen)", + "description": "Führe Befehle im Inline-Terminal (Chat) aus, um Shell-Profile/Integration für schnellere, zuverlässigere Läufe zu umgehen. Wenn deaktiviert, nutzt Kilo Code das VS Code-Terminal mit deinem Shell-Profil, Prompts und Plugins. <0>Mehr erfahren" }, "commandDelay": { "label": "Terminal-Befehlsverzögerung", - "description": "Verzögerung in Millisekunden, die nach der Befehlsausführung hinzugefügt wird. Die Standardeinstellung von 0 deaktiviert die Verzögerung vollständig. Dies kann dazu beitragen, dass die Befehlsausgabe in Terminals mit Timing-Problemen vollständig erfasst wird. In den meisten Terminals wird dies durch Setzen von `PROMPT_COMMAND='sleep N'` und Powershell fügt `start-sleep` am Ende jedes Befehls hinzu. Ursprünglich war dies eine Lösung für VSCode-Bug#237208 und ist möglicherweise nicht mehr erforderlich. <0>Mehr erfahren" + "description": "Fügt nach jedem Befehl eine kurze Pause hinzu, damit das VS Code-Terminal alle Ausgaben leeren kann (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). Verwende dies nur, wenn du fehlende Tail-Ausgabe siehst; sonst lass es bei 0. <0>Mehr erfahren" }, "compressProgressBar": { "label": "Fortschrittsbalken-Ausgabe komprimieren", - "description": "Wenn aktiviert, verarbeitet diese Option Terminal-Ausgaben mit Wagenrücklaufzeichen (\\r), um zu simulieren, wie ein echtes Terminal Inhalte anzeigen würde. Dies entfernt Zwischenzustände von Fortschrittsbalken und behält nur den Endzustand bei, wodurch Kontextraum für relevantere Informationen gespart wird.
<0>Mehr erfahren" + "description": "Klappt Fortschrittsbalken/Spinner zusammen, sodass nur der Endzustand erhalten bleibt (spart Token). <0>Mehr erfahren" }, "powershellCounter": { "label": "PowerShell-Zähler-Workaround aktivieren", - "description": "Wenn aktiviert, fügt einen Zähler zu PowerShell-Befehlen hinzu, um die korrekte Befehlsausführung sicherzustellen. Dies hilft bei PowerShell-Terminals, die Probleme mit der Ausgabeerfassung haben könnten. <0>Mehr erfahren" + "description": "Schalte dies ein, wenn PowerShell-Ausgabe fehlt oder dupliziert wird; es fügt jedem Befehl einen kleinen Zähler hinzu, um die Ausgabe zu stabilisieren. Lass es ausgeschaltet, wenn die Ausgabe bereits korrekt aussieht. <0>Mehr erfahren" }, "zshClearEolMark": { - "label": "ZSH-Zeilenende-Markierung löschen", - "description": "Wenn aktiviert, wird die ZSH-Zeilenende-Markierung durch Setzen von PROMPT_EOL_MARK='' gelöscht. Dies verhindert Probleme bei der Interpretation der Befehlsausgabe, wenn diese mit Sonderzeichen wie '%' endet. <0>Mehr erfahren" + "label": "ZSH-EOL-Markierung löschen", + "description": "Schalte dies ein, wenn du verirrte % am Zeilenende siehst oder das Parsen falsch aussieht; es lässt Zshs Zeilenende-Markierung (%) weg. <0>Mehr erfahren" }, "zshOhMy": { "label": "Oh My Zsh-Integration aktivieren", - "description": "Wenn aktiviert, wird ITERM_SHELL_INTEGRATION_INSTALLED=Yes gesetzt, um die Shell-Integrationsfunktionen von Oh My Zsh zu aktivieren. Das Anwenden dieser Einstellung erfordert möglicherweise einen Neustart der IDE. <0>Mehr erfahren" + "description": "Schalte dies ein, wenn dein Oh My Zsh-Theme/Plugins Shell-Integration erwarten; es setzt ITERM_SHELL_INTEGRATION_INSTALLED=Yes. Schalte dies aus, um das Setzen dieser Variable zu vermeiden. <0>Mehr erfahren" }, "zshP10k": { "label": "Powerlevel10k-Integration aktivieren", - "description": "Wenn aktiviert, wird POWERLEVEL9K_INSTANT_PROMPT=quiet gesetzt, um die Powerlevel10k-Integration zu aktivieren. 
Dies kann die Leistung verbessern, indem der Prompt sofort angezeigt wird. <0>Mehr erfahren" + "description": "Schalte dies ein, wenn du Powerlevel10k-Shell-Integration verwendest. <0>Mehr erfahren" }, "zdotdir": { - "label": "ZDOTDIR Handhabung aktivieren", - "description": "Wenn aktiviert, wird ein temporäres Verzeichnis für ZDOTDIR erstellt, um die Zsh-Shell-Integration ordnungsgemäß zu handhaben. Dies stellt sicher, dass die VSCode-Shell-Integration mit Zsh korrekt funktioniert, während deine Zsh-Konfiguration erhalten bleibt. <0>Mehr erfahren" + "label": "ZDOTDIR-Handhabung aktivieren", + "description": "Schalte dies ein, wenn zsh-Shell-Integration fehlschlägt oder mit deinen Dotfiles kollidiert. <0>Mehr erfahren" }, "inheritEnv": { "label": "Umgebungsvariablen erben", - "description": "Wenn aktiviert, erbt das Terminal Umgebungsvariablen aus dem übergeordneten Prozess von VSCode, wie z.B. benutzerdefinierte Shell-Integrationseinstellungen. Dies schaltet direkt die globale VSCode-Einstellung `terminal.integrated.inheritEnv` um. <0>Mehr erfahren" + "description": "Schalte dies ein, um Umgebungsvariablen vom übergeordneten VS Code-Prozess zu erben. <0>Mehr erfahren" } }, "advancedSettings": { @@ -746,7 +766,7 @@ "advanced": { "diff": { "label": "Bearbeitung durch Diffs aktivieren", - "description": "Wenn aktiviert, kann Kilo Code Dateien schneller bearbeiten und lehnt automatisch abgeschnittene vollständige Dateischreibvorgänge ab. Funktioniert am besten mit dem neuesten Claude 3.7 Sonnet-Modell.", + "description": "Wenn aktiviert, kann Kilo Code Dateien schneller bearbeiten und lehnt automatisch abgeschnittene vollständige Dateischreibvorgänge ab", "strategy": { "label": "Diff-Strategie", "options": { @@ -775,10 +795,6 @@ "name": "Experimentelle einheitliche Diff-Strategie verwenden", "description": "Aktiviere die experimentelle einheitliche Diff-Strategie. 
Diese Strategie könnte die Anzahl der durch Modellfehler verursachten Wiederholungsversuche reduzieren, kann aber zu unerwartetem Verhalten oder falschen Bearbeitungen führen. Aktiviere sie nur, wenn du die Risiken verstehst und bereit bist, alle Änderungen sorgfältig zu überprüfen." }, - "SEARCH_AND_REPLACE": { - "name": "Experimentelles Such- und Ersetzungswerkzeug verwenden", - "description": "Aktiviere das experimentelle Such- und Ersetzungswerkzeug, mit dem Kilo Code mehrere Instanzen eines Suchbegriffs in einer Anfrage ersetzen kann." - }, "INSERT_BLOCK": { "name": "Experimentelles Inhalts-Einfügewerkzeug verwenden", "description": "Aktiviere das experimentelle Inhalts-Einfügewerkzeug, mit dem Kilo Code Inhalte an bestimmten Zeilennummern einfügen kann, ohne einen Diff erstellen zu müssen." @@ -861,8 +877,6 @@ "modelInfo": { "supportsImages": "Unterstützt Bilder", "noImages": "Unterstützt keine Bilder", - "supportsComputerUse": "Unterstützt Computernutzung", - "noComputerUse": "Unterstützt keine Computernutzung", "supportsPromptCache": "Unterstützt Prompt-Cache", "noPromptCache": "Unterstützt keinen Prompt-Cache", "contextWindow": "Kontextfenster:", diff --git a/webview-ui/src/i18n/locales/en/chat.json b/webview-ui/src/i18n/locales/en/chat.json index 90481287a0d..5aae2a81dd1 100644 --- a/webview-ui/src/i18n/locales/en/chat.json +++ b/webview-ui/src/i18n/locales/en/chat.json @@ -162,6 +162,8 @@ "initializingWarning": "Still initializing checkpoint... 
If this takes too long, you can disable checkpoints in settings and restart your task.", "menu": { "viewDiff": "View Diff", + "viewDiffFromInit": "View All Changes", + "viewDiffWithCurrent": "View Changes Since This Checkpoint", "restore": "Restore Checkpoint", "restoreFiles": "Restore Files", "restoreFilesDescription": "Restores your project's files back to a snapshot taken at this point.", @@ -169,7 +171,8 @@ "confirm": "Confirm", "cancel": "Cancel", "cannotUndo": "This action cannot be undone.", - "restoreFilesAndTaskDescription": "Restores your project's files back to a snapshot taken at this point and deletes all messages after this point." + "restoreFilesAndTaskDescription": "Restores your project's files back to a snapshot taken at this point and deletes all messages after this point.", + "more": "More options" }, "current": "Current" }, @@ -197,8 +200,6 @@ "wantsToGenerateImageProtected": "Kilo Code wants to generate an image in a protected location", "didGenerateImage": "Kilo Code generated an image", "wantsToCreate": "Kilo Code wants to create a new file", - "wantsToSearchReplace": "Kilo Code wants to search and replace in this file", - "didSearchReplace": "Kilo Code performed search and replace on this file", "wantsToInsert": "Kilo Code wants to insert content into this file", "wantsToInsertWithLineNumber": "Kilo Code wants to insert content into this file at line {{lineNumber}}", "wantsToInsertAtEnd": "Kilo Code wants to append content to the end of this file" @@ -295,6 +296,7 @@ "toggleAriaLabel": "Toggle auto-approval", "disabledAriaLabel": "Auto-approval disabled - select options first", "triggerLabelOff": "Auto-approve off", + "triggerLabelOffShort": "Off", "triggerLabel_zero": "0 auto-approve", "triggerLabel_one": "1 auto-approved", "triggerLabel_other": "{{count}} auto-approved", @@ -309,6 +311,19 @@ "selectModel": "Select roo/code-supernova from the Roo Code Cloud provider in Settings to get started.", "goToSettingsButton": "Go to Settings" }, + 
"release": { + "heading": "New in the Extension:", + "openRouterEmbeddings": "Support for OpenRouter embedding models", + "chutesDynamic": "Chutes now loads the latest models dynamically", + "queuedMessagesFix": "Fixes for queued messages getting lost" + }, + "cloudAgents": { + "heading": "New in the Cloud:", + "prFixer": "Introducing the PR Fixer cloud agent to complement the PR Reviewer.", + "prFixerDescription": "The PR Fixer applies high-quality changes to your PRs, right from GitHub. Invoke via a PR comment and it will read the entire comment history to understand context, agreements, and tradeoffs - then implement the right fix.", + "tryPrFixerButton": "Try PR Fixer" + }, + "careers": "Also, we're hiring!", "socialLinks": "Join us on X, Discord, or r/RooCode 🚀" }, "reasoning": { diff --git a/webview-ui/src/i18n/locales/en/common.json b/webview-ui/src/i18n/locales/en/common.json index 8d0ad0c37e8..375cb4e70c0 100644 --- a/webview-ui/src/i18n/locales/en/common.json +++ b/webview-ui/src/i18n/locales/en/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}} months ago", "year_ago": "a year ago", "years_ago": "{{count}} years ago" + }, + "errors": { + "wait_checkpoint_long_time": "Waited {{timeout}} seconds for checkpoint initialization. If you don't need the checkpoint feature, please turn it off in the checkpoint settings.", + "init_checkpoint_fail_long_time": "Checkpoint initialization has taken more than {{timeout}} seconds, so checkpoints are disabled for this task. You can disable checkpoints or extend the waiting time in the checkpoint settings." 
} } diff --git a/webview-ui/src/i18n/locales/en/mcp.json b/webview-ui/src/i18n/locales/en/mcp.json index e4823046e63..70d8261bad9 100644 --- a/webview-ui/src/i18n/locales/en/mcp.json +++ b/webview-ui/src/i18n/locales/en/mcp.json @@ -26,12 +26,12 @@ "tabs": { "tools": "Tools", "resources": "Resources", - "errors": "Errors" + "logs": "Logs" }, "emptyState": { "noTools": "No tools found", "noResources": "No resources found", - "noErrors": "No errors found" + "noLogs": "No logs yet" }, "networkTimeout": { "label": "Network Timeout", diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json index 1e06881e156..de9f0d939c6 100644 --- a/webview-ui/src/i18n/locales/en/settings.json +++ b/webview-ui/src/i18n/locales/en/settings.json @@ -71,6 +71,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "API Key", "vercelAiGatewayApiKeyPlaceholder": "Enter your Vercel AI Gateway API key", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "OpenRouter API Key", + "openRouterApiKeyPlaceholder": "Enter your OpenRouter API key", "openaiCompatibleProvider": "OpenAI Compatible", "openAiKeyLabel": "OpenAI API Key", "openAiKeyPlaceholder": "Enter your OpenAI API key", @@ -137,6 +140,7 @@ "geminiApiKeyRequired": "Gemini API key is required", "mistralApiKeyRequired": "Mistral API key is required", "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API key is required", + "openRouterApiKeyRequired": "OpenRouter API key is required", "ollamaBaseUrlRequired": "Ollama base URL is required", "baseUrlRequired": "Base URL is required", "modelDimensionMinValue": "Model dimension must be greater than 0" @@ -279,7 +283,7 @@ "success": "Models list refreshed successfully!", "error": "Failed to refresh models list. Please try again." 
}, - "getRequestyApiKey": "Get Requesty API Key", + "getRequestyApiKey": "Create new Requesty API Key", "getRequestyBaseUrl": "Base URL", "requestyUseCustomBaseUrl": "Use custom base URL", "openRouterTransformsText": "Compress prompts and message chains to the context size (OpenRouter Transforms)", @@ -310,6 +314,9 @@ "moonshotApiKey": "Moonshot API Key", "getMoonshotApiKey": "Get Moonshot API Key", "moonshotBaseUrl": "Moonshot Entrypoint", + "minimaxApiKey": "MiniMax API Key", + "getMiniMaxApiKey": "Get MiniMax API Key", + "minimaxBaseUrl": "MiniMax Entrypoint", "zaiApiKey": "Z AI API Key", "getZaiApiKey": "Get Z AI API Key", "zaiEntrypoint": "Z AI Entrypoint", @@ -367,7 +374,7 @@ "enablePromptCachingTitle": "Enable prompt caching to improve performance and reduce costs for supported models.", "cacheUsageNote": "Note: If you don't see cache usage, try selecting a different model and then selecting your desired model again.", "vscodeLmModel": "Language Model", - "vscodeLmWarning": "Note: This is a very experimental integration and provider support will vary. If you get an error about a model not being supported, that's an issue on the provider's end.", + "vscodeLmWarning": "Note: Models accessed via the VS Code Language Model API may be wrapped or fine-tuned by the provider, so behavior can differ from using the same model directly from a typical provider or router. To use a model from the Language Model dropdown, first switch to that model and then click Accept in the Copilot Chat prompt; otherwise you may see an error such as 400 \"The requested model is not supported\".", "geminiParameters": { "urlContext": { "title": "Enable URL context", @@ -444,7 +451,7 @@ }, "computerUse": { "label": "Computer Use", - "description": "Is this model capable of interacting with a browser? (e.g. Claude 3.7 Sonnet)." + "description": "Is this model capable of interacting with a browser?" 
}, "promptCache": { "label": "Prompt Caching", @@ -486,6 +493,7 @@ }, "reasoningEffort": { "label": "Model Reasoning Effort", + "none": "None", "minimal": "Minimal (Fastest)", "low": "Low", "medium": "Medium", @@ -558,6 +566,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Checkpoint initialization timeout (seconds)", + "description": "Maximum time to wait for checkpoint service initialization. Default is 15 seconds. Range: 10-60 seconds." + }, "enable": { "label": "Enable automatic checkpoints", "description": "When enabled, Kilo Code will automatically create checkpoints during task execution, making it easy to review changes or revert to earlier states. <0>Learn more" @@ -711,6 +723,14 @@ "profileDescription": "Custom threshold for this profile only (overrides global default)", "inheritDescription": "This profile inherits the global default threshold ({{threshold}}%)", "usesGlobal": "(uses global {{threshold}}%)" + }, + "includeCurrentTime": { + "label": "Include current time in context", + "description": "When enabled, the current time and timezone information will be included in the system prompt. Disable this if models are stopping work due to time concerns." + }, + "includeCurrentCost": { + "label": "Include current cost in context", + "description": "When enabled, the current API usage cost will be included in the system prompt. Disable this if models are stopping work due to cost concerns." } }, "terminal": { @@ -720,55 +740,55 @@ }, "advanced": { "label": "Terminal Settings: Advanced", - "description": "The following options may require a terminal restart to apply the setting." + "description": "These settings apply only when 'Use Inline Terminal' is disabled. They affect the VS Code terminal only and may require restarting the IDE." }, "outputLineLimit": { "label": "Terminal output limit", - "description": "Maximum number of lines to include in terminal output when executing commands. 
When exceeded lines will be removed from the middle, saving tokens. <0>Learn more" + "description": "Keeps the first and last lines and drops the middle to stay under the limit. Lower to save tokens; raise to give Kilo Code more middle detail. Kilo Code sees a placeholder where the content is skipped. <0>Learn more" }, "outputCharacterLimit": { "label": "Terminal character limit", - "description": "Maximum number of characters to include in terminal output when executing commands. This limit takes precedence over the line limit to prevent memory issues from extremely long lines. When exceeded, output will be truncated. <0>Learn more" + "description": "Overrides the line limit to prevent memory issues by enforcing a hard cap on output size. If exceeded, keeps the beginning and end and shows a placeholder to Kilo Code where content is skipped. <0>Learn more" }, "shellIntegrationTimeout": { "label": "Terminal shell integration timeout", - "description": "Maximum time to wait for shell integration to initialize before executing commands. For users with long shell startup times, this value may need to be increased if you see \"Shell Integration Unavailable\" errors in the terminal. <0>Learn more" + "description": "How long to wait for VS Code shell integration before running commands. Raise if your shell starts slowly or you see 'Shell Integration Unavailable' errors. <0>Learn more" }, "shellIntegrationDisabled": { - "label": "Disable terminal shell integration", - "description": "Enable this if terminal commands aren't working correctly or you see 'Shell Integration Unavailable' errors. This uses a simpler method to run commands, bypassing some advanced terminal features. <0>Learn more" + "label": "Use Inline Terminal (recommended)", + "description": "Run commands in the Inline Terminal (chat) to bypass shell profiles/integration for faster, more reliable runs. When disabled, Kilo Code uses the VS Code terminal with your shell profile, prompts, and plugins.
<0>Learn more" }, "commandDelay": { "label": "Terminal command delay", - "description": "Delay in milliseconds to add after command execution. The default setting of 0 disables the delay completely. This can help ensure command output is fully captured in terminals with timing issues. In most terminals it is implemented by setting `PROMPT_COMMAND='sleep N'` and Powershell appends `start-sleep` to the end of each command. Originally was workaround for VSCode bug#237208 and may not be needed. <0>Learn more" + "description": "Adds a short pause after each command so the VS Code terminal can flush all output (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). Use only if you see missing tail output; otherwise leave at 0. <0>Learn more" }, "compressProgressBar": { "label": "Compress progress bar output", - "description": "When enabled, processes terminal output with carriage returns (\\r) to simulate how a real terminal would display content. This removes intermediate progress bar states, retaining only the final state, which conserves context space for more relevant information. <0>Learn more" + "description": "Collapses progress bars/spinners so only the final state is kept (saves tokens). <0>Learn more" }, "powershellCounter": { "label": "Enable PowerShell counter workaround", - "description": "When enabled, adds a counter to PowerShell commands to ensure proper command execution. This helps with PowerShell terminals that might have issues with command output capture. <0>Learn more" + "description": "Turn this on when PowerShell output is missing or duplicated; it appends a tiny counter to each command to stabilize output. Keep this off if output already looks correct. <0>Learn more" }, "zshClearEolMark": { "label": "Clear ZSH EOL mark", - "description": "When enabled, clears the ZSH end-of-line mark by setting PROMPT_EOL_MARK=''. This prevents issues with command output interpretation when output ends with special characters like '%'. 
<0>Learn more" + "description": "Turn this on when you see stray % at the end of lines or parsing looks wrong; it omits Zsh’s end‑of‑line mark (%). <0>Learn more" }, "zshOhMy": { "label": "Enable Oh My Zsh integration", - "description": "When enabled, sets ITERM_SHELL_INTEGRATION_INSTALLED=Yes to enable Oh My Zsh shell integration features. Applying this setting might require restarting the IDE. <0>Learn more" + "description": "Turn this on when your Oh My Zsh theme/plugins expect shell integration; it sets ITERM_SHELL_INTEGRATION_INSTALLED=Yes. Turn this off to avoid setting that variable. <0>Learn more" }, "zshP10k": { "label": "Enable Powerlevel10k integration", - "description": "When enabled, sets POWERLEVEL9K_TERM_SHELL_INTEGRATION=true to enable Powerlevel10k shell integration features. <0>Learn more" + "description": "Turn this on when using Powerlevel10k shell integration. <0>Learn more" }, "zdotdir": { "label": "Enable ZDOTDIR handling", - "description": "When enabled, creates a temporary directory for ZDOTDIR to handle zsh shell integration properly. This ensures VSCode shell integration works correctly with zsh while preserving your zsh configuration. <0>Learn more" + "description": "Turn this on when zsh shell integration fails or conflicts with your dotfiles. <0>Learn more" }, "inheritEnv": { "label": "Inherit environment variables", - "description": "When enabled, the terminal will inherit environment variables from VSCode's parent process, such as user-profile-defined shell integration settings. This directly toggles VSCode global setting `terminal.integrated.inheritEnv`. <0>Learn more" + "description": "Turn this on to inherit environment variables from the parent VS Code process. <0>Learn more" } }, "advancedSettings": { @@ -777,7 +797,7 @@ "advanced": { "diff": { "label": "Enable editing through diffs", - "description": "When enabled, Kilo Code will be able to edit files more quickly and will automatically reject truncated full-file writes. 
Works best with the latest Claude 4 Sonnet model.", + "description": "When enabled, Kilo Code will be able to edit files more quickly and will automatically reject truncated full-file writes", "strategy": { "label": "Diff strategy", "options": { @@ -806,10 +826,6 @@ "name": "Use experimental unified diff strategy", "description": "Enable the experimental unified diff strategy. This strategy might reduce the number of retries caused by model errors but may cause unexpected behavior or incorrect edits. Only enable if you understand the risks and are willing to carefully review all changes." }, - "SEARCH_AND_REPLACE": { - "name": "Use experimental search and replace tool", - "description": "Enable the experimental search and replace tool, allowing Kilo Code to replace multiple instances of a search term in one request." - }, "INSERT_BLOCK": { "name": "Use experimental insert content tool", "description": "Enable the experimental insert content tool, allowing Kilo Code to insert content at specific line numbers without needing to create a diff." @@ -892,8 +908,6 @@ "modelInfo": { "supportsImages": "Supports images", "noImages": "Does not support images", - "supportsComputerUse": "Supports computer use", - "noComputerUse": "Does not support computer use", "supportsPromptCache": "Supports prompt caching", "noPromptCache": "Does not support prompt caching", "contextWindow": "Context Window:", diff --git a/webview-ui/src/i18n/locales/es/chat.json b/webview-ui/src/i18n/locales/es/chat.json index 50a366c59c5..9914990b6d9 100644 --- a/webview-ui/src/i18n/locales/es/chat.json +++ b/webview-ui/src/i18n/locales/es/chat.json @@ -156,6 +156,9 @@ "initializingWarning": "Todavía inicializando el punto de control... 
Si esto tarda demasiado, puedes desactivar los puntos de control en la configuración y reiniciar tu tarea.", "menu": { "viewDiff": "Ver diferencias", + "more": "Más opciones", + "viewDiffFromInit": "Ver todos los cambios", + "viewDiffWithCurrent": "Ver cambios desde este punto de control", "restore": "Restaurar punto de control", "restoreFiles": "Restaurar archivos", "restoreFilesDescription": "Restaura los archivos de tu proyecto a una instantánea tomada en este punto.", @@ -272,6 +275,7 @@ "toggleAriaLabel": "Conmutar la aprobación automática", "disabledAriaLabel": "Aprobación automática deshabilitada: selecciona primero las opciones", "triggerLabelOff": "Aprobación automática desactivada", + "triggerLabelOffShort": "Desactivado", "triggerLabel_zero": "0 aprobaciones automáticas", "triggerLabel_one": "1 aprobación automática", "triggerLabel_other": "{{count}} aprobaciones automáticas", @@ -301,6 +305,19 @@ "selectModel": "Selecciona roo/code-supernova del proveedor Roo Code Cloud en Configuración para comenzar.", "goToSettingsButton": "Ir a Configuración" }, + "release": { + "heading": "Novedades en la Extensión:", + "openRouterEmbeddings": "Soporte para modelos de embeddings de OpenRouter", + "chutesDynamic": "Chutes ahora carga los modelos más recientes dinámicamente", + "queuedMessagesFix": "Correcciones para mensajes en cola que se perdían" + }, + "cloudAgents": { + "heading": "Novedades en la Nube:", + "prFixer": "Presentamos el agente en la nube PR Fixer para complementar el PR Reviewer.", + "prFixerDescription": "El PR Fixer de Roo Code aplica cambios de alta calidad a tus PRs, directamente desde GitHub. 
Invócalo mediante un comentario en el PR y leerá todo el historial de comentarios para entender el contexto, los acuerdos y las compensaciones — luego implementará la solución correcta.", + "tryPrFixerButton": "Probar PR Fixer" + }, + "careers": "Además, ¡estamos contratando!", "socialLinks": "Únete a nosotros en X, Discord, o r/RooCode 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/es/common.json b/webview-ui/src/i18n/locales/es/common.json index e529f20163c..4bfe801a8b4 100644 --- a/webview-ui/src/i18n/locales/es/common.json +++ b/webview-ui/src/i18n/locales/es/common.json @@ -114,5 +114,9 @@ "months_ago": "hace {{count}} meses", "year_ago": "hace un año", "years_ago": "hace {{count}} años" + }, + "errors": { + "wait_checkpoint_long_time": "Has esperado {{timeout}} segundos para la inicialización del punto de control. Si no necesitas esta función, desactívala en la configuración del punto de control.", + "init_checkpoint_fail_long_time": "La inicialización del punto de control ha tardado más de {{timeout}} segundos, por lo que los puntos de control están desactivados para esta tarea. Puedes desactivar los puntos de control o aumentar el tiempo de espera en la configuración del punto de control." 
} } diff --git a/webview-ui/src/i18n/locales/es/mcp.json b/webview-ui/src/i18n/locales/es/mcp.json index 38a51477ad7..14580af3b2e 100644 --- a/webview-ui/src/i18n/locales/es/mcp.json +++ b/webview-ui/src/i18n/locales/es/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "Herramientas", "resources": "Recursos", - "errors": "Errores" + "logs": "Registros" }, "emptyState": { "noTools": "No se encontraron herramientas", "noResources": "No se encontraron recursos", - "noErrors": "No se encontraron errores" + "noLogs": "Aún no hay registros" }, "networkTimeout": { "label": "Tiempo de espera de red", diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json index c9ac01aa2ba..6150d29c4e0 100644 --- a/webview-ui/src/i18n/locales/es/settings.json +++ b/webview-ui/src/i18n/locales/es/settings.json @@ -62,6 +62,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "Clave API", "vercelAiGatewayApiKeyPlaceholder": "Introduce tu clave API de Vercel AI Gateway", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "Clave de API de OpenRouter", + "openRouterApiKeyPlaceholder": "Introduce tu clave de API de OpenRouter", "mistralProvider": "Mistral", "mistralApiKeyLabel": "Clave API:", "mistralApiKeyPlaceholder": "Introduce tu clave de API de Mistral", @@ -126,7 +129,8 @@ "vercelAiGatewayApiKeyRequired": "Se requiere la clave API de Vercel AI Gateway", "ollamaBaseUrlRequired": "Se requiere la URL base de Ollama", "baseUrlRequired": "Se requiere la URL base", - "modelDimensionMinValue": "La dimensión del modelo debe ser mayor que 0" + "modelDimensionMinValue": "La dimensión del modelo debe ser mayor que 0", + "openRouterApiKeyRequired": "Se requiere la clave API de OpenRouter" }, "advancedConfigLabel": "Configuración avanzada", "searchMinScoreLabel": "Umbral de puntuación de búsqueda", @@ -309,6 +313,9 @@ "getZaiApiKey": "Obtener clave API de Z AI", "zaiEntrypoint": "Punto de entrada de Z AI", 
"zaiEntrypointDescription": "Por favor, seleccione el punto de entrada de API apropiado según su ubicación. Si está en China, elija open.bigmodel.cn. De lo contrario, elija api.z.ai.", + "minimaxApiKey": "Clave API de MiniMax", + "getMiniMaxApiKey": "Obtener clave API de MiniMax", + "minimaxBaseUrl": "Punto de entrada de MiniMax", "geminiApiKey": "Clave API de Gemini", "getGroqApiKey": "Obtener clave API de Groq", "groqApiKey": "Clave API de Groq", @@ -362,7 +369,7 @@ "enablePromptCachingTitle": "Habilitar el caché de prompts para mejorar el rendimiento y reducir costos para modelos compatibles.", "cacheUsageNote": "Nota: Si no ve el uso del caché, intente seleccionar un modelo diferente y luego seleccionar nuevamente su modelo deseado.", "vscodeLmModel": "Modelo de lenguaje", - "vscodeLmWarning": "Nota: Esta es una integración muy experimental y el soporte del proveedor variará. Si recibe un error sobre un modelo no compatible, es un problema del proveedor.", + "vscodeLmWarning": "Nota: Los modelos a los que se accede a través de la API de modelos de lenguaje de VS Code pueden estar envueltos o ajustados por el proveedor, por lo que su comportamiento puede diferir del uso directo del mismo modelo desde un proveedor o enrutador típico. Para usar un modelo del menú desplegable «Language Model», primero cambia a ese modelo y luego haz clic en «Aceptar» en el aviso de Copilot Chat; de lo contrario, puedes ver un error como 400 «The requested model is not supported».", "geminiParameters": { "urlContext": { "title": "Habilitar contexto de URL", @@ -439,7 +446,7 @@ }, "computerUse": { "label": "Uso del ordenador", - "description": "¿Es este modelo capaz de interactuar con un navegador? (ej. Claude 3.7 Sonnet)" + "description": "¿Es este modelo capaz de interactuar con un navegador?" 
}, "promptCache": { "label": "Caché de prompts", @@ -481,6 +488,7 @@ }, "reasoningEffort": { "label": "Esfuerzo de razonamiento del modelo", + "none": "Ninguno", "minimal": "Mínimo (el más rápido)", "high": "Alto", "medium": "Medio", @@ -553,6 +561,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Tiempo de espera para inicializar el punto de control (segundos)", + "description": "Tiempo máximo de espera para inicializar el servicio de puntos de control. El valor por defecto es 15 segundos. Rango: 10-60 segundos." + }, "enable": { "label": "Habilitar puntos de control automáticos", "description": "Cuando está habilitado, Kilo Code creará automáticamente puntos de control durante la ejecución de tareas, facilitando la revisión de cambios o la reversión a estados anteriores. <0>Más información" @@ -680,6 +692,14 @@ "profileDescription": "Umbral personalizado solo para este perfil (anula el predeterminado global)", "inheritDescription": "Este perfil hereda el umbral predeterminado global ({{threshold}}%)", "usesGlobal": "(usa global {{threshold}}%)" + }, + "includeCurrentTime": { + "label": "Incluir hora actual en el contexto", + "description": "Cuando está habilitado, la hora actual y la información de la zona horaria se incluirán en el prompt del sistema. Deshabilítelo si los modelos dejan de funcionar por problemas de tiempo." + }, + "includeCurrentCost": { + "label": "Incluir costo actual en el contexto", + "description": "Cuando está habilitado, el costo de uso actual de la API se incluirá en el prompt del sistema. Deshabilítelo si los modelos dejan de funcionar por problemas de costos." } }, "terminal": { @@ -689,55 +709,55 @@ }, "advanced": { "label": "Configuración del terminal: Avanzada", - "description": "Las siguientes opciones pueden requerir reiniciar el terminal para aplicar la configuración." + "description": "Estos ajustes solo se aplican cuando 'Usar terminal en línea' está desactivado. 
Afectan solo al terminal de VS Code y pueden requerir reiniciar el IDE." }, "outputLineLimit": { - "label": "Límite de salida de terminal", - "description": "Número máximo de líneas a incluir en la salida del terminal al ejecutar comandos. Cuando se excede, se eliminarán líneas del medio, ahorrando token. <0>Más información" + "label": "Límite de salida del terminal", + "description": "Mantiene las primeras y últimas líneas y descarta las intermedias para mantenerse bajo el límite. Reduce para ahorrar tokens; aumenta para dar a Kilo Code más detalles intermedios. Kilo Code ve un marcador donde se omite el contenido. <0>Más información" }, "outputCharacterLimit": { "label": "Límite de caracteres del terminal", - "description": "Número máximo de caracteres a incluir en la salida del terminal al ejecutar comandos. Este límite tiene prioridad sobre el límite de líneas para evitar problemas de memoria con líneas extremadamente largas. Cuando se excede, la salida se truncará. <0>Aprende más" + "description": "Anula el límite de líneas para evitar problemas de memoria imponiendo un límite estricto al tamaño de salida. Si se excede, mantiene el inicio y el final y muestra un marcador a Kilo Code donde se omite el contenido. <0>Más información" }, "shellIntegrationTimeout": { "label": "Tiempo de espera de integración del shell del terminal", - "description": "Tiempo máximo de espera para la inicialización de la integración del shell antes de ejecutar comandos. Para usuarios con tiempos de inicio de shell largos, este valor puede necesitar ser aumentado si ve errores \"Shell Integration Unavailable\" en el terminal. <0>Más información" + "description": "Cuánto tiempo esperar la integración del shell de VS Code antes de ejecutar comandos. Aumenta si tu shell inicia lentamente o ves errores 'Integración del Shell No Disponible'. 
<0>Más información" }, "shellIntegrationDisabled": { - "label": "Desactivar la integración del shell del terminal", - "description": "Activa esto si los comandos del terminal no funcionan correctamente o si ves errores de 'Shell Integration Unavailable'. Esto utiliza un método más simple para ejecutar comandos, omitiendo algunas funciones avanzadas del terminal. <0>Más información" + "label": "Usar terminal en línea (recomendado)", + "description": "Ejecuta comandos en el terminal en línea (chat) para evitar perfiles/integración del shell para ejecuciones más rápidas y confiables. Cuando está desactivado, Kilo Code usa el terminal de VS Code con tu perfil de shell, prompts y plugins. <0>Más información" }, "commandDelay": { "label": "Retraso de comando del terminal", - "description": "Retraso en milisegundos para añadir después de la ejecución del comando. La configuración predeterminada de 0 desactiva completamente el retraso. Esto puede ayudar a asegurar que la salida del comando se capture completamente en terminales con problemas de temporización. En la mayoría de terminales se implementa estableciendo `PROMPT_COMMAND='sleep N'` y Powershell añade `start-sleep` al final de cada comando. Originalmente era una solución para el error VSCode#237208 y puede no ser necesario. <0>Más información" + "description": "Añade una pausa breve después de cada comando para que el terminal de VS Code pueda vaciar toda la salida (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). Usa solo si ves salida final faltante; si no, deja en 0. <0>Más información" }, "compressProgressBar": { - "label": "Comprimir salida de barras de progreso", - "description": "Cuando está habilitado, procesa la salida del terminal con retornos de carro (\\r) para simular cómo un terminal real mostraría el contenido. Esto elimina los estados intermedios de las barras de progreso, conservando solo el estado final, lo que ahorra espacio de contexto para información más relevante. 
<0>Más información" + "label": "Comprimir salida de barra de progreso", + "description": "Colapsa barras de progreso/spinners para que solo se mantenga el estado final (ahorra tokens). <0>Más información" }, "powershellCounter": { - "label": "Habilitar solución temporal del contador de PowerShell", - "description": "Cuando está habilitado, agrega un contador a los comandos de PowerShell para garantizar la ejecución correcta de los comandos. Esto ayuda con las terminales PowerShell que pueden tener problemas con la captura de salida de comandos. <0>Más información" + "label": "Activar solución del contador de PowerShell", + "description": "Activa cuando falta o se duplica la salida de PowerShell; añade un pequeño contador a cada comando para estabilizar la salida. Mantén desactivado si la salida ya se ve correcta. <0>Más información" }, "zshClearEolMark": { - "label": "Limpiar marca de fin de línea de ZSH", - "description": "Cuando está habilitado, limpia la marca de fin de línea de ZSH estableciendo PROMPT_EOL_MARK=''. Esto evita problemas con la interpretación de la salida de comandos cuando termina con caracteres especiales como '%'. <0>Más información" + "label": "Limpiar marca EOL de ZSH", + "description": "Activa cuando veas % extraviados al final de líneas o el análisis parezca incorrecto; omite la marca de final de línea (%) de Zsh. <0>Más información" }, "zshOhMy": { - "label": "Habilitar integración Oh My Zsh", - "description": "Cuando está habilitado, establece ITERM_SHELL_INTEGRATION_INSTALLED=Yes para habilitar las características de integración del shell Oh My Zsh. Aplicar esta configuración puede requerir reiniciar el IDE. <0>Más información" + "label": "Activar integración con Oh My Zsh", + "description": "Activa cuando tu tema/plugins de Oh My Zsh esperen integración del shell; establece ITERM_SHELL_INTEGRATION_INSTALLED=Yes. Desactiva para evitar establecer esa variable. 
<0>Más información" }, "zshP10k": { - "label": "Habilitar integración Powerlevel10k", - "description": "Cuando está habilitado, establece POWERLEVEL9K_TERM_SHELL_INTEGRATION=true para habilitar las características de integración del shell Powerlevel10k. <0>Más información" + "label": "Activar integración con Powerlevel10k", + "description": "Activa cuando uses integración del shell de Powerlevel10k. <0>Más información" }, "zdotdir": { - "label": "Habilitar gestión de ZDOTDIR", - "description": "Cuando está habilitado, crea un directorio temporal para ZDOTDIR para manejar correctamente la integración del shell zsh. Esto asegura que la integración del shell de VSCode funcione correctamente con zsh mientras preserva tu configuración de zsh. <0>Más información" + "label": "Activar manejo de ZDOTDIR", + "description": "Activa cuando la integración del shell de zsh falle o entre en conflicto con tus dotfiles. <0>Más información" }, "inheritEnv": { "label": "Heredar variables de entorno", - "description": "Cuando está habilitado, el terminal hereda las variables de entorno del proceso padre de VSCode, como la configuración de integración del shell definida en el perfil del usuario. Esto alterna directamente la configuración global de VSCode `terminal.integrated.inheritEnv`. <0>Más información" + "description": "Activa para heredar variables de entorno del proceso padre de VS Code. <0>Más información" } }, "advancedSettings": { @@ -746,7 +766,7 @@ "advanced": { "diff": { "label": "Habilitar edición a través de diffs", - "description": "Cuando está habilitado, Kilo Code podrá editar archivos más rápidamente y rechazará automáticamente escrituras completas de archivos truncados. 
Funciona mejor con el último modelo Claude 4 Sonnet.", + "description": "Cuando está habilitado, Kilo Code podrá editar archivos más rápidamente y rechazará automáticamente escrituras completas de archivos truncados", "strategy": { "label": "Estrategia de diff", "options": { @@ -775,10 +795,6 @@ "name": "Usar estrategia de diff unificada experimental", "description": "Habilitar la estrategia de diff unificada experimental. Esta estrategia podría reducir el número de reintentos causados por errores del modelo, pero puede causar comportamientos inesperados o ediciones incorrectas. Habilítela solo si comprende los riesgos y está dispuesto a revisar cuidadosamente todos los cambios." }, - "SEARCH_AND_REPLACE": { - "name": "Usar herramienta experimental de búsqueda y reemplazo", - "description": "Habilitar la herramienta experimental de búsqueda y reemplazo, permitiendo a Kilo Code reemplazar múltiples instancias de un término de búsqueda en una sola solicitud." - }, "INSERT_BLOCK": { "name": "Usar herramienta experimental de inserción de contenido", "description": "Habilitar la herramienta experimental de inserción de contenido, permitiendo a Kilo Code insertar contenido en números de línea específicos sin necesidad de crear un diff." @@ -865,8 +881,6 @@ "modelInfo": { "supportsImages": "Soporta imágenes", "noImages": "No soporta imágenes", - "supportsComputerUse": "Soporta uso del ordenador", - "noComputerUse": "No soporta uso del ordenador", "supportsPromptCache": "Soporta caché de prompts", "noPromptCache": "No soporta caché de prompts", "contextWindow": "Ventana de contexto", diff --git a/webview-ui/src/i18n/locales/fr/chat.json b/webview-ui/src/i18n/locales/fr/chat.json index bd69c7ae919..ce54fab4cf3 100644 --- a/webview-ui/src/i18n/locales/fr/chat.json +++ b/webview-ui/src/i18n/locales/fr/chat.json @@ -156,6 +156,9 @@ "initializingWarning": "Initialisation du point de contrôle en cours... 
Si cela prend trop de temps, tu peux désactiver les points de contrôle dans les paramètres et redémarrer ta tâche.", "menu": { "viewDiff": "Voir les différences", + "more": "Plus d'options", + "viewDiffFromInit": "Voir toutes les modifications", + "viewDiffWithCurrent": "Voir les modifications depuis ce point de contrôle", "restore": "Restaurer le point de contrôle", "restoreFiles": "Restaurer les fichiers", "restoreFilesDescription": "Restaure les fichiers de votre projet à un instantané pris à ce moment.", @@ -272,6 +275,7 @@ "toggleAriaLabel": "Basculer l'approbation automatique", "disabledAriaLabel": "Approbation automatique désactivée - sélectionnez d'abord les options", "triggerLabelOff": "Approbation automatique désactivée", + "triggerLabelOffShort": "Désactivé", "triggerLabel_zero": "0 approuvé automatiquement", "triggerLabel_one": "1 approuvé automatiquement", "triggerLabel_other": "{{count}} approuvés automatiquement", @@ -301,6 +305,19 @@ "selectModel": "Sélectionnez roo/code-supernova du fournisseur Roo Code Cloud dans Paramètres pour commencer.", "goToSettingsButton": "Aller aux Paramètres" }, + "release": { + "heading": "Nouveautés dans l'Extension :", + "openRouterEmbeddings": "Prise en charge des modèles d'embeddings OpenRouter", + "chutesDynamic": "Chutes charge maintenant les derniers modèles dynamiquement", + "queuedMessagesFix": "Corrections pour les messages en file d'attente qui se perdent" + }, + "cloudAgents": { + "heading": "Nouveautés dans le Cloud :", + "prFixer": "Présentation de l'agent cloud PR Fixer pour compléter le Réviseur de PR.", + "prFixerDescription": "Le PR Fixer de Roo Code applique des modifications de haute qualité à vos PR, directement depuis GitHub. 
Invoquez-le via un commentaire de PR et il lira tout l'historique des commentaires pour comprendre le contexte, les accords et les compromis - puis implémentera la bonne correction.", + "tryPrFixerButton": "Essayer le PR Fixer" + }, + "careers": "Aussi, on recrute !", "socialLinks": "Rejoins-nous sur X, Discord, ou r/RooCode 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/fr/common.json b/webview-ui/src/i18n/locales/fr/common.json index 82dc5431385..8a7825e1348 100644 --- a/webview-ui/src/i18n/locales/fr/common.json +++ b/webview-ui/src/i18n/locales/fr/common.json @@ -114,5 +114,9 @@ "months_ago": "il y a {{count}} mois", "year_ago": "il y a un an", "years_ago": "il y a {{count}} ans" + }, + "errors": { + "wait_checkpoint_long_time": "Tu as attendu {{timeout}} secondes pour l'initialisation du checkpoint. Si tu n'as pas besoin de cette fonction, désactive-la dans les paramètres du checkpoint.", + "init_checkpoint_fail_long_time": "L'initialisation du checkpoint a pris plus de {{timeout}} secondes, donc les checkpoints sont désactivés pour cette tâche. Tu peux désactiver les checkpoints ou prolonger le délai dans les paramètres du checkpoint." 
} } diff --git a/webview-ui/src/i18n/locales/fr/mcp.json b/webview-ui/src/i18n/locales/fr/mcp.json index 90507e5a9b8..e14f73a9fe8 100644 --- a/webview-ui/src/i18n/locales/fr/mcp.json +++ b/webview-ui/src/i18n/locales/fr/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "Outils", "resources": "Ressources", - "errors": "Erreurs" + "logs": "Journaux" }, "emptyState": { "noTools": "Aucun outil trouvé", "noResources": "Aucune ressource trouvée", - "noErrors": "Aucune erreur trouvée" + "noLogs": "Pas encore de journaux" }, "networkTimeout": { "label": "Délai d'attente réseau", diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json index 4b1bb0424fb..e0158264bee 100644 --- a/webview-ui/src/i18n/locales/fr/settings.json +++ b/webview-ui/src/i18n/locales/fr/settings.json @@ -62,6 +62,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "Clé API", "vercelAiGatewayApiKeyPlaceholder": "Entrez votre clé API Vercel AI Gateway", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "Clé d'API OpenRouter", + "openRouterApiKeyPlaceholder": "Entrez votre clé d'API OpenRouter", "mistralProvider": "Mistral", "mistralApiKeyLabel": "Clé d'API:", "mistralApiKeyPlaceholder": "Entrez votre clé API Mistral", @@ -126,7 +129,8 @@ "vercelAiGatewayApiKeyRequired": "La clé API Vercel AI Gateway est requise", "ollamaBaseUrlRequired": "L'URL de base Ollama est requise", "baseUrlRequired": "L'URL de base est requise", - "modelDimensionMinValue": "La dimension du modèle doit être supérieure à 0" + "modelDimensionMinValue": "La dimension du modèle doit être supérieure à 0", + "openRouterApiKeyRequired": "Clé API OpenRouter est requise" }, "advancedConfigLabel": "Configuration avancée", "searchMinScoreLabel": "Seuil de score de recherche", @@ -309,6 +313,9 @@ "getZaiApiKey": "Obtenir la clé API Z AI", "zaiEntrypoint": "Point d'entrée Z AI", "zaiEntrypointDescription": "Veuillez sélectionner le point d'entrée API 
approprié en fonction de votre emplacement. Si vous êtes en Chine, choisissez open.bigmodel.cn. Sinon, choisissez api.z.ai.", + "minimaxApiKey": "Clé API MiniMax", + "getMiniMaxApiKey": "Obtenir la clé API MiniMax", + "minimaxBaseUrl": "Point d'entrée MiniMax", "geminiApiKey": "Clé API Gemini", "getGroqApiKey": "Obtenir la clé API Groq", "groqApiKey": "Clé API Groq", @@ -362,7 +369,7 @@ "enablePromptCachingTitle": "Activer la mise en cache des prompts pour améliorer les performances et réduire les coûts pour les modèles pris en charge.", "cacheUsageNote": "Remarque : Si vous ne voyez pas l'utilisation du cache, essayez de sélectionner un modèle différent puis de sélectionner à nouveau votre modèle souhaité.", "vscodeLmModel": "Modèle de langage", - "vscodeLmWarning": "Remarque : Il s'agit d'une intégration très expérimentale et le support des fournisseurs variera. Si vous recevez une erreur concernant un modèle non pris en charge, c'est un problème du côté du fournisseur.", + "vscodeLmWarning": "Remarque : Les modèles accessibles via l’API VS Code Language Model peuvent être encapsulés ou ajustés par le fournisseur ; leur comportement peut donc différer de l’utilisation directe du même modèle auprès d’un fournisseur ou routeur classique. Pour utiliser un modèle depuis la liste « Language Model », bascule d’abord sur ce modèle puis clique sur « Accepter » dans l’invite de Copilot Chat ; sinon, une erreur telle que 400 « The requested model is not supported » peut apparaître.", "geminiParameters": { "urlContext": { "title": "Activer le contexte d'URL", @@ -439,7 +446,7 @@ }, "computerUse": { "label": "Utilisation de l'ordinateur", - "description": "Ce modèle est-il capable d'interagir avec un navigateur ? (ex. Claude 3.7 Sonnet)" + "description": "Ce modèle est-il capable d'interagir avec un navigateur ?" 
}, "promptCache": { "label": "Mise en cache des prompts", @@ -481,6 +488,7 @@ }, "reasoningEffort": { "label": "Effort de raisonnement du modèle", + "none": "Aucun", "minimal": "Minimal (le plus rapide)", "high": "Élevé", "medium": "Moyen", @@ -553,6 +561,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Délai d'initialisation du point de contrôle (secondes)", + "description": "Temps d'attente maximum pour l'initialisation du service de points de contrôle. Par défaut : 15 secondes. Plage : 10-60 secondes." + }, "enable": { "label": "Activer les points de contrôle automatiques", "description": "Lorsque cette option est activée, Kilo Code créera automatiquement des points de contrôle pendant l'exécution des tâches, facilitant la révision des modifications ou le retour à des états antérieurs. <0>En savoir plus" @@ -680,6 +692,14 @@ "profileDescription": "Seuil personnalisé pour ce profil uniquement (remplace le défaut global)", "inheritDescription": "Ce profil hérite du seuil par défaut global ({{threshold}}%)", "usesGlobal": "(utilise global {{threshold}}%)" + }, + "includeCurrentTime": { + "label": "Inclure l'heure actuelle dans le contexte", + "description": "Lorsque cette option est activée, l'heure actuelle et les informations de fuseau horaire seront incluses dans le prompt système. Désactivez cette option si les modèles cessent de fonctionner en raison de problèmes liés à l'heure." + }, + "includeCurrentCost": { + "label": "Inclure le coût actuel dans le contexte", + "description": "Lorsque cette option est activée, le coût d'utilisation actuel de l'API sera inclus dans le prompt système. Désactivez cette option si les modèles cessent de fonctionner en raison de problèmes de coût." } }, "terminal": { @@ -689,55 +709,55 @@ }, "advanced": { "label": "Paramètres du terminal : Avancé", - "description": "Les options suivantes peuvent nécessiter un redémarrage du terminal pour appliquer le paramètre." 
+ "description": "Ces paramètres s'appliquent uniquement lorsque 'Utiliser le terminal en ligne' est désactivé. Ils affectent uniquement le terminal VS Code et peuvent nécessiter un redémarrage de l'IDE." }, "outputLineLimit": { "label": "Limite de sortie du terminal", - "description": "Nombre maximum de lignes à inclure dans la sortie du terminal lors de l'exécution de commandes. Lorsque ce nombre est dépassé, les lignes seront supprimées du milieu, économisant des token. <0>En savoir plus" + "description": "Conserve les premières et dernières lignes et supprime celles du milieu pour rester sous la limite. Réduire pour économiser des jetons ; augmenter pour donner à Kilo Code plus de détails intermédiaires. Kilo Code voit un espace réservé là où le contenu est ignoré. <0>En savoir plus" }, "outputCharacterLimit": { "label": "Limite de caractères du terminal", - "description": "Nombre maximum de caractères à inclure dans la sortie du terminal lors de l'exécution de commandes. Cette limite prévaut sur la limite de lignes pour éviter les problèmes de mémoire avec des lignes extrêmement longues. Lorsque cette limite est dépassée, la sortie sera tronquée. <0>En savoir plus" + "description": "Remplace la limite de lignes pour éviter les problèmes de mémoire en imposant un plafond strict sur la taille de sortie. Si dépassé, conserve le début et la fin et affiche un espace réservé à Kilo Code là où le contenu est ignoré. <0>En savoir plus" }, "shellIntegrationTimeout": { - "label": "Délai d'intégration du shell du terminal", - "description": "Temps maximum d'attente pour l'initialisation de l'intégration du shell avant d'exécuter des commandes. Pour les utilisateurs avec des temps de démarrage de shell longs, cette valeur peut nécessiter d'être augmentée si vous voyez des erreurs \"Shell Integration Unavailable\" dans le terminal. 
<0>En savoir plus" + "label": "Délai d'attente d'intégration du shell du terminal", + "description": "Temps d'attente de l'intégration du shell de VS Code avant d'exécuter des commandes. Augmentez si votre shell démarre lentement ou si vous voyez des erreurs 'Intégration du Shell Indisponible'. <0>En savoir plus" }, "shellIntegrationDisabled": { - "label": "Désactiver l'intégration du shell du terminal", - "description": "Active ceci si les commandes du terminal ne fonctionnent pas correctement ou si tu vois des erreurs 'Shell Integration Unavailable'. Cela utilise une méthode plus simple pour exécuter les commandes, en contournant certaines fonctionnalités avancées du terminal. <0>En savoir plus" + "label": "Utiliser le terminal en ligne (recommandé)", + "description": "Exécute des commandes dans le terminal en ligne (chat) pour contourner les profils/intégration du shell pour des exécutions plus rapides et fiables. Lorsque désactivé, Kilo Code utilise le terminal VS Code avec votre profil de shell, invites et plugins. <0>En savoir plus" }, "commandDelay": { "label": "Délai de commande du terminal", - "description": "Délai en millisecondes à ajouter après l'exécution de la commande. Le paramètre par défaut de 0 désactive complètement le délai. Cela peut aider à garantir que la sortie de la commande est entièrement capturée dans les terminaux avec des problèmes de synchronisation. Dans la plupart des terminaux, cela est implémenté en définissant `PROMPT_COMMAND='sleep N'` et Powershell ajoute `start-sleep` à la fin de chaque commande. À l'origine, c'était une solution pour le bug VSCode#237208 et peut ne pas être nécessaire. <0>En savoir plus" + "description": "Ajoute une courte pause après chaque commande pour que le terminal VS Code puisse vider toute la sortie (bash/zsh : PROMPT_COMMAND sleep ; PowerShell : start-sleep). Utilisez uniquement si vous voyez une sortie de fin manquante ; sinon laissez à 0. 
<0>En savoir plus" }, "compressProgressBar": { - "label": "Compresser la sortie des barres de progression", - "description": "Lorsque activé, traite la sortie du terminal avec des retours chariot (\\r) pour simuler l'affichage d'un terminal réel. Cela supprime les états intermédiaires des barres de progression, ne conservant que l'état final, ce qui économise de l'espace de contexte pour des informations plus pertinentes. <0>En savoir plus" + "label": "Compresser la sortie de barre de progression", + "description": "Réduit les barres de progression/spinners pour ne conserver que l'état final (économise des jetons). <0>En savoir plus" }, "powershellCounter": { - "label": "Activer le contournement du compteur PowerShell", - "description": "Lorsqu'activé, ajoute un compteur aux commandes PowerShell pour assurer une exécution correcte des commandes. Cela aide avec les terminaux PowerShell qui peuvent avoir des problèmes de capture de sortie. <0>En savoir plus" + "label": "Activer la solution de contournement du compteur PowerShell", + "description": "Activez lorsque la sortie PowerShell est manquante ou dupliquée ; ajoute un petit compteur à chaque commande pour stabiliser la sortie. Laissez désactivé si la sortie semble déjà correcte. <0>En savoir plus" }, "zshClearEolMark": { - "label": "Effacer la marque de fin de ligne ZSH", - "description": "Lorsqu'activé, efface la marque de fin de ligne ZSH en définissant PROMPT_EOL_MARK=''. Cela évite les problèmes d'interprétation de la sortie des commandes lorsqu'elle se termine par des caractères spéciaux comme '%'. <0>En savoir plus" + "label": "Effacer la marque EOL ZSH", + "description": "Activez lorsque vous voyez des % égarés en fin de ligne ou que l'analyse semble incorrecte ; omet la marque de fin de ligne (%) de Zsh. 
<0>En savoir plus" }, "zshOhMy": { "label": "Activer l'intégration Oh My Zsh", - "description": "Lorsqu'activé, définit ITERM_SHELL_INTEGRATION_INSTALLED=Yes pour activer les fonctionnalités d'intégration du shell Oh My Zsh. L'application de ce paramètre peut nécessiter le redémarrage de l'IDE. <0>En savoir plus" + "description": "Activez lorsque votre thème/plugins Oh My Zsh attendent l'intégration du shell ; définit ITERM_SHELL_INTEGRATION_INSTALLED=Yes. Désactivez pour éviter de définir cette variable. <0>En savoir plus" }, "zshP10k": { "label": "Activer l'intégration Powerlevel10k", - "description": "Lorsqu'activé, définit POWERLEVEL9K_TERM_SHELL_INTEGRATION=true pour activer les fonctionnalités d'intégration du shell Powerlevel10k. <0>En savoir plus" + "description": "Activez lorsque vous utilisez l'intégration du shell Powerlevel10k. <0>En savoir plus" }, "zdotdir": { "label": "Activer la gestion ZDOTDIR", - "description": "Lorsque activé, crée un répertoire temporaire pour ZDOTDIR afin de gérer correctement l'intégration du shell zsh. Cela garantit le bon fonctionnement de l'intégration du shell VSCode avec zsh tout en préservant votre configuration zsh. <0>En savoir plus" + "description": "Activez lorsque l'intégration du shell zsh échoue ou entre en conflit avec vos dotfiles. <0>En savoir plus" }, "inheritEnv": { "label": "Hériter des variables d'environnement", - "description": "Lorsqu'activé, le terminal hérite des variables d'environnement du processus parent VSCode, comme les paramètres d'intégration du shell définis dans le profil utilisateur. Cela bascule directement le paramètre global VSCode `terminal.integrated.inheritEnv`. <0>En savoir plus" + "description": "Activez pour hériter des variables d'environnement du processus parent VS Code. 
<0>En savoir plus" } }, "advancedSettings": { @@ -746,7 +766,7 @@ "advanced": { "diff": { "label": "Activer l'édition via des diffs", - "description": "Lorsque cette option est activée, Kilo Code pourra éditer des fichiers plus rapidement et rejettera automatiquement les écritures de fichiers complets tronqués. Fonctionne mieux avec le dernier modèle Claude 4 Sonnet.", + "description": "Lorsque cette option est activée, Kilo Code pourra éditer des fichiers plus rapidement et rejettera automatiquement les écritures de fichiers complets tronqués", "strategy": { "label": "Stratégie de diff", "options": { @@ -775,10 +795,6 @@ "name": "Utiliser la stratégie diff unifiée expérimentale", "description": "Activer la stratégie diff unifiée expérimentale. Cette stratégie pourrait réduire le nombre de tentatives causées par des erreurs de modèle, mais peut provoquer des comportements inattendus ou des modifications incorrectes. Activez-la uniquement si vous comprenez les risques et êtes prêt à examiner attentivement tous les changements." }, - "SEARCH_AND_REPLACE": { - "name": "Utiliser l'outil de recherche et remplacement expérimental", - "description": "Activer l'outil de recherche et remplacement expérimental, permettant à Kilo Code de remplacer plusieurs occurrences d'un terme de recherche en une seule requête." - }, "INSERT_BLOCK": { "name": "Utiliser l'outil d'insertion de contenu expérimental", "description": "Activer l'outil d'insertion de contenu expérimental, permettant à Kilo Code d'insérer du contenu à des numéros de ligne spécifiques sans avoir besoin de créer un diff." 
@@ -865,8 +881,6 @@ "modelInfo": { "supportsImages": "Prend en charge les images", "noImages": "Ne prend pas en charge les images", - "supportsComputerUse": "Prend en charge l'utilisation de l'ordinateur", - "noComputerUse": "Ne prend pas en charge l'utilisation de l'ordinateur", "supportsPromptCache": "Prend en charge la mise en cache des prompts", "noPromptCache": "Ne prend pas en charge la mise en cache des prompts", "contextWindow": "Fenêtre de contexte :", diff --git a/webview-ui/src/i18n/locales/hi/chat.json b/webview-ui/src/i18n/locales/hi/chat.json index dc40754efd2..c9a7b290320 100644 --- a/webview-ui/src/i18n/locales/hi/chat.json +++ b/webview-ui/src/i18n/locales/hi/chat.json @@ -156,6 +156,9 @@ "initializingWarning": "चेकपॉइंट अभी भी आरंभ हो रहा है... अगर यह बहुत समय ले रहा है, तो आप सेटिंग्स में चेकपॉइंट को अक्षम कर सकते हैं और अपने कार्य को पुनः आरंभ कर सकते हैं।", "menu": { "viewDiff": "अंतर देखें", + "more": "अधिक विकल्प", + "viewDiffFromInit": "सभी परिवर्तन देखें", + "viewDiffWithCurrent": "इस चेकपॉइंट के बाद से परिवर्तन देखें", "restore": "चेकपॉइंट पुनर्स्थापित करें", "restoreFiles": "फ़ाइलें पुनर्स्थापित करें", "restoreFilesDescription": "आपके प्रोजेक्ट की फ़ाइलों को इस बिंदु पर लिए गए स्नैपशॉट पर पुनर्स्थापित करता है।", @@ -272,6 +275,7 @@ "toggleAriaLabel": "स्वतः-अनुमोदन टॉगल करें", "disabledAriaLabel": "स्वतः-अनुमोदन अक्षम है - पहले विकल्प चुनें", "triggerLabelOff": "स्वतः-अनुमोदन बंद", + "triggerLabelOffShort": "बंद", "triggerLabel_zero": "0 स्वतः-अनुमोदन", "triggerLabel_one": "1 स्वतः-अनुमोदित", "triggerLabel_other": "{{count}} स्वतः-अनुमोदित", @@ -301,6 +305,19 @@ "selectModel": "आरंभ करने के लिए सेटिंग्स में Roo Code Cloud प्रोवाइडर से roo/code-supernova चुनें।", "goToSettingsButton": "सेटिंग्स पर जाएं" }, + "release": { + "heading": "एक्सटेंशन में नया:", + "openRouterEmbeddings": "OpenRouter एम्बेडिंग मॉडल के लिए समर्थन", + "chutesDynamic": "Chutes अब नवीनतम मॉडल को गतिशील रूप से लोड करता है", + "queuedMessagesFix": "कतारबद्ध संदेशों के 
खो जाने के लिए फिक्स" + }, + "cloudAgents": { + "heading": "क्लाउड में नया:", + "prFixer": "PR Reviewer को पूरक बनाने के लिए PR Fixer क्लाउड एजेंट पेश है।", + "prFixerDescription": "Roo Code का PR Fixer सीधे GitHub से आपके PR में उच्च-गुणवत्ता वाले परिवर्तन लागू करता है। PR टिप्पणी के माध्यम से आह्वान करें और यह संदर्भ, समझौतों और ट्रेड-ऑफ को समझने के लिए पूरे टिप्पणी इतिहास को पढ़ेगा - फिर सही फिक्स लागू करेगा।", + "tryPrFixerButton": "PR Fixer आज़माएं" + }, + "careers": "साथ ही, हम भर्ती कर रहे हैं!", "socialLinks": "X, Discord, या r/RooCode पर हमसे जुड़ें 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/hi/common.json b/webview-ui/src/i18n/locales/hi/common.json index b459646a0bd..bb622d852a2 100644 --- a/webview-ui/src/i18n/locales/hi/common.json +++ b/webview-ui/src/i18n/locales/hi/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}} महीने पहले", "year_ago": "एक साल पहले", "years_ago": "{{count}} साल पहले" + }, + "errors": { + "wait_checkpoint_long_time": "तुमने {{timeout}} सेकंड तक चेकपॉइंट इनिशियलाइज़ेशन का इंतजार किया। अगर तुम्हें यह फ़ीचर नहीं चाहिए, तो चेकपॉइंट सेटिंग्स में बंद कर दो।", + "init_checkpoint_fail_long_time": "चेकपॉइंट इनिशियलाइज़ेशन {{timeout}} सेकंड से ज़्यादा समय ले रहा है, इसलिए इस कार्य के लिए चेकपॉइंट बंद कर दिए गए हैं। तुम चेकपॉइंट बंद कर सकते हो या चेकपॉइंट सेटिंग्स में इंतजार का समय बढ़ा सकते हो।" } } diff --git a/webview-ui/src/i18n/locales/hi/mcp.json b/webview-ui/src/i18n/locales/hi/mcp.json index 4c543beb0d9..b535578449b 100644 --- a/webview-ui/src/i18n/locales/hi/mcp.json +++ b/webview-ui/src/i18n/locales/hi/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "टूल्स", "resources": "संसाधन", - "errors": "त्रुटियाँ" + "logs": "लॉग्स" }, "emptyState": { "noTools": "कोई टूल नहीं मिला", "noResources": "कोई संसाधन नहीं मिला", - "noErrors": "कोई त्रुटि नहीं मिली" + "noLogs": "अभी तक कोई लॉग नहीं" }, "networkTimeout": { "label": "नेटवर्क टाइमआउट", diff --git a/webview-ui/src/i18n/locales/hi/settings.json 
b/webview-ui/src/i18n/locales/hi/settings.json index 805131ac4f7..6b17790e29e 100644 --- a/webview-ui/src/i18n/locales/hi/settings.json +++ b/webview-ui/src/i18n/locales/hi/settings.json @@ -57,6 +57,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "API कुंजी", "vercelAiGatewayApiKeyPlaceholder": "अपनी Vercel AI Gateway API कुंजी दर्ज करें", + "openRouterProvider": "ओपनराउटर", + "openRouterApiKeyLabel": "ओपनराउटर एपीआई कुंजी", + "openRouterApiKeyPlaceholder": "अपनी ओपनराउटर एपीआई कुंजी दर्ज करें", "mistralProvider": "Mistral", "mistralApiKeyLabel": "API कुंजी:", "mistralApiKeyPlaceholder": "अपनी मिस्ट्रल एपीआई कुंजी दर्ज करें", @@ -126,7 +129,8 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API कुंजी आवश्यक है", "ollamaBaseUrlRequired": "Ollama आधार URL आवश्यक है", "baseUrlRequired": "आधार URL आवश्यक है", - "modelDimensionMinValue": "मॉडल आयाम 0 से बड़ा होना चाहिए" + "modelDimensionMinValue": "मॉडल आयाम 0 से बड़ा होना चाहिए", + "openRouterApiKeyRequired": "OpenRouter API कुंजी आवश्यक है" }, "advancedConfigLabel": "उन्नत कॉन्फ़िगरेशन", "searchMinScoreLabel": "खोज स्कोर थ्रेसहोल्ड", @@ -309,6 +313,9 @@ "getZaiApiKey": "Z AI API कुंजी प्राप्त करें", "zaiEntrypoint": "Z AI प्रवेश बिंदु", "zaiEntrypointDescription": "कृपया अपने स्थान के आधार पर उपयुक्त API प्रवेश बिंदु का चयन करें। यदि आप चीन में हैं, तो open.bigmodel.cn चुनें। अन्यथा, api.z.ai चुनें।", + "minimaxApiKey": "MiniMax API कुंजी", + "getMiniMaxApiKey": "MiniMax API कुंजी प्राप्त करें", + "minimaxBaseUrl": "MiniMax प्रवेश बिंदु", "geminiApiKey": "Gemini API कुंजी", "getGroqApiKey": "Groq API कुंजी प्राप्त करें", "groqApiKey": "Groq API कुंजी", @@ -362,7 +369,7 @@ "enablePromptCachingTitle": "समर्थित मॉडल के लिए प्रदर्शन में सुधार और लागत को कम करने के लिए प्रॉम्प्ट कैशिंग सक्षम करें।", "cacheUsageNote": "नोट: यदि आप कैश उपयोग नहीं देखते हैं, तो एक अलग मॉडल चुनने का प्रयास करें और फिर अपने वांछित मॉडल को पुनः चुनें।", "vscodeLmModel": "भाषा मॉडल", - "vscodeLmWarning": 
"नोट: यह एक बहुत ही प्रायोगिक एकीकरण है और प्रदाता समर्थन भिन्न होगा। यदि आपको किसी मॉडल के समर्थित न होने की त्रुटि मिलती है, तो यह प्रदाता की ओर से एक समस्या है।", + "vscodeLmWarning": "नोट: VS Code Language Model API के माध्यम से उपलब्ध मॉडल प्रदाता द्वारा रैप या फाइन‑ट्यून किए जा सकते हैं, इसलिए इनका व्यवहार किसी सामान्य प्रदाता या राउटर से सीधे उसी मॉडल का उपयोग करने की तुलना में अलग हो सकता है। «Language Model» ड्रॉपडाउन से मॉडल उपयोग करने के लिए पहले उसी मॉडल पर स्विच करें और फिर Copilot Chat प्रॉम्प्ट में «Accept» पर क्लिक करें; अन्यथा 400 \"The requested model is not supported\" जैसी त्रुटि दिखाई दे सकती है।", "geminiParameters": { "urlContext": { "title": "URL संदर्भ सक्षम करें", @@ -439,7 +446,7 @@ }, "computerUse": { "label": "कंप्यूटर उपयोग", - "description": "क्या यह मॉडल ब्राउज़र के साथ इंटरैक्ट करने में सक्षम है? (उदा. Claude 3.7 Sonnet)।" + "description": "क्या यह मॉडल ब्राउज़र के साथ इंटरैक्ट करने में सक्षम है? (उदा. Claude Sonnet)।" }, "promptCache": { "label": "प्रॉम्प्ट कैशिंग", @@ -481,6 +488,7 @@ }, "reasoningEffort": { "label": "मॉडल तर्क प्रयास", + "none": "कोई नहीं", "minimal": "न्यूनतम (सबसे तेज़)", "high": "उच्च", "medium": "मध्यम", @@ -553,6 +561,10 @@ } }, "checkpoints": { + "timeout": { + "label": "चेकपॉइंट इनिशियलाइज़ेशन टाइमआउट (सेकंड)", + "description": "चेकपॉइंट सेवा इनिशियलाइज़ करने के लिए अधिकतम प्रतीक्षा समय। डिफ़ॉल्ट 15 सेकंड है। सीमा: 10-60 सेकंड।" + }, "enable": { "label": "स्वचालित चेकपॉइंट सक्षम करें", "description": "जब सक्षम होता है, तो Kilo Code कार्य निष्पादन के दौरान स्वचालित रूप से चेकपॉइंट बनाएगा, जिससे परिवर्तनों की समीक्षा करना या पहले की स्थितियों पर वापस जाना आसान हो जाएगा। <0>अधिक जानें" @@ -681,6 +693,14 @@ "label": "अधिकतम कुल छवि आकार", "mb": "MB", "description": "एकल read_file ऑपरेशन में संसाधित सभी छवियों के लिए अधिकतम संचयी आकार सीमा (MB में)। कई छवियों को पढ़ते समय, प्रत्येक छवि का आकार कुल में जोड़ा जाता है। यदि किसी अन्य छवि को शामिल करने से यह सीमा पार हो जाएगी, तो उसे छोड़ दिया जाएगा।" + }, + 
"includeCurrentTime": { + "label": "संदर्भ में वर्तमान समय शामिल करें", + "description": "सक्षम होने पर, वर्तमान समय और समयक्षेत्र की जानकारी सिस्टम प्रॉम्प्ट में शामिल की जाएगी। यदि मॉडल समय संबंधी चिंताओं के कारण काम करना बंद कर देते हैं तो इसे अक्षम करें।" + }, + "includeCurrentCost": { + "label": "संदर्भ में वर्तमान लागत शामिल करें", + "description": "सक्षम होने पर, वर्तमान एपीआई उपयोग लागत सिस्टम प्रॉम्प्ट में शामिल की जाएगी। यदि मॉडल लागत संबंधी चिंताओं के कारण काम करना बंद कर देते हैं तो इसे अक्षम करें।" + } }, "terminal": { @@ -690,55 +710,55 @@ }, "advanced": { "label": "टर्मिनल सेटिंग्स: उन्नत", - "description": "निम्नलिखित विकल्पों को लागू करने के लिए टर्मिनल को पुनरारंभ करने की आवश्यकता हो सकती है" + "description": "ये सेटिंग्स केवल तभी लागू होती हैं जब 'इनलाइन टर्मिनल उपयोग करें' अक्षम हो। ये केवल VS Code टर्मिनल को प्रभावित करती हैं और IDE को पुनरारंभ की आवश्यकता हो सकती है।" }, "outputLineLimit": { "label": "टर्मिनल आउटपुट सीमा", - "description": "कमांड निष्पादित करते समय टर्मिनल आउटपुट में शामिल करने के लिए पंक्तियों की अधिकतम संख्या। पार होने पर पंक्तियाँ मध्य से हटा दी जाएंगी, token बचाते हुए। <0>अधिक जानें" + "description": "सीमा के अंदर रहने के लिए पहली और आखिरी पंक्तियाँ रखता है और बीच वाली हटा देता है। token बचाने के लिए कम करें; Kilo Code को अधिक मध्य विवरण देने के लिए बढ़ाएं। Kilo Code उस स्थान पर प्लेसहोल्डर देखता है जहां सामग्री छोड़ी गई है। <0>अधिक जानें" }, "outputCharacterLimit": { "label": "टर्मिनल वर्ण सीमा", - "description": "कमांड निष्पादित करते समय टर्मिनल आउटपुट में शामिल किए जाने वाले वर्णों की अधिकतम संख्या। यह सीमा अत्यधिक लंबी लाइनों से मेमोरी समस्याओं को रोकने के लिए लाइन सीमा पर पूर्वता लेती है। जब यह सीमा पार हो जाती है, तो आउटपुट छोटा कर दिया जाएगा। <0>और जानें" + "description": "मेमोरी समस्याओं को रोकने के लिए आउटपुट आकार पर कठोर सीमा लगाकर लाइन सीमा को ओवरराइड करता है। यदि पार हो जाती है, तो शुरुआत और अंत रखता है और Kilo Code को प्लेसहोल्डर दिखाता है जहां सामग्री छोड़ी गई है। <0>अधिक जानें" }, "shellIntegrationTimeout": { "label": "टर्मिनल 
शेल एकीकरण टाइमआउट", - "description": "कमांड निष्पादित करने से पहले शेल एकीकरण के आरंभ होने के लिए प्रतीक्षा का अधिकतम समय। लंबे शेल स्टार्टअप समय वाले उपयोगकर्ताओं के लिए, यदि आप टर्मिनल में \"Shell Integration Unavailable\" त्रुटियाँ देखते हैं तो इस मान को बढ़ाने की आवश्यकता हो सकती है। <0>अधिक जानें" + "description": "कमांड चलाने से पहले VS Code शेल एकीकरण की प्रतीक्षा करने का समय। यदि आपका शेल धीरे शुरू होता है या आप 'Shell Integration Unavailable' त्रुटियां देखते हैं तो बढ़ाएं। <0>अधिक जानें" }, "shellIntegrationDisabled": { - "label": "टर्मिनल शेल एकीकरण अक्षम करें", - "description": "इसे सक्षम करें यदि टर्मिनल कमांड सही ढंग से काम नहीं कर रहे हैं या आपको 'शेल एकीकरण अनुपलब्ध' त्रुटियाँ दिखाई देती हैं। यह कमांड चलाने के लिए एक सरल विधि का उपयोग करता है, कुछ उन्नत टर्मिनल सुविधाओं को दरकिनार करते हुए। <0>अधिक जानें" + "label": "इनलाइन टर्मिनल का उपयोग करें (अनुशंसित)", + "description": "तेज़, अधिक विश्वसनीय रन के लिए शेल प्रोफ़ाइल/एकीकरण को बायपास करने हेतु इनलाइन टर्मिनल (चैट) में कमांड चलाएं। अक्षम होने पर, Kilo Code आपकी शेल प्रोफ़ाइल, प्रॉम्प्ट और प्लगइन के साथ VS Code टर्मिनल का उपयोग करता है। <0>अधिक जानें" }, "commandDelay": { "label": "टर्मिनल कमांड विलंब", - "description": "कमांड निष्पादन के बाद जोड़ने के लिए मिलीसेकंड में विलंब। 0 का डिफ़ॉल्ट सेटिंग विलंब को पूरी तरह से अक्षम कर देता है। यह टाइमिंग समस्याओं वाले टर्मिनलों में कमांड आउटपुट को पूरी तरह से कैप्चर करने में मदद कर सकता है। अधिकांश टर्मिनलों में यह `PROMPT_COMMAND='sleep N'` सेट करके कार्यान्वित किया जाता है और Powershell प्रत्येक कमांड के अंत में `start-sleep` जोड़ता है। मूल रूप से यह VSCode बग#237208 के लिए एक समाधान था और इसकी आवश्यकता नहीं हो सकती है। <0>अधिक जानें" + "description": "प्रत्येक कमांड के बाद छोटा विराम जोड़ता है ताकि VS Code टर्मिनल सभी आउटपुट फ्लश कर सके (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep)। केवल तभी उपयोग करें जब टेल आउटपुट गायब हो; अन्यथा 0 पर छोड़ दें। <0>अधिक जानें" }, "compressProgressBar": { "label": "प्रगति बार आउटपुट संपीड़ित करें", - 
"description": "जब सक्षम किया जाता है, तो कैरिज रिटर्न (\\r) के साथ टर्मिनल आउटपुट को संसाधित करता है, जो वास्तविक टर्मिनल द्वारा सामग्री प्रदर्शित करने के तरीके का अनुकरण करता है। यह प्रगति बार के मध्यवर्ती स्थितियों को हटाता है, केवल अंतिम स्थिति को बनाए रखता है, जिससे अधिक प्रासंगिक जानकारी के लिए संदर्भ स्थान संरक्षित होता है। <0>अधिक जानें" + "description": "प्रगति बार/स्पिनर को संक्षिप्त करता है ताकि केवल अंतिम स्थिति रखी जाए (token बचाता है)। <0>अधिक जानें" }, "powershellCounter": { "label": "PowerShell काउंटर समाधान सक्षम करें", - "description": "सक्षम होने पर, कमांड के सही निष्पादन को सुनिश्चित करने के लिए PowerShell कमांड में एक काउंटर जोड़ता है। यह उन PowerShell टर्मिनलों के साथ मदद करता है जिनमें आउटपुट कैप्चर करने में समस्याएं हो सकती हैं। <0>अधिक जानें" + "description": "जब PowerShell आउटपुट गायब हो या डुप्लिकेट हो तो इसे चालू करें; यह आउटपुट को स्थिर करने के लिए प्रत्येक कमांड में एक छोटा काउंटर जोड़ता है। यदि आउटपुट पहले से सही दिखता है तो इसे बंद रखें। <0>अधिक जानें" }, "zshClearEolMark": { "label": "ZSH EOL मार्क साफ़ करें", - "description": "सक्षम होने पर, PROMPT_EOL_MARK='' सेट करके ZSH लाइन-समाप्ति मार्क को साफ़ करता है। यह कमांड आउटपुट की व्याख्या में समस्याओं को रोकता है जब आउटपुट '%' जैसे विशेष वर्णों के साथ समाप्त होता है। <0>अधिक जानें" + "description": "जब आप लाइन के अंत में भटके हुए % देखें या पार्सिंग गलत लगे तो इसे चालू करें; यह Zsh के एंड-ऑफ-लाइन मार्क (%) को छोड़ देता है। <0>अधिक जानें" }, "zshOhMy": { "label": "Oh My Zsh एकीकरण सक्षम करें", - "description": "सक्षम होने पर, Oh My Zsh शेल एकीकरण सुविधाओं को सक्षम करने के लिए ITERM_SHELL_INTEGRATION_INSTALLED=Yes सेट करता है। इस सेटिंग को लागू करने के लिए IDE को पुनरारंभ करने की आवश्यकता हो सकती है। <0>अधिक जानें" + "description": "जब आपकी Oh My Zsh थीम/प्लगइन शेल एकीकरण की उम्मीद करते हैं तो इसे चालू करें; यह ITERM_SHELL_INTEGRATION_INSTALLED=Yes सेट करता है। इस वेरिएबल को सेट करने से बचने के लिए इसे बंद करें। <0>अधिक जानें" }, "zshP10k": { "label": "Powerlevel10k एकीकरण सक्षम करें", 
- "description": "सक्षम होने पर, Powerlevel10k शेल एकीकरण सुविधाओं को सक्षम करने के लिए POWERLEVEL9K_TERM_SHELL_INTEGRATION=true सेट करता है। <0>अधिक जानें" + "description": "Powerlevel10k शेल एकीकरण का उपयोग करते समय इसे चालू करें। <0>अधिक जानें" }, "zdotdir": { "label": "ZDOTDIR प्रबंधन सक्षम करें", - "description": "सक्षम होने पर, zsh शेल एकीकरण को सही ढंग से संभालने के लिए ZDOTDIR के लिए एक अस्थायी डायरेक्टरी बनाता है। यह आपके zsh कॉन्फ़िगरेशन को बनाए रखते हुए VSCode शेल एकीकरण को zsh के साथ सही ढंग से काम करने की सुनिश्चितता करता है। <0>अधिक जानें" + "description": "जब zsh शेल एकीकरण विफल हो या आपकी dotfiles के साथ संघर्ष हो तो इसे चालू करें। <0>अधिक जानें" }, "inheritEnv": { "label": "पर्यावरण चर विरासत में लें", - "description": "सक्षम होने पर, टर्मिनल VSCode के मूल प्रक्रिया से पर्यावरण चर विरासत में लेता है, जैसे उपयोगकर्ता प्रोफ़ाइल में परिभाषित शेल एकीकरण सेटिंग्स। यह VSCode की वैश्विक सेटिंग `terminal.integrated.inheritEnv` को सीधे टॉगल करता है। <0>अधिक जानें" + "description": "पैरेंट VS Code प्रोसेस से पर्यावरण चर विरासत में लेने के लिए इसे चालू करें। <0>अधिक जानें" } }, "advancedSettings": { @@ -747,7 +767,7 @@ "advanced": { "diff": { "label": "diffs के माध्यम से संपादन सक्षम करें", - "description": "जब सक्षम होता है, Kilo Code फाइलों को तेजी से संपादित कर सकेगा और स्वचालित रूप से काटे गए पूर्ण-फाइल लेखन को अस्वीकार करेगा। नवीनतम Claude 4 Sonnet मॉडल के साथ सबसे अच्छा काम करता है।", + "description": "जब सक्षम होता है, Kilo Code फाइलों को तेजी से संपादित कर सकेगा और स्वचालित रूप से काटे गए पूर्ण-फाइल लेखन को अस्वीकार करेगा।", "strategy": { "label": "Diff रणनीति", "options": { @@ -776,10 +796,6 @@ "name": "प्रायोगिक एकीकृत diff रणनीति का उपयोग करें", "description": "प्रायोगिक एकीकृत diff रणनीति सक्षम करें। यह रणनीति मॉडल त्रुटियों के कारण पुनः प्रयासों की संख्या को कम कर सकती है, लेकिन अप्रत्याशित व्यवहार या गलत संपादन का कारण बन सकती है। केवल तभी सक्षम करें जब आप जोखिमों को समझते हों और सभी परिवर्तनों की सावधानीपूर्वक समीक्षा करने के लिए तैयार हों।" }, - 
"SEARCH_AND_REPLACE": { - "name": "प्रायोगिक खोज और प्रतिस्थापन उपकरण का उपयोग करें", - "description": "प्रायोगिक खोज और प्रतिस्थापन उपकरण सक्षम करें, जो Kilo Code को एक अनुरोध में खोज शब्द के कई उदाहरणों को बदलने की अनुमति देता है।" - }, "INSERT_BLOCK": { "name": "प्रायोगिक सामग्री सम्मिलित करने के उपकरण का उपयोग करें", "description": "प्रायोगिक सामग्री सम्मिलित करने के उपकरण को सक्षम करें, जो Kilo Code को diff बनाए बिना विशिष्ट लाइन नंबरों पर सामग्री सम्मिलित करने की अनुमति देता है।" @@ -866,8 +882,6 @@ "modelInfo": { "supportsImages": "छवियों का समर्थन करता है", "noImages": "छवियों का समर्थन नहीं करता है", - "supportsComputerUse": "कंप्यूटर उपयोग का समर्थन करता है", - "noComputerUse": "कंप्यूटर उपयोग का समर्थन नहीं करता है", "supportsPromptCache": "प्रॉम्प्ट कैशिंग का समर्थन करता है", "noPromptCache": "प्रॉम्प्ट कैशिंग का समर्थन नहीं करता है", "contextWindow": "संदर्भ विंडो:", diff --git a/webview-ui/src/i18n/locales/id/chat.json b/webview-ui/src/i18n/locales/id/chat.json index 85243098470..65c18699d76 100644 --- a/webview-ui/src/i18n/locales/id/chat.json +++ b/webview-ui/src/i18n/locales/id/chat.json @@ -159,6 +159,9 @@ "initializingWarning": "Masih menginisialisasi checkpoint... 
Jika ini terlalu lama, kamu bisa menonaktifkan checkpoint di pengaturan dan restart tugas.", "menu": { "viewDiff": "Lihat Diff", + "more": "Opsi lainnya", + "viewDiffFromInit": "Lihat Semua Perubahan", + "viewDiffWithCurrent": "Lihat Perubahan Sejak Checkpoint Ini", "restore": "Pulihkan Checkpoint", "restoreFiles": "Pulihkan File", "restoreFilesDescription": "Mengembalikan file proyek kamu ke snapshot yang diambil pada titik ini.", @@ -293,6 +296,7 @@ "toggleAriaLabel": "Beralih persetujuan otomatis", "disabledAriaLabel": "Persetujuan otomatis dinonaktifkan - pilih opsi terlebih dahulu", "triggerLabelOff": "Persetujuan otomatis mati", + "triggerLabelOffShort": "Mati", "triggerLabel_zero": "0 disetujui otomatis", "triggerLabel_one": "1 disetujui otomatis", "triggerLabel_other": "{{count}} disetujui otomatis", @@ -307,6 +311,19 @@ "selectModel": "Pilih roo/code-supernova dari penyedia Roo Code Cloud di Pengaturan untuk memulai.", "goToSettingsButton": "Pergi ke Pengaturan" }, + "release": { + "heading": "Baru di Ekstensi:", + "openRouterEmbeddings": "Dukungan untuk model embedding OpenRouter", + "chutesDynamic": "Chutes sekarang memuat model terbaru secara dinamis", + "queuedMessagesFix": "Perbaikan untuk pesan antrian yang hilang" + }, + "cloudAgents": { + "heading": "Baru di Cloud:", + "prFixer": "Memperkenalkan agen cloud PR Fixer untuk melengkapi PR Reviewer.", + "prFixerDescription": "PR Fixer dari Roo Code menerapkan perubahan berkualitas tinggi ke PR Anda, langsung dari GitHub. 
Panggil melalui komentar PR dan ia akan membaca seluruh riwayat komentar untuk memahami konteks, kesepakatan, dan trade-off - kemudian menerapkan perbaikan yang tepat.", + "tryPrFixerButton": "Coba PR Fixer" + }, + "careers": "Juga, kami sedang merekrut!", "socialLinks": "Bergabunglah dengan kami di X, Discord, atau r/RooCode 🚀" }, "reasoning": { diff --git a/webview-ui/src/i18n/locales/id/common.json b/webview-ui/src/i18n/locales/id/common.json index 3127ee723a3..4598f505476 100644 --- a/webview-ui/src/i18n/locales/id/common.json +++ b/webview-ui/src/i18n/locales/id/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}} bulan yang lalu", "year_ago": "satu tahun yang lalu", "years_ago": "{{count}} tahun yang lalu" + }, + "errors": { + "wait_checkpoint_long_time": "Kamu sudah menunggu {{timeout}} detik untuk inisialisasi checkpoint. Kalau tidak butuh fitur ini, matikan saja di pengaturan checkpoint.", + "init_checkpoint_fail_long_time": "Inisialisasi checkpoint sudah lebih dari {{timeout}} detik, jadi checkpoint dinonaktifkan untuk tugas ini. Kamu bisa mematikan checkpoint atau menambah waktu tunggu di pengaturan checkpoint." 
} } diff --git a/webview-ui/src/i18n/locales/id/mcp.json b/webview-ui/src/i18n/locales/id/mcp.json index 80ecd5c12b1..90b9753acb7 100644 --- a/webview-ui/src/i18n/locales/id/mcp.json +++ b/webview-ui/src/i18n/locales/id/mcp.json @@ -26,12 +26,12 @@ "tabs": { "tools": "Tools", "resources": "Resources", - "errors": "Error" + "logs": "Log" }, "emptyState": { "noTools": "Tidak ada tools ditemukan", "noResources": "Tidak ada resources ditemukan", - "noErrors": "Tidak ada error ditemukan" + "noLogs": "Belum ada log" }, "networkTimeout": { "label": "Network Timeout", diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json index e7170ef5298..6b07d7c6a51 100644 --- a/webview-ui/src/i18n/locales/id/settings.json +++ b/webview-ui/src/i18n/locales/id/settings.json @@ -57,6 +57,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "API Key", "vercelAiGatewayApiKeyPlaceholder": "Masukkan kunci API Vercel AI Gateway Anda", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "Kunci API OpenRouter", + "openRouterApiKeyPlaceholder": "Masukkan kunci API OpenRouter Anda", "mistralProvider": "Mistral", "mistralApiKeyLabel": "Kunci API:", "mistralApiKeyPlaceholder": "Masukkan kunci API Mistral Anda", @@ -126,7 +129,8 @@ "vercelAiGatewayApiKeyRequired": "Kunci API Vercel AI Gateway diperlukan", "ollamaBaseUrlRequired": "URL dasar Ollama diperlukan", "baseUrlRequired": "URL dasar diperlukan", - "modelDimensionMinValue": "Dimensi model harus lebih besar dari 0" + "modelDimensionMinValue": "Dimensi model harus lebih besar dari 0", + "openRouterApiKeyRequired": "Kunci API OpenRouter diperlukan" }, "advancedConfigLabel": "Konfigurasi Lanjutan", "searchMinScoreLabel": "Ambang Batas Skor Pencarian", @@ -309,6 +313,9 @@ "getZaiApiKey": "Dapatkan Kunci API Z AI", "zaiEntrypoint": "Titik Masuk Z AI", "zaiEntrypointDescription": "Silakan pilih titik masuk API yang sesuai berdasarkan lokasi Anda. 
Jika Anda berada di China, pilih open.bigmodel.cn. Jika tidak, pilih api.z.ai.", + "minimaxApiKey": "Kunci API MiniMax", + "getMiniMaxApiKey": "Dapatkan Kunci API MiniMax", + "minimaxBaseUrl": "Titik Masuk MiniMax", "geminiApiKey": "Gemini API Key", "getGroqApiKey": "Dapatkan Groq API Key", "groqApiKey": "Groq API Key", @@ -362,7 +369,7 @@ "enablePromptCachingTitle": "Aktifkan prompt caching untuk meningkatkan performa dan mengurangi biaya untuk model yang didukung.", "cacheUsageNote": "Catatan: Jika kamu tidak melihat penggunaan cache, coba pilih model yang berbeda lalu pilih model yang kamu inginkan lagi.", "vscodeLmModel": "Model Bahasa", - "vscodeLmWarning": "Catatan: Ini adalah integrasi yang sangat eksperimental dan dukungan provider akan bervariasi. Jika kamu mendapat error tentang model yang tidak didukung, itu adalah masalah di sisi provider.", + "vscodeLmWarning": "Catatan: Model yang diakses melalui VS Code Language Model API dapat dibungkus atau disetel‑halus oleh penyedia, sehingga perilakunya dapat berbeda dibandingkan menggunakan model yang sama secara langsung dari penyedia atau router tipikal. Untuk menggunakan model dari menu tarik‑turun «Language Model», pertama beralihlah ke model tersebut lalu klik «Terima» pada prompt Copilot Chat; jika tidak, Anda mungkin melihat kesalahan seperti 400 «The requested model is not supported».", "geminiParameters": { "urlContext": { "title": "Aktifkan konteks URL", @@ -439,7 +446,7 @@ }, "computerUse": { "label": "Computer Use", - "description": "Apakah model ini mampu berinteraksi dengan browser? (misalnya Claude 3.7 Sonnet)." + "description": "Apakah model ini mampu berinteraksi dengan browser?" 
}, "promptCache": { "label": "Prompt Caching", @@ -481,6 +488,7 @@ }, "reasoningEffort": { "label": "Upaya Reasoning Model", + "none": "Tidak Ada", "minimal": "Minimal (Tercepat)", "high": "Tinggi", "medium": "Sedang", @@ -553,6 +561,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Batas waktu inisialisasi checkpoint (detik)", + "description": "Waktu maksimum menunggu inisialisasi layanan checkpoint. Default 15 detik. Rentang: 10-60 detik." + }, "enable": { "label": "Aktifkan checkpoint otomatis", "description": "Ketika diaktifkan, Kilo Code akan secara otomatis membuat checkpoint selama eksekusi tugas, memudahkan untuk meninjau perubahan atau kembali ke state sebelumnya. <0>Pelajari lebih lanjut" @@ -681,6 +693,14 @@ "label": "Ukuran total gambar maksimum", "mb": "MB", "description": "Batas ukuran kumulatif maksimum (dalam MB) untuk semua gambar yang diproses dalam satu operasi read_file. Saat membaca beberapa gambar, ukuran setiap gambar ditambahkan ke total. Jika menyertakan gambar lain akan melebihi batas ini, gambar tersebut akan dilewati." + }, + "includeCurrentTime": { + "label": "Sertakan waktu saat ini dalam konteks", + "description": "Ketika diaktifkan, waktu saat ini dan informasi zona waktu akan disertakan dalam prompt sistem. Nonaktifkan ini jika model berhenti bekerja karena masalah waktu." + }, + "includeCurrentCost": { + "label": "Sertakan biaya saat ini dalam konteks", + "description": "Ketika diaktifkan, biaya penggunaan API saat ini akan disertakan dalam prompt sistem. Nonaktifkan ini jika model berhenti bekerja karena masalah biaya." } }, "terminal": { @@ -690,55 +710,55 @@ }, "advanced": { "label": "Pengaturan Terminal: Lanjutan", - "description": "Opsi berikut mungkin memerlukan restart terminal untuk menerapkan pengaturan." + "description": "Pengaturan ini hanya berlaku ketika 'Gunakan Terminal Inline' dinonaktifkan. Hanya mempengaruhi terminal VS Code dan mungkin memerlukan restart IDE." 
}, "outputLineLimit": { - "label": "Batas output terminal", - "description": "Jumlah maksimum baris yang disertakan dalam output terminal saat mengeksekusi perintah. Ketika terlampaui, baris akan dihapus dari tengah, menghemat token. <0>Pelajari lebih lanjut" + "label": "Batas keluaran terminal", + "description": "Menyimpan baris pertama dan terakhir dan membuang yang tengah agar tetap di bawah batas. Turunkan untuk menghemat token; naikkan untuk memberi Kilo Code lebih banyak detail tengah. Kilo Code melihat placeholder di mana konten dilewati. <0>Pelajari lebih lanjut" }, "outputCharacterLimit": { "label": "Batas karakter terminal", - "description": "Jumlah maksimum karakter yang akan disertakan dalam output terminal saat menjalankan perintah. Batas ini lebih diutamakan daripada batas baris untuk mencegah masalah memori dari baris yang sangat panjang. Ketika terlampaui, output akan dipotong. <0>Pelajari lebih lanjut" + "description": "Override batas baris untuk mencegah masalah memori dengan memberlakukan cap keras pada ukuran output. Jika terlampaui, simpan awal dan akhir lalu tampilkan placeholder ke Kilo Code di mana konten dilewati. <0>Pelajari lebih lanjut" }, "shellIntegrationTimeout": { "label": "Timeout integrasi shell terminal", - "description": "Waktu maksimum untuk menunggu integrasi shell menginisialisasi sebelum mengeksekusi perintah. Untuk pengguna dengan waktu startup shell yang lama, nilai ini mungkin perlu ditingkatkan jika kamu melihat error \"Shell Integration Unavailable\" di terminal. <0>Pelajari lebih lanjut" + "description": "Waktu tunggu integrasi shell VS Code sebelum menjalankan perintah. Naikkan jika shell lambat start atau muncul error 'Shell Integration Unavailable'. <0>Pelajari lebih lanjut" }, "shellIntegrationDisabled": { - "label": "Nonaktifkan integrasi shell terminal", - "description": "Aktifkan ini jika perintah terminal tidak bekerja dengan benar atau kamu melihat error 'Shell Integration Unavailable'. 
Ini menggunakan metode yang lebih sederhana untuk menjalankan perintah, melewati beberapa fitur terminal lanjutan. <0>Pelajari lebih lanjut" + "label": "Gunakan Terminal Inline (disarankan)", + "description": "Jalankan perintah di Terminal Inline (obrolan) untuk melewati profil/integrasi shell untuk proses lebih cepat dan andal. Saat dinonaktifkan, Kilo Code menggunakan terminal VS Code dengan profil shell, prompt, dan plugin Anda. <0>Pelajari lebih lanjut" }, "commandDelay": { "label": "Delay perintah terminal", - "description": "Delay dalam milidetik untuk ditambahkan setelah eksekusi perintah. Pengaturan default 0 menonaktifkan delay sepenuhnya. Ini dapat membantu memastikan output perintah sepenuhnya ditangkap di terminal dengan masalah timing. Di sebagian besar terminal ini diimplementasikan dengan mengatur `PROMPT_COMMAND='sleep N'` dan Powershell menambahkan `start-sleep` di akhir setiap perintah. Awalnya adalah workaround untuk VSCode bug#237208 dan mungkin tidak diperlukan. <0>Pelajari lebih lanjut" + "description": "Tambahkan jeda singkat setelah setiap perintah agar VS Code terminal bisa flush semua output (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). Gunakan hanya jika output ekor hilang; jika tidak biarkan di 0. <0>Pelajari lebih lanjut" }, "compressProgressBar": { - "label": "Kompres output progress bar", - "description": "Ketika diaktifkan, memproses output terminal dengan carriage return (\\r) untuk mensimulasikan bagaimana terminal nyata akan menampilkan konten. Ini menghapus state progress bar intermediate, hanya mempertahankan state final, yang menghemat ruang konteks untuk informasi yang lebih relevan. <0>Pelajari lebih lanjut" + "label": "Kompres keluaran bilah kemajuan", + "description": "Menciutkan bilah kemajuan/spinner sehingga hanya status akhir yang disimpan (menghemat token). 
<0>Pelajari lebih lanjut" }, "powershellCounter": { - "label": "Aktifkan workaround counter PowerShell", - "description": "Ketika diaktifkan, menambahkan counter ke perintah PowerShell untuk memastikan eksekusi perintah yang tepat. Ini membantu dengan terminal PowerShell yang mungkin memiliki masalah dengan penangkapan output perintah. <0>Pelajari lebih lanjut" + "label": "Aktifkan solusi penghitung PowerShell", + "description": "Aktifkan saat keluaran PowerShell hilang atau digandakan; menambahkan penghitung kecil ke setiap perintah untuk menstabilkan keluaran. Biarkan nonaktif jika keluaran sudah terlihat benar. <0>Pelajari lebih lanjut" }, "zshClearEolMark": { - "label": "Hapus ZSH EOL mark", - "description": "Ketika diaktifkan, menghapus ZSH end-of-line mark dengan mengatur PROMPT_EOL_MARK=''. Ini mencegah masalah dengan interpretasi output perintah ketika output berakhir dengan karakter khusus seperti '%'. <0>Pelajari lebih lanjut" + "label": "Hapus tanda EOL ZSH", + "description": "Aktifkan saat Anda melihat % liar di akhir baris atau penguraian terlihat salah; menghilangkan tanda akhir baris (%) Zsh. <0>Pelajari lebih lanjut" }, "zshOhMy": { "label": "Aktifkan integrasi Oh My Zsh", - "description": "Ketika diaktifkan, mengatur ITERM_SHELL_INTEGRATION_INSTALLED=Yes untuk mengaktifkan fitur integrasi shell Oh My Zsh. Menerapkan pengaturan ini mungkin memerlukan restart IDE. <0>Pelajari lebih lanjut" + "description": "Aktifkan saat tema/plugin Oh My Zsh Anda mengharapkan integrasi shell; menyetel ITERM_SHELL_INTEGRATION_INSTALLED=Yes. Nonaktifkan untuk menghindari penyetelan variabel tersebut. <0>Pelajari lebih lanjut" }, "zshP10k": { "label": "Aktifkan integrasi Powerlevel10k", - "description": "Ketika diaktifkan, mengatur POWERLEVEL9K_TERM_SHELL_INTEGRATION=true untuk mengaktifkan fitur integrasi shell Powerlevel10k. <0>Pelajari lebih lanjut" + "description": "Aktifkan saat menggunakan integrasi shell Powerlevel10k. 
<0>Pelajari lebih lanjut" }, "zdotdir": { "label": "Aktifkan penanganan ZDOTDIR", - "description": "Ketika diaktifkan, membuat direktori sementara untuk ZDOTDIR untuk menangani integrasi shell zsh dengan benar. Ini memastikan integrasi shell VSCode bekerja dengan benar dengan zsh sambil mempertahankan konfigurasi zsh kamu. <0>Pelajari lebih lanjut" + "description": "Aktifkan saat integrasi shell zsh gagal atau bertentangan dengan dotfile Anda. <0>Pelajari lebih lanjut" }, "inheritEnv": { - "label": "Warisi variabel environment", - "description": "Ketika diaktifkan, terminal akan mewarisi variabel environment dari proses parent VSCode, seperti pengaturan integrasi shell yang didefinisikan user-profile. Ini secara langsung mengalihkan pengaturan global VSCode `terminal.integrated.inheritEnv`. <0>Pelajari lebih lanjut" + "label": "Warisi variabel lingkungan", + "description": "Aktifkan untuk mewarisi variabel lingkungan dari proses induk VS Code. <0>Pelajari lebih lanjut" } }, "advancedSettings": { @@ -747,7 +767,7 @@ "advanced": { "diff": { "label": "Aktifkan editing melalui diff", - "description": "Ketika diaktifkan, Kilo Code akan dapat mengedit file lebih cepat dan akan secara otomatis menolak penulisan file penuh yang terpotong. Bekerja terbaik dengan model Claude 3.7 Sonnet terbaru.", + "description": "Ketika diaktifkan, Kilo Code akan dapat mengedit file lebih cepat dan akan secara otomatis menolak penulisan file penuh yang terpotong", "strategy": { "label": "Strategi diff", "options": { @@ -797,10 +817,6 @@ "name": "Gunakan strategi unified diff eksperimental", "description": "Aktifkan strategi unified diff eksperimental. Strategi ini mungkin mengurangi jumlah retry yang disebabkan oleh error model tetapi dapat menyebabkan perilaku yang tidak terduga atau edit yang salah. Hanya aktifkan jika kamu memahami risikonya dan bersedia meninjau semua perubahan dengan hati-hati." 
}, - "SEARCH_AND_REPLACE": { - "name": "Gunakan tool search and replace eksperimental", - "description": "Aktifkan tool search and replace eksperimental, memungkinkan Kilo Code mengganti beberapa instance dari term pencarian dalam satu permintaan." - }, "INSERT_BLOCK": { "name": "Gunakan tool insert content eksperimental", "description": "Aktifkan tool insert content eksperimental, memungkinkan Kilo Code menyisipkan konten pada nomor baris spesifik tanpa perlu membuat diff." @@ -887,8 +903,6 @@ "modelInfo": { "supportsImages": "Mendukung gambar", "noImages": "Tidak mendukung gambar", - "supportsComputerUse": "Mendukung computer use", - "noComputerUse": "Tidak mendukung computer use", "supportsPromptCache": "Mendukung prompt caching", "noPromptCache": "Tidak mendukung prompt caching", "contextWindow": "Jendela Konteks:", diff --git a/webview-ui/src/i18n/locales/it/chat.json b/webview-ui/src/i18n/locales/it/chat.json index 7ba3a0e1807..33d1419233d 100644 --- a/webview-ui/src/i18n/locales/it/chat.json +++ b/webview-ui/src/i18n/locales/it/chat.json @@ -159,6 +159,9 @@ "initializingWarning": "Inizializzazione del checkpoint in corso... 
Se questa operazione richiede troppo tempo, puoi disattivare i checkpoint nelle impostazioni e riavviare l'attività.", "menu": { "viewDiff": "Visualizza differenze", + "more": "Altre opzioni", + "viewDiffFromInit": "Visualizza tutte le modifiche", + "viewDiffWithCurrent": "Visualizza le modifiche da questo checkpoint", "restore": "Ripristina checkpoint", "restoreFiles": "Ripristina file", "restoreFilesDescription": "Ripristina i file del tuo progetto a uno snapshot catturato in questo punto.", @@ -272,6 +275,7 @@ "toggleAriaLabel": "Attiva/disattiva approvazione automatica", "disabledAriaLabel": "Approvazione automatica disabilitata - seleziona prima le opzioni", "triggerLabelOff": "Approvazione automatica disattivata", + "triggerLabelOffShort": "Disattivata", "triggerLabel_zero": "0 approvati automaticamente", "triggerLabel_one": "1 approvato automaticamente", "triggerLabel_other": "{{count}} approvati automaticamente", @@ -301,6 +305,19 @@ "selectModel": "Seleziona roo/code-supernova dal provider Roo Code Cloud nelle Impostazioni per iniziare.", "goToSettingsButton": "Vai alle Impostazioni" }, + "release": { + "heading": "Novità nell'Estensione:", + "openRouterEmbeddings": "Supporto per i modelli di embedding di OpenRouter", + "chutesDynamic": "Chutes ora carica i modelli più recenti dinamicamente", + "queuedMessagesFix": "Correzioni per i messaggi in coda che si perdono" + }, + "cloudAgents": { + "heading": "Novità nel Cloud:", + "prFixer": "Presentazione dell'agente cloud PR Fixer per completare il PR Reviewer.", + "prFixerDescription": "Il PR Fixer di Roo Code applica modifiche di alta qualità alle tue PR, direttamente da GitHub. 
Invocalo tramite un commento PR e leggerà l'intera cronologia dei commenti per comprendere il contesto, gli accordi e i compromessi - quindi implementerà la correzione giusta.", + "tryPrFixerButton": "Prova PR Fixer" + }, + "careers": "Inoltre, stiamo assumendo!", "socialLinks": "Unisciti a noi su X, Discord, o r/RooCode 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/it/common.json b/webview-ui/src/i18n/locales/it/common.json index 638824fa7b7..687ec047a35 100644 --- a/webview-ui/src/i18n/locales/it/common.json +++ b/webview-ui/src/i18n/locales/it/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}} mesi fa", "year_ago": "un anno fa", "years_ago": "{{count}} anni fa" + }, + "errors": { + "wait_checkpoint_long_time": "Hai aspettato {{timeout}} secondi per l'inizializzazione del checkpoint. Se non ti serve questa funzione, disattivala nelle impostazioni del checkpoint.", + "init_checkpoint_fail_long_time": "L'inizializzazione del checkpoint ha impiegato più di {{timeout}} secondi, quindi i checkpoint sono disabilitati per questa attività. Puoi disattivare i checkpoint o aumentare il tempo di attesa nelle impostazioni del checkpoint." 
} } diff --git a/webview-ui/src/i18n/locales/it/mcp.json b/webview-ui/src/i18n/locales/it/mcp.json index abd2fbfc549..387fa42b437 100644 --- a/webview-ui/src/i18n/locales/it/mcp.json +++ b/webview-ui/src/i18n/locales/it/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "Strumenti", "resources": "Risorse", - "errors": "Errori" + "logs": "Registri" }, "emptyState": { "noTools": "Nessuno strumento trovato", "noResources": "Nessuna risorsa trovata", - "noErrors": "Nessun errore trovato" + "noLogs": "Ancora nessun registro" }, "networkTimeout": { "label": "Timeout di rete", diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json index b5a1ed48ef4..05a07f5f5c4 100644 --- a/webview-ui/src/i18n/locales/it/settings.json +++ b/webview-ui/src/i18n/locales/it/settings.json @@ -58,6 +58,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "Chiave API", "vercelAiGatewayApiKeyPlaceholder": "Inserisci la tua chiave API Vercel AI Gateway", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "Chiave API OpenRouter", + "openRouterApiKeyPlaceholder": "Inserisci la tua chiave API OpenRouter", "mistralProvider": "Mistral", "mistralApiKeyLabel": "Chiave API:", "mistralApiKeyPlaceholder": "Inserisci la tua chiave API Mistral", @@ -127,7 +130,8 @@ "vercelAiGatewayApiKeyRequired": "È richiesta la chiave API Vercel AI Gateway", "ollamaBaseUrlRequired": "È richiesto l'URL di base di Ollama", "baseUrlRequired": "È richiesto l'URL di base", - "modelDimensionMinValue": "La dimensione del modello deve essere maggiore di 0" + "modelDimensionMinValue": "La dimensione del modello deve essere maggiore di 0", + "openRouterApiKeyRequired": "È richiesta la chiave API OpenRouter" }, "advancedConfigLabel": "Configurazione avanzata", "searchMinScoreLabel": "Soglia punteggio di ricerca", @@ -310,6 +314,9 @@ "getZaiApiKey": "Ottieni chiave API Z AI", "zaiEntrypoint": "Punto di ingresso Z AI", "zaiEntrypointDescription": 
"Si prega di selezionare il punto di ingresso API appropriato in base alla propria posizione. Se ti trovi in Cina, scegli open.bigmodel.cn. Altrimenti, scegli api.z.ai.", + "minimaxApiKey": "Chiave API MiniMax", + "getMiniMaxApiKey": "Ottieni chiave API MiniMax", + "minimaxBaseUrl": "Punto di ingresso MiniMax", "geminiApiKey": "Chiave API Gemini", "getGroqApiKey": "Ottieni chiave API Groq", "groqApiKey": "Chiave API Groq", @@ -363,7 +370,7 @@ "enablePromptCachingTitle": "Abilita la cache dei prompt per migliorare le prestazioni e ridurre i costi per i modelli supportati.", "cacheUsageNote": "Nota: Se non vedi l'utilizzo della cache, prova a selezionare un modello diverso e poi seleziona nuovamente il modello desiderato.", "vscodeLmModel": "Modello linguistico", - "vscodeLmWarning": "Nota: Questa è un'integrazione molto sperimentale e il supporto del fornitore varierà. Se ricevi un errore relativo a un modello non supportato, si tratta di un problema del fornitore.", + "vscodeLmWarning": "Nota: I modelli accessibili tramite la VS Code Language Model API possono essere incapsulati o perfezionati dal provider, quindi il comportamento può differire dall’uso diretto dello stesso modello presso un provider o router tipico. Per usare un modello dal menu a discesa «Language Model», passa prima a quel modello e poi fai clic su «Accetta» nell’avviso di Copilot Chat; in caso contrario potresti visualizzare un errore come 400 «The requested model is not supported».", "geminiParameters": { "urlContext": { "title": "Abilita contesto URL", @@ -440,7 +447,7 @@ }, "computerUse": { "label": "Uso del computer", - "description": "Il modello è in grado di interagire con il browser? (es. Claude 3.7 Sonnet)." + "description": "Il modello è in grado di interagire con il browser?" 
}, "promptCache": { "label": "Cache dei prompt", @@ -482,6 +489,7 @@ }, "reasoningEffort": { "label": "Sforzo di ragionamento del modello", + "none": "Nessuno", "minimal": "Minimo (più veloce)", "high": "Alto", "medium": "Medio", @@ -554,6 +562,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Timeout inizializzazione checkpoint (secondi)", + "description": "Tempo massimo di attesa per l'inizializzazione del servizio checkpoint. Predefinito: 15 secondi. Intervallo: 10-60 secondi." + }, "enable": { "label": "Abilita punti di controllo automatici", "description": "Quando abilitato, Kilo Code creerà automaticamente punti di controllo durante l'esecuzione dei compiti, facilitando la revisione delle modifiche o il ritorno a stati precedenti. <0>Scopri di più" @@ -682,6 +694,14 @@ "label": "Dimensione totale massima immagini", "mb": "MB", "description": "Limite di dimensione cumulativa massima (in MB) per tutte le immagini elaborate in una singola operazione read_file. Durante la lettura di più immagini, la dimensione di ogni immagine viene aggiunta al totale. Se l'inclusione di un'altra immagine supererebbe questo limite, verrà saltata." + }, + "includeCurrentTime": { + "label": "Includi l'ora corrente nel contesto", + "description": "Se abilitato, l'ora corrente e le informazioni sul fuso orario verranno incluse nel prompt di sistema. Disabilita questa opzione se i modelli smettono di funzionare a causa di problemi di orario." + }, + "includeCurrentCost": { + "label": "Includi il costo corrente nel contesto", + "description": "Se abilitato, il costo di utilizzo corrente dell'API verrà incluso nel prompt di sistema. Disabilita questa opzione se i modelli smettono di funzionare a causa di problemi di costo." } }, "terminal": { @@ -691,55 +711,55 @@ }, "advanced": { "label": "Impostazioni terminale: Avanzate", - "description": "Le seguenti opzioni potrebbero richiedere il riavvio del terminale per applicare l'impostazione." 
+ "description": "Queste impostazioni si applicano solo quando 'Usa terminale in linea' è disabilitato. Influenzano solo il terminale VS Code e possono richiedere il riavvio dell'IDE." }, "outputLineLimit": { "label": "Limite output terminale", - "description": "Numero massimo di righe da includere nell'output del terminale durante l'esecuzione dei comandi. Quando superato, le righe verranno rimosse dal centro, risparmiando token. <0>Scopri di più" + "description": "Mantiene le prime e ultime righe e scarta quelle centrali per rimanere sotto il limite. Abbassa per risparmiare token; alza per dare a Roo più dettagli centrali. Roo vede un segnaposto dove il contenuto viene saltato.<0>Scopri di più" }, "outputCharacterLimit": { - "label": "Limite di caratteri del terminale", - "description": "Numero massimo di caratteri da includere nell'output del terminale durante l'esecuzione dei comandi. Questo limite ha la precedenza sul limite di righe per prevenire problemi di memoria causati da righe estremamente lunghe. Se superato, l'output verrà troncato. <0>Scopri di più" + "label": "Limite caratteri terminale", + "description": "Sovrascrive il limite di righe per prevenire problemi di memoria imponendo un limite rigido alla dimensione di output. Se superato, mantiene l'inizio e la fine e mostra un segnaposto a Roo dove il contenuto viene saltato. <0>Scopri di più" }, "shellIntegrationTimeout": { - "label": "Timeout integrazione shell del terminale", - "description": "Tempo massimo di attesa per l'inizializzazione dell'integrazione della shell prima di eseguire i comandi. Per gli utenti con tempi di avvio della shell lunghi, questo valore potrebbe dover essere aumentato se si vedono errori \"Shell Integration Unavailable\" nel terminale. <0>Scopri di più" + "label": "Timeout integrazione shell terminale", + "description": "Quanto tempo attendere l'integrazione della shell di VS Code prima di eseguire i comandi. 
Aumenta se la tua shell si avvia lentamente o vedi errori 'Integrazione Shell Non Disponibile'. <0>Scopri di più" }, "shellIntegrationDisabled": { - "label": "Disabilita l'integrazione della shell del terminale", - "description": "Abilita questa opzione se i comandi del terminale non funzionano correttamente o se vedi errori 'Shell Integration Unavailable'. Questo utilizza un metodo più semplice per eseguire i comandi, bypassando alcune funzionalità avanzate del terminale. <0>Scopri di più" + "label": "Usa terminale in linea (consigliato)", + "description": "Esegui comandi nel terminale in linea (chat) per bypassare profili/integrazione shell per esecuzioni più veloci e affidabili. Quando disabilitato, Roo usa il terminale VS Code con il tuo profilo shell, prompt e plugin. <0>Scopri di più" }, "commandDelay": { "label": "Ritardo comando terminale", - "description": "Ritardo in millisecondi da aggiungere dopo l'esecuzione del comando. L'impostazione predefinita di 0 disabilita completamente il ritardo. Questo può aiutare a garantire che l'output del comando sia catturato completamente nei terminali con problemi di temporizzazione. Nella maggior parte dei terminali viene implementato impostando `PROMPT_COMMAND='sleep N'` e Powershell aggiunge `start-sleep` alla fine di ogni comando. In origine era una soluzione per il bug VSCode#237208 e potrebbe non essere necessario. <0>Scopri di più" + "description": "Aggiunge una breve pausa dopo ogni comando affinché il terminale VS Code possa svuotare tutto l'output (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). Usa solo se vedi output finale mancante; altrimenti lascia a 0. <0>Scopri di più" }, "compressProgressBar": { - "label": "Comprimi output barre di progresso", - "description": "Quando abilitato, elabora l'output del terminale con ritorni a capo (\\r) per simulare come un terminale reale visualizzerebbe il contenuto. 
Questo rimuove gli stati intermedi delle barre di progresso, mantenendo solo lo stato finale, il che conserva spazio di contesto per informazioni più rilevanti. <0>Scopri di più" + "label": "Comprimi output barra di avanzamento", + "description": "Comprime barre di avanzamento/spinner in modo che venga mantenuto solo lo stato finale (risparmia token). <0>Scopri di più" }, "powershellCounter": { - "label": "Abilita soluzione temporanea contatore PowerShell", - "description": "Quando abilitato, aggiunge un contatore ai comandi PowerShell per garantire la corretta esecuzione dei comandi. Questo aiuta con i terminali PowerShell che potrebbero avere problemi con la cattura dell'output. <0>Scopri di più" + "label": "Abilita workaround contatore PowerShell", + "description": "Attiva quando l'output PowerShell è mancante o duplicato; aggiunge un piccolo contatore a ogni comando per stabilizzare l'output. Mantieni disattivato se l'output sembra già corretto. <0>Scopri di più" }, "zshClearEolMark": { - "label": "Cancella marcatore fine riga ZSH", - "description": "Quando abilitato, cancella il marcatore di fine riga ZSH impostando PROMPT_EOL_MARK=''. Questo previene problemi con l'interpretazione dell'output dei comandi quando termina con caratteri speciali come '%'. <0>Scopri di più" + "label": "Cancella marcatore EOL ZSH", + "description": "Attiva quando vedi % vaganti alla fine delle righe o l'analisi sembra sbagliata; omette il marcatore di fine riga (%) di Zsh. <0>Scopri di più" }, "zshOhMy": { "label": "Abilita integrazione Oh My Zsh", - "description": "Quando abilitato, imposta ITERM_SHELL_INTEGRATION_INSTALLED=Yes per abilitare le funzionalità di integrazione della shell Oh My Zsh. L'applicazione di questa impostazione potrebbe richiedere il riavvio dell'IDE. <0>Scopri di più" + "description": "Attiva quando il tuo tema/plugin Oh My Zsh si aspetta l'integrazione della shell; imposta ITERM_SHELL_INTEGRATION_INSTALLED=Yes. 
Disattiva per evitare di impostare quella variabile. <0>Scopri di più" }, "zshP10k": { "label": "Abilita integrazione Powerlevel10k", - "description": "Quando abilitato, imposta POWERLEVEL9K_TERM_SHELL_INTEGRATION=true per abilitare le funzionalità di integrazione della shell Powerlevel10k. <0>Scopri di più" + "description": "Attiva quando usi l'integrazione della shell Powerlevel10k. <0>Scopri di più" }, "zdotdir": { "label": "Abilita gestione ZDOTDIR", - "description": "Quando abilitato, crea una directory temporanea per ZDOTDIR per gestire correttamente l'integrazione della shell zsh. Questo assicura che l'integrazione della shell VSCode funzioni correttamente con zsh mantenendo la tua configurazione zsh. <0>Scopri di più" + "description": "Attiva quando l'integrazione della shell zsh fallisce o è in conflitto con i tuoi dotfile. <0>Scopri di più" }, "inheritEnv": { "label": "Eredita variabili d'ambiente", - "description": "Quando abilitato, il terminale eredita le variabili d'ambiente dal processo padre di VSCode, come le impostazioni di integrazione della shell definite nel profilo utente. Questo attiva direttamente l'impostazione globale di VSCode `terminal.integrated.inheritEnv`. <0>Scopri di più" + "description": "Attiva per ereditare le variabili d'ambiente dal processo padre di VS Code. <0>Scopri di più" } }, "advancedSettings": { @@ -748,7 +768,7 @@ "advanced": { "diff": { "label": "Abilita modifica tramite diff", - "description": "Quando abilitato, Kilo Code sarà in grado di modificare i file più velocemente e rifiuterà automaticamente scritture di file completi troncati. 
Funziona meglio con l'ultimo modello Claude 4 Sonnet.", + "description": "Quando abilitato, Kilo Code sarà in grado di modificare i file più velocemente e rifiuterà automaticamente scritture di file completi troncati", "strategy": { "label": "Strategia diff", "options": { @@ -777,10 +797,6 @@ "name": "Usa strategia diff unificata sperimentale", "description": "Abilita la strategia diff unificata sperimentale. Questa strategia potrebbe ridurre il numero di tentativi causati da errori del modello, ma può causare comportamenti imprevisti o modifiche errate. Abilitala solo se comprendi i rischi e sei disposto a rivedere attentamente tutte le modifiche." }, - "SEARCH_AND_REPLACE": { - "name": "Usa strumento di ricerca e sostituzione sperimentale", - "description": "Abilita lo strumento di ricerca e sostituzione sperimentale, consentendo a Kilo Code di sostituire più istanze di un termine di ricerca in una singola richiesta." - }, "INSERT_BLOCK": { "name": "Usa strumento di inserimento contenuti sperimentale", "description": "Abilita lo strumento di inserimento contenuti sperimentale, consentendo a Kilo Code di inserire contenuti a numeri di riga specifici senza dover creare un diff." @@ -867,8 +883,6 @@ "modelInfo": { "supportsImages": "Supporta immagini", "noImages": "Non supporta immagini", - "supportsComputerUse": "Supporta uso del computer", - "noComputerUse": "Non supporta uso del computer", "supportsPromptCache": "Supporta cache dei prompt", "noPromptCache": "Non supporta cache dei prompt", "contextWindow": "Finestra di contesto:", diff --git a/webview-ui/src/i18n/locales/ja/chat.json b/webview-ui/src/i18n/locales/ja/chat.json index 90f0c564b08..f9ba4b1a8a7 100644 --- a/webview-ui/src/i18n/locales/ja/chat.json +++ b/webview-ui/src/i18n/locales/ja/chat.json @@ -156,6 +156,9 @@ "initializingWarning": "チェックポイントの初期化中... 
時間がかかりすぎる場合は、設定でチェックポイントを無効にしてタスクを再開できます。", "menu": { "viewDiff": "差分を表示", + "more": "その他のオプション", + "viewDiffFromInit": "すべての変更を表示", + "viewDiffWithCurrent": "このチェックポイント以降の変更を表示", "restore": "チェックポイントを復元", "restoreFiles": "ファイルを復元", "restoreFilesDescription": "この時点で撮影されたスナップショットにプロジェクトのファイルを復元します。", @@ -272,6 +275,7 @@ "toggleAriaLabel": "自動承認を切り替える", "disabledAriaLabel": "自動承認が無効です - 最初にオプションを選択してください", "triggerLabelOff": "自動承認オフ", + "triggerLabelOffShort": "オフ", "triggerLabel_zero": "0個の自動承認", "triggerLabel_one": "1個の自動承認済み", "triggerLabel_other": "{{count}}個の自動承認済み", @@ -301,6 +305,19 @@ "selectModel": "設定で Roo Code Cloud プロバイダーから roo/code-supernova を選択して開始してください。", "goToSettingsButton": "設定に移動" }, + "release": { + "heading": "拡張機能の新機能:", + "openRouterEmbeddings": "OpenRouter埋め込みモデルのサポート", + "chutesDynamic": "Chutesが最新モデルを動的にロード", + "queuedMessagesFix": "キューメッセージが失われる問題の修正" + }, + "cloudAgents": { + "heading": "クラウドの新機能:", + "prFixer": "PRレビューアを補完するクラウドエージェントPR Fixerを導入します。", + "prFixerDescription": "Roo CodeのPR FixerはGitHubから直接、高品質な変更をPRに適用します。PRコメントから呼び出すと、コメント履歴全体を読んでコンテキスト、合意、トレードオフを理解し、適切な修正を実装します。", + "tryPrFixerButton": "PR Fixerを試す" + }, + "careers": "また、採用中です!", "socialLinks": "X、Discord、またはr/RooCodeでフォローしてください 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/ja/common.json b/webview-ui/src/i18n/locales/ja/common.json index 47af886fb79..6c27001006c 100644 --- a/webview-ui/src/i18n/locales/ja/common.json +++ b/webview-ui/src/i18n/locales/ja/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}}ヶ月前", "year_ago": "1年前", "years_ago": "{{count}}年前" + }, + "errors": { + "wait_checkpoint_long_time": "{{timeout}} 秒間チェックポイントの初期化を待機しました。チェックポイント機能が不要な場合は、チェックポイント設定でオフにしてください。", + "init_checkpoint_fail_long_time": "チェックポイントの初期化が {{timeout}} 秒以上かかったため、このタスクではチェックポイントが無効化されました。チェックポイントをオフにするか、チェックポイント設定で待機時間を延長できます。" } } diff --git a/webview-ui/src/i18n/locales/ja/mcp.json b/webview-ui/src/i18n/locales/ja/mcp.json index 2378010e68d..0bda7b26fa0 100644 
--- a/webview-ui/src/i18n/locales/ja/mcp.json +++ b/webview-ui/src/i18n/locales/ja/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "ツール", "resources": "リソース", - "errors": "エラー" + "logs": "ログ" }, "emptyState": { "noTools": "ツールが見つかりません", "noResources": "リソースが見つかりません", - "noErrors": "エラーが見つかりません" + "noLogs": "ログはまだありません" }, "networkTimeout": { "label": "ネットワークタイムアウト", diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json index 3d0d2412f07..3b64a088e66 100644 --- a/webview-ui/src/i18n/locales/ja/settings.json +++ b/webview-ui/src/i18n/locales/ja/settings.json @@ -58,6 +58,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "APIキー", "vercelAiGatewayApiKeyPlaceholder": "Vercel AI GatewayのAPIキーを入力してください", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "OpenRouter APIキー", + "openRouterApiKeyPlaceholder": "OpenRouter APIキーを入力してください", "mistralProvider": "Mistral", "mistralApiKeyLabel": "APIキー:", "mistralApiKeyPlaceholder": "Mistral APIキーを入力してください", @@ -127,7 +130,8 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway APIキーが必要です", "ollamaBaseUrlRequired": "OllamaのベースURLが必要です", "baseUrlRequired": "ベースURLが必要です", - "modelDimensionMinValue": "モデルの次元は0より大きくなければなりません" + "modelDimensionMinValue": "モデルの次元は0より大きくなければなりません", + "openRouterApiKeyRequired": "OpenRouter APIキーが必要です" }, "advancedConfigLabel": "詳細設定", "searchMinScoreLabel": "検索スコアのしきい値", @@ -310,6 +314,9 @@ "getZaiApiKey": "Z AI APIキーを取得", "zaiEntrypoint": "Z AI エントリーポイント", "zaiEntrypointDescription": "お住まいの地域に応じて適切な API エントリーポイントを選択してください。中国にお住まいの場合は open.bigmodel.cn を選択してください。それ以外の場合は api.z.ai を選択してください。", + "minimaxApiKey": "MiniMax APIキー", + "getMiniMaxApiKey": "MiniMax APIキーを取得", + "minimaxBaseUrl": "MiniMax エントリーポイント", "geminiApiKey": "Gemini APIキー", "getGroqApiKey": "Groq APIキーを取得", "groqApiKey": "Groq APIキー", @@ -363,7 +370,7 @@ "enablePromptCachingTitle": "サポートされているモデルのパフォーマンスを向上させ、コストを削減するためにプロンプトキャッシュを有効化します。", 
"cacheUsageNote": "注意:キャッシュの使用が表示されない場合は、別のモデルを選択してから希望のモデルを再度選択してみてください。", "vscodeLmModel": "言語モデル", - "vscodeLmWarning": "注意:これは非常に実験的な統合であり、プロバイダーのサポートは異なります。モデルがサポートされていないというエラーが表示された場合、それはプロバイダー側の問題です。", + "vscodeLmWarning": "注意: VS Code Language Model API を通じて利用されるモデルは、プロバイダーによってラップまたは微調整されている場合があります。したがって、一般的なプロバイダーやルーターから同じモデルを直接使用する場合と挙動が異なることがあります。『Language Model』ドロップダウンのモデルを使用するには、まずそのモデルに切り替え、Copilot Chat のプロンプトで『承認』をクリックしてください。そうしないと、400『The requested model is not supported』などのエラーが表示されることがあります。", "geminiParameters": { "urlContext": { "title": "URLコンテキストを有効にする", @@ -440,7 +447,7 @@ }, "computerUse": { "label": "コンピューター使用", - "description": "このモデルはブラウザとの対話が可能ですか?(例:Claude 3.7 Sonnet)" + "description": "このモデルはブラウザとの対話が可能ですか?" }, "promptCache": { "label": "プロンプトキャッシュ", @@ -482,6 +489,7 @@ }, "reasoningEffort": { "label": "モデル推論の労力", + "none": "なし", "minimal": "最小 (最速)", "high": "高", "medium": "中", @@ -554,6 +562,10 @@ } }, "checkpoints": { + "timeout": { + "label": "チェックポイント初期化タイムアウト(秒)", + "description": "チェックポイントサービスの初期化を待つ最大時間。デフォルトは15秒。範囲:10~60秒。" + }, "enable": { "label": "自動チェックポイントを有効化", "description": "有効にすると、Kilo Codeはタスク実行中に自動的にチェックポイントを作成し、変更の確認や以前の状態への復帰を容易にします。 <0>詳細情報" @@ -682,6 +694,14 @@ "label": "最大合計画像サイズ", "mb": "MB", "description": "単一のread_file操作で処理されるすべての画像の累積サイズ制限(MB単位)。複数の画像を読み取る際、各画像のサイズが合計に加算されます。別の画像を含めるとこの制限を超える場合、その画像はスキップされます。" + }, + "includeCurrentTime": { + "label": "現在の時刻をコンテキストに含める", + "description": "有効にすると、現在の時刻とタイムゾーン情報がシステムプロンプトに含まれます。モデルが時間に関する懸念で動作を停止する場合は無効にしてください。" + }, + "includeCurrentCost": { + "label": "現在のコストをコンテキストに含める", + "description": "有効にすると、現在のAPI使用コストがシステムプロンプトに含まれます。モデルがコストに関する懸念で動作を停止する場合は無効にしてください。" } }, "terminal": { @@ -691,55 +711,55 @@ }, "advanced": { "label": "ターミナル設定:詳細", - "description": "以下のオプションは設定を適用するためにターミナルの再起動が必要な場合があります" + "description": "これらの設定は、「インラインターミナルを使用」が無効の場合にのみ適用されます。VS Code ターミナルのみに影響し、IDE の再起動が必要になる場合があります。" }, "outputLineLimit": { "label": "ターミナル出力制限", - "description": 
"コマンド実行時にターミナル出力に含める最大行数。超過すると中央から行が削除され、tokenを節約します。 <0>詳細情報" + "description": "制限内に収めるため最初と最後の行を保持し、中間を削除します。トークンを節約するには下げる;Rooに中間の詳細を与えるには上げる。Rooはコンテンツがスキップされた箇所にプレースホルダーを表示します。<0>詳細情報" }, "outputCharacterLimit": { - "label": "ターミナルの文字数制限", - "description": "コマンド実行時にターミナル出力に含める最大文字数。この制限は、非常に長い行によるメモリ問題を回避するため、行数制限よりも優先されます。超過した場合、出力は切り捨てられます。<0>詳細" + "label": "ターミナル文字制限", + "description": "出力サイズにハードキャップを適用してメモリ問題を防ぐため、行制限を上書きします。超過した場合、最初と最後を保持し、コンテンツがスキップされた箇所にRooにプレースホルダーを表示します。<0>詳細情報" }, "shellIntegrationTimeout": { "label": "ターミナルシェル統合タイムアウト", - "description": "コマンドを実行する前にシェル統合の初期化を待つ最大時間。シェルの起動時間が長いユーザーの場合、ターミナルで「Shell Integration Unavailable」エラーが表示される場合は、この値を増やす必要があるかもしれません。 <0>詳細情報" + "description": "コマンドを実行する前にVS Codeシェル統合を待機する時間。シェルが遅く起動する場合や「シェル統合が利用できません」というエラーが表示される場合は、この値を増やしてください。<0>詳細" }, "shellIntegrationDisabled": { - "label": "ターミナルシェル統合を無効にする", - "description": "ターミナルコマンドが正しく機能しない場合や、「シェル統合が利用できません」というエラーが表示される場合は、これを有効にします。これにより、一部の高度なターミナル機能をバイパスして、コマンドを実行するより簡単な方法が使用されます。 <0>詳細情報" + "label": "インラインターミナルを使用(推奨)", + "description": "より高速で信頼性の高い実行のため、シェルプロファイル/統合をバイパスしてインラインターミナル(チャット)でコマンドを実行します。無効にすると、Roo はシェルプロファイル、プロンプト、プラグインと共に VS Code ターミナルを使用します。<0>詳細情報" }, "commandDelay": { "label": "ターミナルコマンド遅延", - "description": "コマンド実行後に追加する遅延時間(ミリ秒)。デフォルト設定の0は遅延を完全に無効にします。これはタイミングの問題があるターミナルでコマンド出力を完全にキャプチャするのに役立ちます。ほとんどのターミナルでは`PROMPT_COMMAND='sleep N'`を設定することで実装され、PowerShellは各コマンドの最後に`start-sleep`を追加します。元々はVSCodeバグ#237208の回避策で、必要ない場合があります。 <0>詳細情報" + "description": "VS Codeターミナルがすべての出力をフラッシュできるよう、各コマンド後に短い一時停止を追加します(bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep)。末尾出力が欠落している場合のみ使用;それ以外は0のままにします。<0>詳細情報" }, "compressProgressBar": { - "label": "プログレスバー出力を圧縮", - "description": "有効にすると、キャリッジリターン(\\r)を含むターミナル出力を処理して、実際のターミナルがコンテンツを表示する方法をシミュレートします。これによりプログレスバーの中間状態が削除され、最終状態のみが保持されるため、より関連性の高い情報のためのコンテキスト空間が節約されます。 <0>詳細情報" + "label": "プログレスバー出力を圧縮", + "description": "プログレスバー/スピナーを折りたたんで、最終状態のみを保持します(トークンを節約します)。<0>詳細情報" }, 
"powershellCounter": { - "label": "PowerShellカウンター回避策を有効化", - "description": "有効にすると、PowerShellコマンドにカウンターを追加して、コマンドの正しい実行を確保します。これは出力のキャプチャに問題がある可能性のあるPowerShellターミナルで役立ちます。 <0>詳細情報" + "label": "PowerShellカウンターの回避策を有効にする", + "description": "PowerShellの出力が欠落または重複している場合にこれをオンにします。出力を安定させるために各コマンドに小さなカウンターを追加します。出力がすでに正しい場合はオフのままにします。<0>詳細情報" }, "zshClearEolMark": { - "label": "ZSH行末マークをクリア", - "description": "有効にすると、PROMPT_EOL_MARK=''を設定してZSHの行末マークをクリアします。これにより、'%'などの特殊文字で終わるコマンド出力の解釈に関する問題を防ぎます。 <0>詳細情報" + "label": "ZSH EOLマークをクリア", + "description": "行末に迷子の%が表示されたり、解析が間違っているように見える場合にこれをオンにします。Zshの行末マーク(%)を省略します。<0>詳細情報" }, "zshOhMy": { - "label": "Oh My Zsh 統合を有効化", - "description": "有効にすると、ITERM_SHELL_INTEGRATION_INSTALLED=Yes を設定して Oh My Zsh シェル統合機能を有効にします。この設定を適用するには、IDEの再起動が必要な場合があります。 <0>詳細情報" + "label": "Oh My Zsh統合を有効にする", + "description": "Oh My Zshのテーマ/プラグインがシェル統合を期待している場合にこれをオンにします。ITERM_SHELL_INTEGRATION_INSTALLED=Yesを設定します。その変数を設定しないようにするにはオフにします。<0>詳細情報" }, "zshP10k": { - "label": "Powerlevel10k 統合を有効化", - "description": "有効にすると、POWERLEVEL9K_TERM_SHELL_INTEGRATION=true を設定して Powerlevel10k シェル統合機能を有効にします。 <0>詳細情報" + "label": "Powerlevel10k統合を有効にする", + "description": "Powerlevel10kシェル統合を使用している場合にこれをオンにします。<0>詳細情報" }, "zdotdir": { - "label": "ZDOTDIR 処理を有効化", - "description": "有効にすると、zsh シェル統合を適切に処理するために ZDOTDIR 用の一時ディレクトリを作成します。これにより、zsh の設定を保持しながら VSCode のシェル統合が正しく機能します。 <0>詳細情報" + "label": "ZDOTDIR処理を有効にする", + "description": "zshシェル統合が失敗したり、ドットファイルと競合したりする場合にこれをオンにします。<0>詳細情報" }, "inheritEnv": { "label": "環境変数を継承", - "description": "有効にすると、ターミナルは VSCode の親プロセスから環境変数を継承します。ユーザープロファイルで定義されたシェル統合設定などが含まれます。これは VSCode のグローバル設定 `terminal.integrated.inheritEnv` を直接切り替えます。 <0>詳細情報" + "description": "親VS Codeプロセスから環境変数を継承するには、これをオンにします。<0>詳細情報" } }, "advancedSettings": { @@ -748,7 +768,7 @@ "advanced": { "diff": { "label": "diff経由の編集を有効化", - "description": "有効にすると、Kilo Codeはファイルをより迅速に編集でき、切り詰められた全ファイル書き込みを自動的に拒否します。最新のClaude 4 Sonnetモデルで最良に機能します。", + 
"description": "有効にすると、Kilo Codeはファイルをより迅速に編集でき、切り詰められた全ファイル書き込みを自動的に拒否します。", "strategy": { "label": "Diff戦略", "options": { @@ -777,10 +797,6 @@ "name": "実験的な統合diff戦略を使用する", "description": "実験的な統合diff戦略を有効にします。この戦略はモデルエラーによる再試行の回数を減らす可能性がありますが、予期しない動作や不正確な編集を引き起こす可能性があります。リスクを理解し、すべての変更を注意深く確認する準備がある場合にのみ有効にしてください。" }, - "SEARCH_AND_REPLACE": { - "name": "実験的な検索と置換ツールを使用する", - "description": "実験的な検索と置換ツールを有効にし、Kilo Codeが1つのリクエストで検索語の複数のインスタンスを置き換えることを可能にします。" - }, "INSERT_BLOCK": { "name": "実験的なコンテンツ挿入ツールを使用する", "description": "実験的なコンテンツ挿入ツールを有効にし、Kilo Codeがdiffを作成せずに特定の行番号にコンテンツを挿入できるようにします。" @@ -867,8 +883,6 @@ "modelInfo": { "supportsImages": "画像をサポート", "noImages": "画像をサポートしていません", - "supportsComputerUse": "コンピュータ使用をサポート", - "noComputerUse": "コンピュータ使用をサポートしていません", "supportsPromptCache": "プロンプトキャッシュをサポート", "noPromptCache": "プロンプトキャッシュをサポートしていません", "contextWindow": "コンテキストウィンドウ:", diff --git a/webview-ui/src/i18n/locales/ko/chat.json b/webview-ui/src/i18n/locales/ko/chat.json index 6cd64ae94f5..968a20d62fd 100644 --- a/webview-ui/src/i18n/locales/ko/chat.json +++ b/webview-ui/src/i18n/locales/ko/chat.json @@ -156,6 +156,9 @@ "initializingWarning": "체크포인트 초기화 중... 
시간이 너무 오래 걸리면 설정에서 체크포인트를 비활성화하고 작업을 다시 시작할 수 있습니다.", "menu": { "viewDiff": "차이점 보기", + "more": "더 많은 옵션", + "viewDiffFromInit": "모든 변경 사항 보기", + "viewDiffWithCurrent": "이 체크포인트 이후 변경 사항 보기", "restore": "체크포인트 복원", "restoreFiles": "파일 복원", "restoreFilesDescription": "프로젝트 파일을 이 시점에 찍힌 스냅샷으로 복원합니다.", @@ -272,6 +275,7 @@ "toggleAriaLabel": "자동 승인 전환", "disabledAriaLabel": "자동 승인 비활성화됨 - 먼저 옵션을 선택하십시오", "triggerLabelOff": "자동 승인 꺼짐", + "triggerLabelOffShort": "꺼짐", "triggerLabel_zero": "0개 자동 승인됨", "triggerLabel_one": "1개 자동 승인됨", "triggerLabel_other": "{{count}}개 자동 승인됨", @@ -301,6 +305,19 @@ "selectModel": "설정에서 Roo Code Cloud 제공업체의 roo/code-supernova를 선택하여 시작하세요.", "goToSettingsButton": "설정으로 이동" }, + "release": { + "heading": "확장 프로그램의 새로운 기능:", + "openRouterEmbeddings": "OpenRouter 임베딩 모델 지원", + "chutesDynamic": "Chutes가 이제 최신 모델을 동적으로 로드합니다", + "queuedMessagesFix": "대기 중인 메시지가 손실되는 문제 수정" + }, + "cloudAgents": { + "heading": "클라우드의 새로운 기능:", + "prFixer": "PR 리뷰어를 보완하는 PR Fixer 클라우드 에이전트를 소개합니다.", + "prFixerDescription": "Roo Code의 PR Fixer는 GitHub에서 직접 PR에 고품질 변경사항을 적용합니다. PR 댓글을 통해 호출하면 전체 댓글 기록을 읽어 컨텍스트, 합의사항 및 트레이드오프를 이해한 다음 올바른 수정을 구현합니다.", + "tryPrFixerButton": "PR Fixer 사용해보기" + }, + "careers": "그리고, 채용 중입니다!", "socialLinks": "X, Discord, 또는 r/RooCode에서 만나요 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/ko/common.json b/webview-ui/src/i18n/locales/ko/common.json index f1dfbf8da5b..2f9e76b51d9 100644 --- a/webview-ui/src/i18n/locales/ko/common.json +++ b/webview-ui/src/i18n/locales/ko/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}}개월 전", "year_ago": "1년 전", "years_ago": "{{count}}년 전" + }, + "errors": { + "wait_checkpoint_long_time": "{{timeout}}초 동안 체크포인트 초기화를 기다렸습니다. 체크포인트 기능이 필요 없다면 체크포인트 설정에서 꺼 주세요.", + "init_checkpoint_fail_long_time": "체크포인트 초기화가 {{timeout}}초 이상 걸려서 이 작업에 대해 체크포인트가 비활성화되었습니다. 체크포인트를 끄거나 체크포인트 설정에서 대기 시간을 늘릴 수 있습니다." 
} } diff --git a/webview-ui/src/i18n/locales/ko/mcp.json b/webview-ui/src/i18n/locales/ko/mcp.json index dacf16901be..4f173f6bc12 100644 --- a/webview-ui/src/i18n/locales/ko/mcp.json +++ b/webview-ui/src/i18n/locales/ko/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "도구", "resources": "리소스", - "errors": "오류" + "logs": "로그" }, "emptyState": { "noTools": "도구를 찾을 수 없음", "noResources": "리소스를 찾을 수 없음", - "noErrors": "오류를 찾을 수 없음" + "noLogs": "아직 로그 없음" }, "networkTimeout": { "label": "네트워크 타임아웃", diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json index 5594ed61555..0634e580e23 100644 --- a/webview-ui/src/i18n/locales/ko/settings.json +++ b/webview-ui/src/i18n/locales/ko/settings.json @@ -60,6 +60,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "API 키", "vercelAiGatewayApiKeyPlaceholder": "Vercel AI Gateway API 키를 입력하세요", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "OpenRouter API 키", + "openRouterApiKeyPlaceholder": "OpenRouter API 키를 입력하세요", "openaiCompatibleProvider": "OpenAI 호환", "openAiKeyLabel": "OpenAI API 키", "openAiKeyPlaceholder": "OpenAI API 키를 입력하세요", @@ -126,7 +129,8 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API 키가 필요합니다", "ollamaBaseUrlRequired": "Ollama 기본 URL이 필요합니다", "baseUrlRequired": "기본 URL이 필요합니다", - "modelDimensionMinValue": "모델 차원은 0보다 커야 합니다" + "modelDimensionMinValue": "모델 차원은 0보다 커야 합니다", + "openRouterApiKeyRequired": "OpenRouter API 키가 필요합니다" }, "advancedConfigLabel": "고급 구성", "searchMinScoreLabel": "검색 점수 임계값", @@ -309,6 +313,9 @@ "getZaiApiKey": "Z AI API 키 받기", "zaiEntrypoint": "Z AI 엔트리포인트", "zaiEntrypointDescription": "위치에 따라 적절한 API 엔트리포인트를 선택하세요. 중국에 있다면 open.bigmodel.cn을 선택하세요. 
그렇지 않으면 api.z.ai를 선택하세요.", + "minimaxApiKey": "MiniMax API 키", + "getMiniMaxApiKey": "MiniMax API 키 받기", + "minimaxBaseUrl": "MiniMax 엔트리포인트", "geminiApiKey": "Gemini API 키", "getGroqApiKey": "Groq API 키 받기", "groqApiKey": "Groq API 키", @@ -362,7 +369,7 @@ "enablePromptCachingTitle": "지원되는 모델의 성능을 향상시키고 비용을 절감하기 위해 프롬프트 캐시를 활성화합니다.", "cacheUsageNote": "참고: 캐시 사용이 표시되지 않는 경우, 다른 모델을 선택한 다음 원하는 모델을 다시 선택해 보세요.", "vscodeLmModel": "언어 모델", - "vscodeLmWarning": "참고: 이는 매우 실험적인 통합이며, 공급자 지원은 다를 수 있습니다. 모델이 지원되지 않는다는 오류가 발생하면, 이는 공급자 측의 문제입니다.", + "vscodeLmWarning": "참고: VS Code Language Model API를 통해 액세스되는 모델은 공급자가 래핑하거나 미세 조정했을 수 있어, 일반적인 공급자나 라우터에서 동일한 모델을 직접 사용할 때와 동작이 다를 수 있습니다. ‘Language Model’ 드롭다운의 모델을 사용하려면 먼저 해당 모델로 전환한 다음 Copilot Chat 프롬프트에서 ‘허용(수락)’을 클릭하세요. 그렇지 않으면 400 ‘The requested model is not supported’와 같은 오류가 발생할 수 있습니다.", "geminiParameters": { "urlContext": { "title": "URL 컨텍스트 활성화", @@ -439,7 +446,7 @@ }, "computerUse": { "label": "컴퓨터 사용", - "description": "이 모델이 브라우저와 상호 작용할 수 있습니까? (예: Claude 3.7 Sonnet)" + "description": "이 모델이 브라우저와 상호 작용할 수 있습니까?" }, "promptCache": { "label": "프롬프트 캐시", @@ -481,6 +488,7 @@ }, "reasoningEffort": { "label": "모델 추론 노력", + "none": "없음", "minimal": "최소 (가장 빠름)", "high": "높음", "medium": "중간", @@ -553,6 +561,10 @@ } }, "checkpoints": { + "timeout": { + "label": "체크포인트 초기화 타임아웃(초)", + "description": "체크포인트 서비스 초기화를 기다리는 최대 시간입니다. 기본값은 15초. 범위: 10~60초." + }, "enable": { "label": "자동 체크포인트 활성화", "description": "활성화되면 Kilo Code는 작업 실행 중에 자동으로 체크포인트를 생성하여 변경 사항을 검토하거나 이전 상태로 되돌리기 쉽게 합니다. <0>더 알아보기" @@ -681,6 +693,14 @@ "label": "최대 총 이미지 크기", "mb": "MB", "description": "단일 read_file 작업에서 처리되는 모든 이미지의 최대 누적 크기 제한(MB 단위)입니다. 여러 이미지를 읽을 때 각 이미지의 크기가 총계에 추가됩니다. 다른 이미지를 포함하면 이 제한을 초과하는 경우 해당 이미지는 건너뜁니다." + }, + "includeCurrentTime": { + "label": "컨텍스트에 현재 시간 포함", + "description": "활성화하면 현재 시간과 시간대 정보가 시스템 프롬프트에 포함됩니다. 시간 문제로 모델이 작동을 멈추면 비활성화하세요." 
+ }, + "includeCurrentCost": { + "label": "컨텍스트에 현재 비용 포함", + "description": "활성화하면 현재 API 사용 비용이 시스템 프롬프트에 포함됩니다. 비용 문제로 모델이 작동을 멈추면 비활성화하세요." } }, "terminal": { @@ -690,55 +710,55 @@ }, "advanced": { "label": "터미널 설정: 고급", - "description": "다음 옵션들은 설정을 적용하기 위해 터미널 재시작이 필요할 수 있습니다" + "description": "이 설정은 '인라인 터미널 사용'이 비활성화된 경우에만 적용됩니다. VS Code 터미널에만 영향을 미치며 IDE를 다시 시작해야 할 수 있습니다." }, "outputLineLimit": { "label": "터미널 출력 제한", - "description": "명령 실행 시 터미널 출력에 포함할 최대 라인 수. 초과 시 중간에서 라인이 제거되어 token이 절약됩니다. <0>더 알아보기" + "description": "제한 내로 유지하기 위해 첫 줄과 마지막 줄을 유지하고 중간을 삭제합니다. 토큰을 절약하려면 낮추고; Roo에게 더 많은 중간 세부 정보를 제공하려면 높입니다. Roo는 콘텐츠가 건너뛴 곳에 자리 표시자를 봅니다.<0>자세히 알아보기" }, "outputCharacterLimit": { "label": "터미널 문자 제한", - "description": "명령을 실행할 때 터미널 출력에 포함할 최대 문자 수입니다. 이 제한은 매우 긴 줄로 인한 메모리 문제를 방지하기 위해 줄 제한보다 우선합니다. 초과하면 출력이 잘립니다. <0>더 알아보기" + "description": "출력 크기에 대한 엄격한 상한을 적용하여 메모리 문제를 방지하기 위해 줄 제한을 재정의합니다. 초과하면 시작과 끝을 유지하고 내용이 생략된 곳에 Roo에게 자리 표시자를 표시합니다. <0>자세히 알아보기" }, "shellIntegrationTimeout": { - "label": "터미널 쉘 통합 타임아웃", - "description": "명령을 실행하기 전에 쉘 통합이 초기화될 때까지 기다리는 최대 시간. 쉘 시작 시간이 긴 사용자의 경우, 터미널에서 \"Shell Integration Unavailable\" 오류가 표시되면 이 값을 늘려야 할 수 있습니다. <0>더 알아보기" + "label": "터미널 셸 통합 시간 초과", + "description": "명령을 실행하기 전에 VS Code 셸 통합을 기다리는 시간입니다. 셸이 느리게 시작되거나 '셸 통합을 사용할 수 없음' 오류가 표시되면 이 값을 늘리십시오. <0>자세히 알아보기" }, "shellIntegrationDisabled": { - "label": "터미널 셸 통합 비활성화", - "description": "터미널 명령이 올바르게 작동하지 않거나 '셸 통합을 사용할 수 없음' 오류가 표시되는 경우 이 옵션을 활성화합니다. 이렇게 하면 일부 고급 터미널 기능을 우회하여 명령을 실행하는 더 간단한 방법을 사용합니다. <0>더 알아보기" + "label": "인라인 터미널 사용(권장)", + "description": "더 빠르고 안정적인 실행을 위해 셸 프로필/통합을 우회하려면 인라인 터미널(채팅)에서 명령을 실행하십시오. 비활성화하면 Roo는 셸 프로필, 프롬프트 및 플러그인과 함께 VS Code 터미널을 사용합니다. <0>자세히 알아보기" }, "commandDelay": { "label": "터미널 명령 지연", - "description": "명령 실행 후 추가할 지연 시간(밀리초). 기본값 0은 지연을 완전히 비활성화합니다. 이는 타이밍 문제가 있는 터미널에서 명령 출력을 완전히 캡처하는 데 도움이 될 수 있습니다. 대부분의 터미널에서는 `PROMPT_COMMAND='sleep N'`을 설정하여 구현되며, PowerShell은 각 명령 끝에 `start-sleep`을 추가합니다. 
원래는 VSCode 버그#237208에 대한 해결책이었으며 필요하지 않을 수 있습니다. <0>더 알아보기" + "description": "VS Code 터미널이 모든 출력을 플러시할 수 있도록 각 명령 후에 짧은 일시 중지를 추가합니다(bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). 누락된 꼬리 출력이 표시되는 경우에만 사용하고, 그렇지 않으면 0으로 둡니다. <0>자세히 알아보기" }, "compressProgressBar": { - "label": "진행 표시줄 출력 압축", - "description": "활성화하면 캐리지 리턴(\\r)이 포함된 터미널 출력을 처리하여 실제 터미널이 콘텐츠를 표시하는 방식을 시뮬레이션합니다. 이는 진행 표시줄의 중간 상태를 제거하고 최종 상태만 유지하여 더 관련성 있는 정보를 위한 컨텍스트 공간을 절약합니다. <0>더 알아보기" + "label": "진행률 표시줄 출력 압축", + "description": "진행률 표시줄/스피너를 축소하여 최종 상태만 유지합니다(토큰 절약). <0>자세히 알아보기" }, "powershellCounter": { "label": "PowerShell 카운터 해결 방법 활성화", - "description": "활성화하면 PowerShell 명령에 카운터를 추가하여 명령이 올바르게 실행되도록 합니다. 이는 명령 출력 캡처에 문제가 있을 수 있는 PowerShell 터미널에서 도움이 됩니다. <0>더 알아보기" + "description": "PowerShell 출력이 누락되거나 중복될 때 이 기능을 켜십시오. 출력을 안정화하기 위해 각 명령에 작은 카운터를 추가합니다. 출력이 이미 올바르게 표시되면 이 기능을 끄십시오. <0>자세히 알아보기" }, "zshClearEolMark": { - "label": "ZSH 줄 끝 표시 지우기", - "description": "활성화하면 PROMPT_EOL_MARK=''를 설정하여 ZSH 줄 끝 표시를 지웁니다. 이는 '%'와 같은 특수 문자로 끝나는 명령 출력 해석의 문제를 방지합니다. <0>더 알아보기" + "label": "ZSH EOL 표시 지우기", + "description": "줄 끝에 떠도는 %가 보이거나 구문 분석이 잘못된 것 같을 때 이 기능을 켜십시오. Zsh의 줄 끝 표시(%)를 생략합니다. <0>자세히 알아보기" }, "zshOhMy": { "label": "Oh My Zsh 통합 활성화", - "description": "활성화하면 ITERM_SHELL_INTEGRATION_INSTALLED=Yes를 설정하여 Oh My Zsh 셸 통합 기능을 활성화합니다. 이 설정을 적용하려면 IDE를 다시 시작해야 할 수 있습니다. <0>더 알아보기" + "description": "Oh My Zsh 테마/플러그인이 셸 통합을 예상할 때 이 기능을 켜십시오. ITERM_SHELL_INTEGRATION_INSTALLED=Yes를 설정합니다. 해당 변수를 설정하지 않으려면 이 기능을 끄십시오. <0>자세히 알아보기" }, "zshP10k": { "label": "Powerlevel10k 통합 활성화", - "description": "활성화하면 POWERLEVEL9K_TERM_SHELL_INTEGRATION=true를 설정하여 Powerlevel10k 셸 통합 기능을 활성화합니다. <0>더 알아보기" + "description": "Powerlevel10k 셸 통합을 사용할 때 이 기능을 켜십시오. <0>자세히 알아보기" }, "zdotdir": { "label": "ZDOTDIR 처리 활성화", - "description": "활성화하면 zsh 셸 통합을 올바르게 처리하기 위한 ZDOTDIR용 임시 디렉터리를 생성합니다. 이를 통해 zsh 구성을 유지하면서 VSCode 셸 통합이 zsh와 올바르게 작동합니다. <0>더 알아보기" + "description": "zsh 셸 통합이 실패하거나 점 파일과 충돌할 때 이 기능을 켜십시오. 
<0>자세히 알아보기" }, "inheritEnv": { "label": "환경 변수 상속", - "description": "활성화하면 터미널이 VSCode 부모 프로세스로부터 환경 변수를 상속받습니다. 사용자 프로필에 정의된 셸 통합 설정 등이 포함됩니다. 이는 VSCode 전역 설정 `terminal.integrated.inheritEnv`를 직접 전환합니다. <0>더 알아보기" + "description": "부모 VS Code 프로세스에서 환경 변수를 상속하려면 이 기능을 켜십시오. <0>자세히 알아보기" } }, "advancedSettings": { @@ -747,7 +767,7 @@ "advanced": { "diff": { "label": "diff를 통한 편집 활성화", - "description": "활성화되면 Kilo Code는 파일을 더 빠르게 편집할 수 있으며 잘린 전체 파일 쓰기를 자동으로 거부합니다. 최신 Claude 4 Sonnet 모델에서 가장 잘 작동합니다.", + "description": "활성화되면 Kilo Code는 파일을 더 빠르게 편집할 수 있으며 잘린 전체 파일 쓰기를 자동으로 거부합니다", "strategy": { "label": "Diff 전략", "options": { @@ -776,10 +796,6 @@ "name": "실험적 통합 diff 전략 사용", "description": "실험적 통합 diff 전략을 활성화합니다. 이 전략은 모델 오류로 인한 재시도 횟수를 줄일 수 있지만 예기치 않은 동작이나 잘못된 편집을 일으킬 수 있습니다. 위험을 이해하고 모든 변경 사항을 신중하게 검토할 의향이 있는 경우에만 활성화하십시오." }, - "SEARCH_AND_REPLACE": { - "name": "실험적 검색 및 바꾸기 도구 사용", - "description": "실험적 검색 및 바꾸기 도구를 활성화하여 Kilo Code가 하나의 요청에서 검색어의 여러 인스턴스를 바꿀 수 있게 합니다." - }, "INSERT_BLOCK": { "name": "실험적 콘텐츠 삽입 도구 사용", "description": "실험적 콘텐츠 삽입 도구를 활성화하여 Kilo Code가 diff를 만들 필요 없이 특정 줄 번호에 콘텐츠를 삽입할 수 있게 합니다." @@ -866,8 +882,6 @@ "modelInfo": { "supportsImages": "이미지 지원", "noImages": "이미지 지원 안 함", - "supportsComputerUse": "컴퓨터 사용 지원", - "noComputerUse": "컴퓨터 사용 지원 안 함", "supportsPromptCache": "프롬프트 캐시 지원", "noPromptCache": "프롬프트 캐시 지원 안 함", "contextWindow": "컨텍스트 창:", diff --git a/webview-ui/src/i18n/locales/nl/chat.json b/webview-ui/src/i18n/locales/nl/chat.json index 49a009a7d08..89bd31f4497 100644 --- a/webview-ui/src/i18n/locales/nl/chat.json +++ b/webview-ui/src/i18n/locales/nl/chat.json @@ -151,6 +151,9 @@ "initializingWarning": "Checkpoint wordt nog steeds geïnitialiseerd... 
Als dit te lang duurt, kun je checkpoints uitschakelen in de instellingen en je taak opnieuw starten.", "menu": { "viewDiff": "Bekijk verschil", + "more": "Meer opties", + "viewDiffFromInit": "Bekijk alle wijzigingen", + "viewDiffWithCurrent": "Bekijk wijzigingen sinds dit checkpoint", "restore": "Herstel checkpoint", "restoreFiles": "Bestanden herstellen", "restoreFilesDescription": "Herstelt de bestanden van je project naar een momentopname die op dit punt is gemaakt.", @@ -272,6 +275,7 @@ "toggleAriaLabel": "Automatische goedkeuring in-/uitschakelen", "disabledAriaLabel": "Automatische goedkeuring uitgeschakeld - selecteer eerst opties", "triggerLabelOff": "Automatisch goedkeuren uit", + "triggerLabelOffShort": "Uit", "triggerLabel_zero": "0 automatisch goedgekeurd", "triggerLabel_one": "1 automatisch goedgekeurd", "triggerLabel_other": "{{count}} automatisch goedgekeurd", @@ -286,6 +290,19 @@ "selectModel": "Selecteer roo/code-supernova van de Roo Code Cloud provider in Instellingen om te beginnen.", "goToSettingsButton": "Ga naar Instellingen" }, + "release": { + "heading": "Nieuw in de extensie:", + "openRouterEmbeddings": "Ondersteuning voor OpenRouter embedding-modellen", + "chutesDynamic": "Chutes laadt nu dynamisch de nieuwste modellen", + "queuedMessagesFix": "Oplossingen voor berichten in de wachtrij die verloren gaan" + }, + "cloudAgents": { + "heading": "Nieuw in de Cloud:", + "prFixer": "Introductie van de PR Fixer cloud-agent als aanvulling op de PR Reviewer.", + "prFixerDescription": "De PR Fixer van Roo Code past hoogwaardige wijzigingen toe op je PR's, direct vanuit GitHub. 
Roep het aan via een PR-opmerking en het leest de volledige opmerkingengeschiedenis om context, afspraken en afwegingen te begrijpen - en implementeert vervolgens de juiste oplossing.", + "tryPrFixerButton": "Probeer PR Fixer" + }, + "careers": "Ook, we nemen aan!", "socialLinks": "Sluit je bij ons aan op X, Discord, of r/RooCode 🚀" }, "reasoning": { diff --git a/webview-ui/src/i18n/locales/nl/common.json b/webview-ui/src/i18n/locales/nl/common.json index e6f3e76333b..435dac9ad5a 100644 --- a/webview-ui/src/i18n/locales/nl/common.json +++ b/webview-ui/src/i18n/locales/nl/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}} maanden geleden", "year_ago": "een jaar geleden", "years_ago": "{{count}} jaar geleden" + }, + "errors": { + "wait_checkpoint_long_time": "Je hebt {{timeout}} seconden gewacht op de initialisatie van de checkpoint. Als je deze functie niet nodig hebt, schakel hem dan uit in de checkpoint-instellingen.", + "init_checkpoint_fail_long_time": "De initialisatie van de checkpoint duurde meer dan {{timeout}} seconden, dus checkpoints zijn uitgeschakeld voor deze taak. Je kunt checkpoints uitschakelen of de wachttijd in de checkpoint-instellingen verhogen." 
} } diff --git a/webview-ui/src/i18n/locales/nl/mcp.json b/webview-ui/src/i18n/locales/nl/mcp.json index 136aae18bc7..0c5f7b42e5c 100644 --- a/webview-ui/src/i18n/locales/nl/mcp.json +++ b/webview-ui/src/i18n/locales/nl/mcp.json @@ -25,13 +25,12 @@ "tabs": { "tools": "Tools", "resources": "Bronnen", - "errors": "Fouten" + "logs": "Logboeken" }, "emptyState": { "noTools": "Geen tools gevonden", "noResources": "Geen bronnen gevonden", - "noLogs": "Geen logboeken gevonden", - "noErrors": "Geen fouten gevonden" + "noLogs": "Nog geen logboeken" }, "networkTimeout": { "label": "Netwerktime-out", diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json index 78aba33ed24..ed3b0ce6b95 100644 --- a/webview-ui/src/i18n/locales/nl/settings.json +++ b/webview-ui/src/i18n/locales/nl/settings.json @@ -57,6 +57,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "API-sleutel", "vercelAiGatewayApiKeyPlaceholder": "Voer uw Vercel AI Gateway API-sleutel in", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "OpenRouter API-sleutel", + "openRouterApiKeyPlaceholder": "Voer uw OpenRouter API-sleutel in", "mistralProvider": "Mistral", "mistralApiKeyLabel": "API-sleutel:", "mistralApiKeyPlaceholder": "Voer uw Mistral API-sleutel in", @@ -126,7 +129,8 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API-sleutel is vereist", "ollamaBaseUrlRequired": "Ollama basis-URL is vereist", "baseUrlRequired": "Basis-URL is vereist", - "modelDimensionMinValue": "Modelafmeting moet groter zijn dan 0" + "modelDimensionMinValue": "Modelafmeting moet groter zijn dan 0", + "openRouterApiKeyRequired": "OpenRouter API-sleutel is vereist" }, "advancedConfigLabel": "Geavanceerde configuratie", "searchMinScoreLabel": "Zoekscore drempel", @@ -309,6 +313,9 @@ "getZaiApiKey": "Z AI API-sleutel ophalen", "zaiEntrypoint": "Z AI-ingangspunt", "zaiEntrypointDescription": "Selecteer het juiste API-ingangspunt op basis van 
uw locatie. Als u zich in China bevindt, kies dan open.bigmodel.cn. Anders kiest u api.z.ai.", + "minimaxApiKey": "MiniMax API-sleutel", + "getMiniMaxApiKey": "MiniMax API-sleutel ophalen", + "minimaxBaseUrl": "MiniMax-ingangspunt", "geminiApiKey": "Gemini API-sleutel", "getGroqApiKey": "Groq API-sleutel ophalen", "groqApiKey": "Groq API-sleutel", @@ -362,7 +369,7 @@ "enablePromptCachingTitle": "Schakel prompt caching in om de prestaties te verbeteren en de kosten te verlagen voor ondersteunde modellen.", "cacheUsageNote": "Let op: als je geen cachegebruik ziet, probeer dan een ander model te selecteren en vervolgens weer je gewenste model.", "vscodeLmModel": "Taalmodel", - "vscodeLmWarning": "Let op: dit is een zeer experimentele integratie en ondersteuning door providers kan variëren. Krijg je een foutmelding dat een model niet wordt ondersteund, dan ligt dat aan de provider.", + "vscodeLmWarning": "Let op: Modellen die via de VS Code Language Model API worden benaderd kunnen door de provider worden verpakt of fijn‑afgesteld, waardoor het gedrag kan afwijken van het rechtstreeks gebruiken van hetzelfde model bij een typische provider of router. Om een model uit de keuzelijst ‘Language Model’ te gebruiken, schakel eerst naar dat model en klik vervolgens op ‘Accepteren’ in de Copilot Chat‑prompt; anders kun je een fout zien zoals 400 ‘The requested model is not supported’.", "geminiParameters": { "urlContext": { "title": "URL-context inschakelen", @@ -439,7 +446,7 @@ }, "computerUse": { "label": "Computergebruik", - "description": "Kan dit model met een browser werken? (bijv. Claude 3.7 Sonnet)." + "description": "Kan dit model met een browser werken?" 
}, "promptCache": { "label": "Prompt caching", @@ -481,6 +488,7 @@ }, "reasoningEffort": { "label": "Model redeneervermogen", + "none": "Geen", "minimal": "Minimaal (Snelst)", "high": "Hoog", "medium": "Middel", @@ -553,6 +561,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Timeout voor checkpoint-initialisatie (seconden)", + "description": "Maximale wachttijd voor het initialiseren van de checkpointservice. Standaard is 15 seconden. Bereik: 10-60 seconden." + }, "enable": { "label": "Automatische checkpoints inschakelen", "description": "Indien ingeschakeld, maakt Kilo Code automatisch checkpoints tijdens het uitvoeren van taken, zodat je eenvoudig wijzigingen kunt bekijken of terugzetten. <0>Meer informatie" @@ -681,6 +693,14 @@ "profileDescription": "Aangepaste drempelwaarde alleen voor dit profiel (overschrijft globale standaard)", "inheritDescription": "Dit profiel erft de globale standaard drempelwaarde ({{threshold}}%)", "usesGlobal": "(gebruikt globaal {{threshold}}%)" + }, + "includeCurrentTime": { + "label": "Huidige tijd opnemen in context", + "description": "Indien ingeschakeld, worden de huidige tijd en tijdzone-informatie opgenomen in de systeemprompt. Schakel dit uit als modellen stoppen met werken vanwege tijdproblemen." + }, + "includeCurrentCost": { + "label": "Huidige kosten opnemen in context", + "description": "Indien ingeschakeld, worden de huidige API-gebruikskosten opgenomen in de systeemprompt. Schakel dit uit als modellen stoppen met werken vanwege kostenproblemen." } }, "terminal": { @@ -690,55 +710,55 @@ }, "advanced": { "label": "Terminalinstellingen: Geavanceerd", - "description": "De volgende opties vereisen mogelijk een herstart van de terminal om de instelling toe te passen." + "description": "Deze instellingen gelden alleen wanneer 'Inline Terminal gebruiken' is uitgeschakeld. Ze beïnvloeden alleen de VS Code-terminal en kunnen een IDE-herstart vereisen." 
}, "outputLineLimit": { "label": "Terminaluitvoerlimiet", - "description": "Maximaal aantal regels dat wordt opgenomen in de terminaluitvoer bij het uitvoeren van commando's. Overtollige regels worden uit het midden verwijderd om tokens te besparen. <0>Meer informatie" + "description": "Behoudt eerste en laatste regels en verwijdert middelste om onder de limiet te blijven. Verlaag om tokens te besparen; verhoog om Roo meer tussendetails te geven. Roo ziet een placeholder waar inhoud wordt overgeslagen.<0>Meer informatie" }, "outputCharacterLimit": { - "label": "Tekenlimiet terminal", - "description": "Maximaal aantal tekens dat moet worden opgenomen in de terminaluitvoer bij het uitvoeren van commando's. Deze limiet heeft voorrang op de regellimiet om geheugenproblemen door extreem lange regels te voorkomen. Bij overschrijding wordt de uitvoer afgekapt. <0>Meer informatie" + "label": "Terminal-tekenlimiet", + "description": "Overschrijft de regellimiet om geheugenproblemen te voorkomen door een harde limiet op uitvoergrootte af te dwingen. Bij overschrijding behoudt het begin en einde en toont een placeholder aan Roo waar inhoud wordt overgeslagen. <0>Meer informatie" }, "shellIntegrationTimeout": { - "label": "Terminal shell-integratie timeout", - "description": "Maximale wachttijd voor het initialiseren van shell-integratie voordat commando's worden uitgevoerd. Voor gebruikers met lange shell-opstarttijden moet deze waarde mogelijk worden verhoogd als je 'Shell Integration Unavailable'-fouten ziet in de terminal. <0>Meer informatie" + "label": "Terminal-shell-integratie timeout", + "description": "Hoe lang te wachten op VS Code-shell-integratie voordat commando's worden uitgevoerd. Verhoog als je shell traag opstart of je 'Shell-Integratie Niet Beschikbaar'-fouten ziet. 
<0>Meer informatie" }, "shellIntegrationDisabled": { - "label": "Terminal shell-integratie uitschakelen", - "description": "Schakel dit in als terminalcommando's niet correct werken of als je 'Shell Integration Unavailable'-fouten ziet. Dit gebruikt een eenvoudigere methode om commando's uit te voeren en omzeilt enkele geavanceerde terminalfuncties. <0>Meer informatie" + "label": "Inline Terminal gebruiken (aanbevolen)", + "description": "Voer commando's uit in de Inline Terminal (chat) om shell-profielen/integratie te omzeilen voor snellere, betrouwbaardere runs. Wanneer uitgeschakeld gebruikt Roo de VS Code-terminal met je shell-profiel, prompts en plugins. <0>Meer informatie" }, "commandDelay": { - "label": "Terminalcommando-vertraging", - "description": "Vertraging in milliseconden na het uitvoeren van een commando. De standaardinstelling van 0 schakelt de vertraging volledig uit. Dit kan helpen om te zorgen dat de uitvoer volledig wordt vastgelegd in terminals met timingproblemen. In de meeste terminals wordt dit geïmplementeerd door `PROMPT_COMMAND='sleep N'` te zetten en in Powershell wordt `start-sleep` toegevoegd aan het einde van elk commando. Oorspronkelijk was dit een workaround voor VSCode bug#237208 en is mogelijk niet meer nodig. <0>Meer informatie" + "label": "Terminal-commandovertraging", + "description": "Voegt korte pauze toe na elk commando zodat VS Code-terminal alle uitvoer kan flushen (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). Gebruik alleen als je ontbrekende tail-uitvoer ziet; anders op 0 laten. <0>Meer informatie" }, "compressProgressBar": { "label": "Voortgangsbalk-uitvoer comprimeren", - "description": "Indien ingeschakeld, verwerkt Kilo Code terminaluitvoer met carriage returns (\r) om te simuleren hoe een echte terminal inhoud weergeeft. Dit verwijdert tussenliggende voortgangsbalken en behoudt alleen de eindstatus, waardoor er meer contextruimte overblijft. 
<0>Meer informatie" + "description": "Klapt voortgangsbalken/spinners in zodat alleen eindstatus behouden blijft (bespaart tokens). <0>Meer informatie" }, "powershellCounter": { "label": "PowerShell-teller workaround inschakelen", - "description": "Indien ingeschakeld, voegt Kilo Code een teller toe aan PowerShell-commando's om correcte uitvoering te garanderen. Dit helpt bij PowerShell-terminals die problemen hebben met het vastleggen van uitvoer. <0>Meer informatie" + "description": "Schakel in wanneer PowerShell-uitvoer ontbreekt of gedupliceerd wordt; voegt kleine teller toe aan elk commando om uitvoer te stabiliseren. Laat uit als uitvoer al correct lijkt. <0>Meer informatie" }, "zshClearEolMark": { - "label": "ZSH EOL-markering wissen", - "description": "Indien ingeschakeld, wist Kilo Code de ZSH end-of-line markering door PROMPT_EOL_MARK='' te zetten. Dit voorkomt problemen met de interpretatie van uitvoer die eindigt met speciale tekens zoals '%'. <0>Meer informatie" + "label": "ZSH-EOL-markering wissen", + "description": "Schakel in wanneer je verdwaalde % aan regeleinden ziet of parsing verkeerd lijkt; laat Zsh's einde-van-regel-markering (%) weg. <0>Meer informatie" }, "zshOhMy": { "label": "Oh My Zsh-integratie inschakelen", - "description": "Indien ingeschakeld, zet Kilo Code ITERM_SHELL_INTEGRATION_INSTALLED=Yes om Oh My Zsh shell-integratiefuncties te activeren. Het toepassen van deze instelling kan een herstart van de IDE vereisen. <0>Meer informatie" + "description": "Schakel in wanneer je Oh My Zsh-thema/plugins shell-integratie verwachten; stelt ITERM_SHELL_INTEGRATION_INSTALLED=Yes in. Schakel uit om instellen van die variabele te vermijden. <0>Meer informatie" }, "zshP10k": { "label": "Powerlevel10k-integratie inschakelen", - "description": "Indien ingeschakeld, zet Kilo Code POWERLEVEL9K_TERM_SHELL_INTEGRATION=true om Powerlevel10k shell-integratiefuncties te activeren. 
<0>Meer informatie" + "description": "Schakel in wanneer je Powerlevel10k-shell-integratie gebruikt. <0>Meer informatie" }, "zdotdir": { "label": "ZDOTDIR-afhandeling inschakelen", - "description": "Indien ingeschakeld, maakt Kilo Code een tijdelijke map aan voor ZDOTDIR om zsh shell-integratie correct af te handelen. Dit zorgt ervoor dat VSCode shell-integratie goed werkt met zsh en je zsh-configuratie behouden blijft. <0>Meer informatie" + "description": "Schakel in wanneer zsh-shell-integratie mislukt of conflicteert met je dotfiles. <0>Meer informatie" }, "inheritEnv": { "label": "Omgevingsvariabelen overnemen", - "description": "Indien ingeschakeld, neemt de terminal omgevingsvariabelen over van het bovenliggende VSCode-proces, zoals shell-integratie-instellingen uit het gebruikersprofiel. Dit schakelt direct de VSCode-instelling `terminal.integrated.inheritEnv` om. <0>Meer informatie" + "description": "Schakel in om omgevingsvariabelen over te nemen van het bovenliggende VS Code-proces. <0>Meer informatie" } }, "advancedSettings": { @@ -747,7 +767,7 @@ "advanced": { "diff": { "label": "Bewerken via diffs inschakelen", - "description": "Indien ingeschakeld kan Kilo Code sneller bestanden bewerken en worden afgekorte volledige-bestandswijzigingen automatisch geweigerd. Werkt het beste met het nieuwste Claude 4 Sonnet-model.", + "description": "Indien ingeschakeld kan Kilo Code sneller bestanden bewerken en worden afgekorte volledige-bestandswijzigingen automatisch geweigerd", "strategy": { "label": "Diff-strategie", "options": { @@ -776,10 +796,6 @@ "name": "Experimentele unified diff-strategie gebruiken", "description": "Schakel de experimentele unified diff-strategie in. Deze strategie kan het aantal herhalingen door model fouten verminderen, maar kan onverwacht gedrag of onjuiste bewerkingen veroorzaken. Alleen inschakelen als je de risico's begrijpt en wijzigingen zorgvuldig wilt controleren." 
}, - "SEARCH_AND_REPLACE": { - "name": "Experimentele zoek-en-vervang-tool gebruiken", - "description": "Schakel de experimentele zoek-en-vervang-tool in, waarmee Kilo Code meerdere instanties van een zoekterm in één verzoek kan vervangen." - }, "INSERT_BLOCK": { "name": "Experimentele inhoud-invoeg-tool gebruiken", "description": "Schakel de experimentele inhoud-invoeg-tool in, waarmee Kilo Code inhoud op specifieke regelnummers kan invoegen zonder een diff te maken." @@ -866,8 +882,6 @@ "modelInfo": { "supportsImages": "Ondersteunt afbeeldingen", "noImages": "Ondersteunt geen afbeeldingen", - "supportsComputerUse": "Ondersteunt computergebruik", - "noComputerUse": "Ondersteunt geen computergebruik", "supportsPromptCache": "Ondersteunt prompt caching", "noPromptCache": "Ondersteunt geen prompt caching", "contextWindow": "Contextvenster:", diff --git a/webview-ui/src/i18n/locales/pl/chat.json b/webview-ui/src/i18n/locales/pl/chat.json index 0eff4eae58c..d7b0a6e7156 100644 --- a/webview-ui/src/i18n/locales/pl/chat.json +++ b/webview-ui/src/i18n/locales/pl/chat.json @@ -156,6 +156,9 @@ "initializingWarning": "Trwa inicjalizacja punktu kontrolnego... 
Jeśli to trwa zbyt długo, możesz wyłączyć punkty kontrolne w ustawieniach i uruchomić zadanie ponownie.", "menu": { "viewDiff": "Zobacz różnice", + "more": "Więcej opcji", + "viewDiffFromInit": "Zobacz wszystkie zmiany", + "viewDiffWithCurrent": "Zobacz zmiany od tego punktu kontrolnego", "restore": "Przywróć punkt kontrolny", "restoreFiles": "Przywróć pliki", "restoreFilesDescription": "Przywraca pliki Twojego projektu do zrzutu wykonanego w tym punkcie.", @@ -272,6 +275,7 @@ "toggleAriaLabel": "Przełącz automatyczne zatwierdzanie", "disabledAriaLabel": "Automatyczne zatwierdzanie wyłączone - najpierw wybierz opcje", "triggerLabelOff": "Automatyczne zatwierdzanie wyłączone", + "triggerLabelOffShort": "Wyłączone", "triggerLabel_zero": "0 automatycznie zatwierdzone", "triggerLabel_one": "1 automatycznie zatwierdzony", "triggerLabel_other": "{{count}} automatycznie zatwierdzonych", @@ -301,6 +305,19 @@ "selectModel": "Wybierz roo/code-supernova od dostawcy Roo Code Cloud w Ustawieniach, aby rozpocząć.", "goToSettingsButton": "Przejdź do Ustawień" }, + "release": { + "heading": "Nowości w rozszerzeniu:", + "openRouterEmbeddings": "Wsparcie dla modeli osadzania OpenRouter", + "chutesDynamic": "Chutes teraz dynamicznie ładuje najnowsze modele", + "queuedMessagesFix": "Poprawki dla gubienia się wiadomości w kolejce" + }, + "cloudAgents": { + "heading": "Nowości w chmurze:", + "prFixer": "Przedstawiamy agenta chmurowego PR Fixer uzupełniającego PR Reviewer.", + "prFixerDescription": "PR Fixer Roo Code stosuje wysokiej jakości zmiany do Twoich PR, bezpośrednio z GitHub. 
Wywołaj go przez komentarz PR, a on przeczyta całą historię komentarzy, aby zrozumieć kontekst, uzgodnienia i kompromisy - następnie wdroży właściwą poprawkę.", + "tryPrFixerButton": "Wypróbuj PR Fixer" + }, + "careers": "Dodatkowo, zatrudniamy!", "socialLinks": "Dołącz do nas na X, Discord, lub r/RooCode 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/pl/common.json b/webview-ui/src/i18n/locales/pl/common.json index aa763407f33..0f44c26db22 100644 --- a/webview-ui/src/i18n/locales/pl/common.json +++ b/webview-ui/src/i18n/locales/pl/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}} miesięcy temu", "year_ago": "rok temu", "years_ago": "{{count}} lat temu" + }, + "errors": { + "wait_checkpoint_long_time": "Czekałeś {{timeout}} sekund na inicjalizację punktu kontrolnego. Jeśli nie potrzebujesz tej funkcji, wyłącz ją w ustawieniach punktu kontrolnego.", + "init_checkpoint_fail_long_time": "Inicjalizacja punktu kontrolnego trwała ponad {{timeout}} sekund, więc punkty kontrolne zostały wyłączone dla tego zadania. Możesz wyłączyć punkty kontrolne lub wydłużyć czas oczekiwania w ustawieniach punktu kontrolnego." 
} } diff --git a/webview-ui/src/i18n/locales/pl/mcp.json b/webview-ui/src/i18n/locales/pl/mcp.json index 8a8b624bcf0..1abf0165308 100644 --- a/webview-ui/src/i18n/locales/pl/mcp.json +++ b/webview-ui/src/i18n/locales/pl/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "Narzędzia", "resources": "Zasoby", - "errors": "Błędy" + "logs": "Logi" }, "emptyState": { "noTools": "Nie znaleziono narzędzi", "noResources": "Nie znaleziono zasobów", - "noErrors": "Nie znaleziono błędów" + "noLogs": "Brak logów" }, "networkTimeout": { "label": "Limit czasu sieci", diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json index e0e4e641df8..c2c5b812cf9 100644 --- a/webview-ui/src/i18n/locales/pl/settings.json +++ b/webview-ui/src/i18n/locales/pl/settings.json @@ -60,6 +60,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "Klucz API", "vercelAiGatewayApiKeyPlaceholder": "Wprowadź swój klucz API Vercel AI Gateway", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "Klucz API OpenRouter", + "openRouterApiKeyPlaceholder": "Wprowadź swój klucz API OpenRouter", "openaiCompatibleProvider": "Kompatybilny z OpenAI", "openAiKeyLabel": "Klucz API OpenAI", "openAiKeyPlaceholder": "Wprowadź swój klucz API OpenAI", @@ -126,7 +129,8 @@ "vercelAiGatewayApiKeyRequired": "Klucz API Vercel AI Gateway jest wymagany", "ollamaBaseUrlRequired": "Wymagany jest bazowy adres URL Ollama", "baseUrlRequired": "Wymagany jest bazowy adres URL", - "modelDimensionMinValue": "Wymiar modelu musi być większy niż 0" + "modelDimensionMinValue": "Wymiar modelu musi być większy niż 0", + "openRouterApiKeyRequired": "Wymagany jest klucz API OpenRouter" }, "advancedConfigLabel": "Konfiguracja zaawansowana", "searchMinScoreLabel": "Próg wyniku wyszukiwania", @@ -309,6 +313,9 @@ "getZaiApiKey": "Uzyskaj klucz API Z AI", "zaiEntrypoint": "Punkt wejścia Z AI", "zaiEntrypointDescription": "Wybierz odpowiedni punkt wejścia API w 
zależności od swojej lokalizacji. Jeśli jesteś w Chinach, wybierz open.bigmodel.cn. W przeciwnym razie wybierz api.z.ai.", + "minimaxApiKey": "Klucz API MiniMax", + "getMiniMaxApiKey": "Uzyskaj klucz API MiniMax", + "minimaxBaseUrl": "Punkt wejścia MiniMax", "geminiApiKey": "Klucz API Gemini", "getGroqApiKey": "Uzyskaj klucz API Groq", "groqApiKey": "Klucz API Groq", @@ -362,7 +369,7 @@ "enablePromptCachingTitle": "Włącz buforowanie podpowiedzi, aby poprawić wydajność i zmniejszyć koszty dla obsługiwanych modeli.", "cacheUsageNote": "Uwaga: Jeśli nie widzisz użycia bufora, spróbuj wybrać inny model, a następnie ponownie wybrać żądany model.", "vscodeLmModel": "Model językowy", - "vscodeLmWarning": "Uwaga: To bardzo eksperymentalna integracja, a wsparcie dostawcy może się różnić. Jeśli otrzymasz błąd dotyczący nieobsługiwanego modelu, jest to problem po stronie dostawcy.", + "vscodeLmWarning": "Uwaga: Modele dostępne przez interfejs VS Code Language Model API mogą być opakowane lub dostrojone przez dostawcę, dlatego ich działanie może różnić się od bezpośredniego użycia tego samego modelu u typowego dostawcy lub routera. Aby użyć modelu z listy «Language Model», najpierw przełącz się na ten model, a następnie kliknij «Akceptuj» w monicie Copilot Chat; w przeciwnym razie możesz zobaczyć błąd, np. 400 „The requested model is not supported”.", "geminiParameters": { "urlContext": { "title": "Włącz kontekst URL", @@ -439,7 +446,7 @@ }, "computerUse": { "label": "Użycie komputera", - "description": "Czy model jest w stanie wchodzić w interakcję z przeglądarką? (np. Claude 3.7 Sonnet)." + "description": "Czy model jest w stanie wchodzić w interakcję z przeglądarką?" 
}, "promptCache": { "label": "Buforowanie podpowiedzi", @@ -481,6 +488,7 @@ }, "reasoningEffort": { "label": "Wysiłek rozumowania modelu", + "none": "Brak", "minimal": "Minimalny (najszybszy)", "high": "Wysoki", "medium": "Średni", @@ -553,6 +561,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Limit czasu inicjalizacji punktu kontrolnego (sekundy)", + "description": "Maksymalny czas oczekiwania na inicjalizację usługi punktów kontrolnych. Domyślnie 15 sekund. Zakres: 10-60 sekund." + }, "enable": { "label": "Włącz automatyczne punkty kontrolne", "description": "Gdy włączone, Kilo Code automatycznie utworzy punkty kontrolne podczas wykonywania zadań, ułatwiając przeglądanie zmian lub powrót do wcześniejszych stanów. <0>Dowiedz się więcej" @@ -681,6 +693,14 @@ "label": "Maksymalny całkowity rozmiar obrazów", "mb": "MB", "description": "Maksymalny skumulowany limit rozmiaru (w MB) dla wszystkich obrazów przetwarzanych w jednej operacji read_file. Podczas odczytu wielu obrazów rozmiar każdego obrazu jest dodawany do sumy. Jeśli dołączenie kolejnego obrazu przekroczyłoby ten limit, zostanie on pominięty." + }, + "includeCurrentTime": { + "label": "Uwzględnij bieżący czas w kontekście", + "description": "Gdy włączone, bieżący czas i informacje o strefie czasowej zostaną uwzględnione w promptcie systemowym. Wyłącz, jeśli modele przestają działać z powodu problemów z czasem." + }, + "includeCurrentCost": { + "label": "Uwzględnij bieżący koszt w kontekście", + "description": "Gdy włączone, bieżący koszt użycia API zostanie uwzględniony w promptcie systemowym. Wyłącz, jeśli modele przestają działać z powodu problemów z kosztami." } }, "terminal": { @@ -690,55 +710,55 @@ }, "advanced": { "label": "Ustawienia terminala: Zaawansowane", - "description": "Poniższe opcje mogą wymagać ponownego uruchomienia terminala, aby zastosować ustawienie." + "description": "Te ustawienia mają zastosowanie tylko gdy 'Użyj terminala wbudowanego' jest wyłączone. 
Dotyczą tylko terminala VS Code i mogą wymagać ponownego uruchomienia IDE." }, "outputLineLimit": { "label": "Limit wyjścia terminala", - "description": "Maksymalna liczba linii do uwzględnienia w wyjściu terminala podczas wykonywania poleceń. Po przekroczeniu linie będą usuwane ze środka, oszczędzając token. <0>Dowiedz się więcej" + "description": "Zachowuje pierwsze i ostatnie linie i odrzuca środkowe, aby zmieścić się w limicie. Zmniejsz, aby oszczędzać tokeny; zwiększ, aby dać Roo więcej szczegółów ze środka. Roo widzi symbol zastępczy tam, gdzie treść jest pomijana.<0>Dowiedz się więcej" }, "outputCharacterLimit": { - "label": "Limit znaków w terminalu", - "description": "Maksymalna liczba znaków do uwzględnienia w danych wyjściowych terminala podczas wykonywania poleceń. Limit ten ma pierwszeństwo przed limitem linii, aby zapobiec problemom z pamięcią spowodowanym przez bardzo długie linie. Po przekroczeniu limitu dane wyjściowe zostaną obcięte. <0>Dowiedz się więcej" + "label": "Limit znaków terminala", + "description": "Zastępuje limit linii, aby zapobiec problemom z pamięcią, narzucając twardy limit rozmiaru wyjścia. W przypadku przekroczenia zachowuje początek i koniec i pokazuje symbol zastępczy Roo tam, gdzie treść jest pomijana. <0>Dowiedz się więcej" }, "shellIntegrationTimeout": { "label": "Limit czasu integracji powłoki terminala", - "description": "Maksymalny czas oczekiwania na inicjalizację integracji powłoki przed wykonaniem poleceń. Dla użytkowników z długim czasem uruchamiania powłoki, ta wartość może wymagać zwiększenia, jeśli widzisz błędy \"Shell Integration Unavailable\" w terminalu. <0>Dowiedz się więcej" + "description": "Jak długo czekać na integrację powłoki VS Code przed wykonaniem poleceń. Zwiększ, jeśli twoja powłoka wolno się uruchamia lub widzisz błędy 'Integracja Powłoki Niedostępna'. 
<0>Dowiedz się więcej" }, "shellIntegrationDisabled": { - "label": "Wyłącz integrację powłoki terminala", - "description": "Włącz tę opcję, jeśli polecenia terminala nie działają poprawnie lub widzisz błędy 'Shell Integration Unavailable'. Używa to prostszej metody uruchamiania poleceń, omijając niektóre zaawansowane funkcje terminala. <0>Dowiedz się więcej" + "label": "Użyj terminala wbudowanego (zalecane)", + "description": "Uruchamiaj polecenia w terminalu wbudowanym (czat), aby ominąć profile/integrację powłoki dla szybszych, bardziej niezawodnych uruchomień. Gdy wyłączony, Roo używa terminala VS Code z twoim profilem powłoki, monitami i wtyczkami. <0>Dowiedz się więcej" }, "commandDelay": { - "label": "Opóźnienie poleceń terminala", - "description": "Opóźnienie w milisekundach dodawane po wykonaniu polecenia. Domyślne ustawienie 0 całkowicie wyłącza opóźnienie. Może to pomóc w zapewnieniu pełnego przechwytywania wyjścia poleceń w terminalach z problemami z synchronizacją. W większości terminali jest to implementowane przez ustawienie `PROMPT_COMMAND='sleep N'`, a PowerShell dodaje `start-sleep` na końcu każdego polecenia. Pierwotnie było to obejście błędu VSCode#237208 i może nie być potrzebne. <0>Dowiedz się więcej" + "label": "Opóźnienie polecenia terminala", + "description": "Dodaje krótką pauzę po każdym poleceniu, aby terminal VS Code mógł opróżnić całe wyjście (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). Używaj tylko gdy widzisz brakujące wyjście końcowe; w przeciwnym razie zostaw na 0. <0>Dowiedz się więcej" }, "compressProgressBar": { - "label": "Kompresuj wyjście pasków postępu", - "description": "Po włączeniu, przetwarza wyjście terminala z powrotami karetki (\\r), aby symulować sposób wyświetlania treści przez prawdziwy terminal. Usuwa to pośrednie stany pasków postępu, zachowując tylko stan końcowy, co oszczędza przestrzeń kontekstową dla bardziej istotnych informacji. 
<0>Dowiedz się więcej" + "label": "Kompresuj wyjście paska postępu", + "description": "Zwija paski postępu/spinnery, aby zachować tylko stan końcowy (oszczędza tokeny). <0>Dowiedz się więcej" }, "powershellCounter": { "label": "Włącz obejście licznika PowerShell", - "description": "Po włączeniu dodaje licznik do poleceń PowerShell, aby zapewnić prawidłowe wykonanie poleceń. Pomaga to w terminalach PowerShell, które mogą mieć problemy z przechwytywaniem wyjścia. <0>Dowiedz się więcej" + "description": "Włącz gdy brakuje lub jest zduplikowane wyjście PowerShell; dodaje mały licznik do każdego polecenia, aby ustabilizować wyjście. Pozostaw wyłączone, jeśli wyjście już wygląda poprawnie. <0>Dowiedz się więcej" }, "zshClearEolMark": { "label": "Wyczyść znacznik końca linii ZSH", - "description": "Po włączeniu czyści znacznik końca linii ZSH poprzez ustawienie PROMPT_EOL_MARK=''. Zapobiega to problemom z interpretacją wyjścia poleceń, gdy kończy się ono znakami specjalnymi jak '%'. <0>Dowiedz się więcej" + "description": "Włącz gdy widzisz zbłąkane % na końcu linii lub parsowanie wygląda nieprawidłowo; pomija znacznik końca linii (%) Zsh. <0>Dowiedz się więcej" }, "zshOhMy": { "label": "Włącz integrację Oh My Zsh", - "description": "Po włączeniu ustawia ITERM_SHELL_INTEGRATION_INSTALLED=Yes, aby włączyć funkcje integracji powłoki Oh My Zsh. Zastosowanie tego ustawienia może wymagać ponownego uruchomienia IDE. <0>Dowiedz się więcej" + "description": "Włącz gdy twój motyw/wtyczki Oh My Zsh oczekują integracji powłoki; ustawia ITERM_SHELL_INTEGRATION_INSTALLED=Yes. Wyłącz, aby uniknąć ustawiania tej zmiennej. <0>Dowiedz się więcej" }, "zshP10k": { "label": "Włącz integrację Powerlevel10k", - "description": "Po włączeniu ustawia POWERLEVEL9K_TERM_SHELL_INTEGRATION=true, aby włączyć funkcje integracji powłoki Powerlevel10k. <0>Dowiedz się więcej" + "description": "Włącz gdy używasz integracji powłoki Powerlevel10k. 
<0>Dowiedz się więcej" }, "zdotdir": { "label": "Włącz obsługę ZDOTDIR", - "description": "Po włączeniu tworzy tymczasowy katalog dla ZDOTDIR, aby poprawnie obsłużyć integrację powłoki zsh. Zapewnia to prawidłowe działanie integracji powłoki VSCode z zsh, zachowując twoją konfigurację zsh. <0>Dowiedz się więcej" + "description": "Włącz gdy integracja powłoki zsh zawodzi lub jest w konflikcie z twoimi plikami dotfiles. <0>Dowiedz się więcej" }, "inheritEnv": { "label": "Dziedzicz zmienne środowiskowe", - "description": "Po włączeniu terminal dziedziczy zmienne środowiskowe z procesu nadrzędnego VSCode, takie jak ustawienia integracji powłoki zdefiniowane w profilu użytkownika. Przełącza to bezpośrednio globalne ustawienie VSCode `terminal.integrated.inheritEnv`. <0>Dowiedz się więcej" + "description": "Włącz, aby dziedziczyć zmienne środowiskowe z procesu nadrzędnego VS Code. <0>Dowiedz się więcej" } }, "advancedSettings": { @@ -747,7 +767,7 @@ "advanced": { "diff": { "label": "Włącz edycję przez różnice", - "description": "Gdy włączone, Kilo Code będzie w stanie edytować pliki szybciej i automatycznie odrzuci obcięte pełne zapisy plików. Działa najlepiej z najnowszym modelem Claude 4 Sonnet.", + "description": "Gdy włączone, Kilo Code będzie w stanie edytować pliki szybciej i automatycznie odrzuci obcięte pełne zapisy plików", "strategy": { "label": "Strategia diff", "options": { @@ -776,10 +796,6 @@ "name": "Użyj eksperymentalnej ujednoliconej strategii diff", "description": "Włącz eksperymentalną ujednoliconą strategię diff. Ta strategia może zmniejszyć liczbę ponownych prób spowodowanych błędami modelu, ale może powodować nieoczekiwane zachowanie lub nieprawidłowe edycje. Włącz tylko jeśli rozumiesz ryzyko i jesteś gotów dokładnie przeglądać wszystkie zmiany." 
}, - "SEARCH_AND_REPLACE": { - "name": "Użyj eksperymentalnego narzędzia do wyszukiwania i zamiany", - "description": "Włącz eksperymentalne narzędzie do wyszukiwania i zamiany, umożliwiając Kilo Code zastąpienie wielu wystąpień wyszukiwanego terminu w jednym żądaniu." - }, "INSERT_BLOCK": { "name": "Użyj eksperymentalnego narzędzia do wstawiania treści", "description": "Włącz eksperymentalne narzędzie do wstawiania treści, umożliwiając Kilo Code wstawianie treści w określonych numerach linii bez konieczności tworzenia diff." @@ -866,8 +882,6 @@ "modelInfo": { "supportsImages": "Obsługuje obrazy", "noImages": "Nie obsługuje obrazów", - "supportsComputerUse": "Obsługuje użycie komputera", - "noComputerUse": "Nie obsługuje użycia komputera", "supportsPromptCache": "Obsługuje buforowanie podpowiedzi", "noPromptCache": "Nie obsługuje buforowania podpowiedzi", "contextWindow": "Okno kontekstowe:", diff --git a/webview-ui/src/i18n/locales/pt-BR/chat.json b/webview-ui/src/i18n/locales/pt-BR/chat.json index 2a4a9f0b20c..dd6ddac40e6 100644 --- a/webview-ui/src/i18n/locales/pt-BR/chat.json +++ b/webview-ui/src/i18n/locales/pt-BR/chat.json @@ -156,6 +156,9 @@ "initializingWarning": "Ainda inicializando ponto de verificação... 
Se isso demorar muito, você pode desativar os pontos de verificação nas configurações e reiniciar sua tarefa.", "menu": { "viewDiff": "Ver diferenças", + "more": "Mais opções", + "viewDiffFromInit": "Ver todas as alterações", + "viewDiffWithCurrent": "Ver alterações desde este ponto de verificação", "restore": "Restaurar ponto de verificação", "restoreFiles": "Restaurar arquivos", "restoreFilesDescription": "Restaura os arquivos do seu projeto para um snapshot feito neste ponto.", @@ -272,6 +275,7 @@ "toggleAriaLabel": "Alternar aprovação automática", "disabledAriaLabel": "Aprovação automática desativada - selecione as opções primeiro", "triggerLabelOff": "Aprovação automática desativada", + "triggerLabelOffShort": "Desativada", "triggerLabel_zero": "0 aprovados automaticamente", "triggerLabel_one": "1 aprovado automaticamente", "triggerLabel_other": "{{count}} aprovados automaticamente", @@ -301,6 +305,19 @@ "selectModel": "Selecione roo/code-supernova do provedor Roo Code Cloud em Configurações para começar.", "goToSettingsButton": "Ir para Configurações" }, + "release": { + "heading": "Novidades na Extensão:", + "openRouterEmbeddings": "Suporte para modelos de embedding do OpenRouter", + "chutesDynamic": "Chutes agora carrega os modelos mais recentes dinamicamente", + "queuedMessagesFix": "Correções para mensagens enfileiradas que se perdiam" + }, + "cloudAgents": { + "heading": "Novidades na Nuvem:", + "prFixer": "Apresentando o agente em nuvem PR Fixer para complementar o Revisor de PR.", + "prFixerDescription": "O PR Fixer do Roo Code aplica alterações de alta qualidade aos seus PRs, diretamente do GitHub. 
Invoque via comentário de PR e ele lerá todo o histórico de comentários para entender contexto, acordos e compromissos - depois implementa a correção certa.", + "tryPrFixerButton": "Experimentar PR Fixer" + }, + "careers": "Além disso, estamos contratando!", "socialLinks": "Junte-se a nós no X, Discord, ou r/RooCode 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/pt-BR/common.json b/webview-ui/src/i18n/locales/pt-BR/common.json index 37bd3ff2911..62259b69eaf 100644 --- a/webview-ui/src/i18n/locales/pt-BR/common.json +++ b/webview-ui/src/i18n/locales/pt-BR/common.json @@ -114,5 +114,9 @@ "months_ago": "há {{count}} meses", "year_ago": "há um ano", "years_ago": "há {{count}} anos" + }, + "errors": { + "wait_checkpoint_long_time": "Você esperou {{timeout}} segundos para inicializar o checkpoint. Se não precisa dessa função, desative nas configurações do checkpoint.", + "init_checkpoint_fail_long_time": "A inicialização do checkpoint levou mais de {{timeout}} segundos, então os checkpoints foram desativados para esta tarefa. Você pode desativar os checkpoints ou aumentar o tempo de espera nas configurações do checkpoint." 
} } diff --git a/webview-ui/src/i18n/locales/pt-BR/mcp.json b/webview-ui/src/i18n/locales/pt-BR/mcp.json index 46708b61d66..a4c44817db0 100644 --- a/webview-ui/src/i18n/locales/pt-BR/mcp.json +++ b/webview-ui/src/i18n/locales/pt-BR/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "Ferramentas", "resources": "Recursos", - "errors": "Erros" + "logs": "Logs" }, "emptyState": { "noTools": "Nenhuma ferramenta encontrada", "noResources": "Nenhum recurso encontrado", - "noErrors": "Nenhum erro encontrado" + "noLogs": "Ainda sem logs" }, "networkTimeout": { "label": "Tempo limite de rede", diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json index 7a14c533928..6c652dabdc0 100644 --- a/webview-ui/src/i18n/locales/pt-BR/settings.json +++ b/webview-ui/src/i18n/locales/pt-BR/settings.json @@ -57,6 +57,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "Chave de API", "vercelAiGatewayApiKeyPlaceholder": "Digite sua chave de API do Vercel AI Gateway", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "Chave de API do OpenRouter", + "openRouterApiKeyPlaceholder": "Digite sua chave de API do OpenRouter", "mistralProvider": "Mistral", "mistralApiKeyLabel": "Chave de API:", "mistralApiKeyPlaceholder": "Digite sua chave de API da Mistral", @@ -126,7 +129,8 @@ "vercelAiGatewayApiKeyRequired": "A chave de API do Vercel AI Gateway é obrigatória", "ollamaBaseUrlRequired": "A URL base do Ollama é obrigatória", "baseUrlRequired": "A URL base é obrigatória", - "modelDimensionMinValue": "A dimensão do modelo deve ser maior que 0" + "modelDimensionMinValue": "A dimensão do modelo deve ser maior que 0", + "openRouterApiKeyRequired": "Chave API do OpenRouter é obrigatória" }, "advancedConfigLabel": "Configuração Avançada", "searchMinScoreLabel": "Limite de pontuação de busca", @@ -309,6 +313,9 @@ "getZaiApiKey": "Obter chave de API Z AI", "zaiEntrypoint": "Ponto de entrada Z AI", 
"zaiEntrypointDescription": "Selecione o ponto de entrada da API apropriado com base na sua localização. Se você estiver na China, escolha open.bigmodel.cn. Caso contrário, escolha api.z.ai.", + "minimaxApiKey": "Chave de API MiniMax", + "getMiniMaxApiKey": "Obter chave de API MiniMax", + "minimaxBaseUrl": "Ponto de entrada MiniMax", "geminiApiKey": "Chave de API Gemini", "getGroqApiKey": "Obter chave de API Groq", "groqApiKey": "Chave de API Groq", @@ -362,7 +369,7 @@ "enablePromptCachingTitle": "Ativar cache de prompts para melhorar o desempenho e reduzir custos para modelos suportados.", "cacheUsageNote": "Nota: Se você não vir o uso do cache, tente selecionar um modelo diferente e depois selecionar novamente o modelo desejado.", "vscodeLmModel": "Modelo de Linguagem", - "vscodeLmWarning": "Nota: Esta é uma integração muito experimental e o suporte do provedor pode variar. Se você receber um erro sobre um modelo não ser suportado, isso é um problema do lado do provedor.", + "vscodeLmWarning": "Observação: Modelos acessados pela VS Code Language Model API podem ser encapsulados ou ajustados pelo provedor, portanto o comportamento pode diferir do uso direto do mesmo modelo em um provedor ou roteador típico. Para usar um modelo no menu suspenso «Language Model», primeiro altere para esse modelo e depois clique em «Aceitar» no prompt do Copilot Chat; caso contrário, você pode ver um erro como 400 «The requested model is not supported».", "geminiParameters": { "urlContext": { "title": "Ativar contexto de URL", @@ -439,7 +446,7 @@ }, "computerUse": { "label": "Uso do Computador", - "description": "Este modelo é capaz de interagir com um navegador? (ex. Claude 3.7 Sonnet)." + "description": "Este modelo é capaz de interagir com um navegador?" 
}, "promptCache": { "label": "Cache de Prompts", @@ -481,6 +488,7 @@ }, "reasoningEffort": { "label": "Esforço de raciocínio do modelo", + "none": "Nenhum", "minimal": "Mínimo (mais rápido)", "high": "Alto", "medium": "Médio", @@ -553,6 +561,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Tempo limite para inicialização do checkpoint (segundos)", + "description": "Tempo máximo de espera para inicializar o serviço de checkpoint. Padrão: 15 segundos. Faixa: 10-60 segundos." + }, "enable": { "label": "Ativar pontos de verificação automáticos", "description": "Quando ativado, o Kilo Code criará automaticamente pontos de verificação durante a execução de tarefas, facilitando a revisão de alterações ou o retorno a estados anteriores. <0>Saiba mais" @@ -681,6 +693,14 @@ "label": "Tamanho total máximo da imagem", "mb": "MB", "description": "Limite máximo de tamanho cumulativo (em MB) para todas as imagens processadas em uma única operação read_file. Ao ler várias imagens, o tamanho de cada imagem é adicionado ao total. Se incluir outra imagem exceder esse limite, ela será ignorada." + }, + "includeCurrentTime": { + "label": "Incluir hora atual no contexto", + "description": "Quando ativado, a hora atual e as informações de fuso horário serão incluídas no prompt do sistema. Desative se os modelos pararem de funcionar por problemas de tempo." + }, + "includeCurrentCost": { + "label": "Incluir custo atual no contexto", + "description": "Quando ativado, o custo de uso atual da API será incluído no prompt do sistema. Desative se os modelos pararem de funcionar por problemas de custo." } }, "terminal": { @@ -689,56 +709,56 @@ "description": "Configurações básicas do terminal" }, "advanced": { - "label": "Configurações do terminal: Avançadas", - "description": "As seguintes opções podem exigir reiniciar o terminal para aplicar a configuração." 
+ "label": "Configurações do Terminal: Avançado", + "description": "Estas configurações só se aplicam quando 'Usar Terminal Inline' está desativado. Afetam apenas o terminal do VS Code e podem exigir reiniciar o IDE." }, "outputLineLimit": { "label": "Limite de saída do terminal", - "description": "Número máximo de linhas a incluir na saída do terminal ao executar comandos. Quando excedido, as linhas serão removidas do meio, economizando token. <0>Saiba mais" + "description": "Mantém as primeiras e últimas linhas e descarta as do meio para ficar abaixo do limite. Diminua para economizar tokens; aumente para dar ao Roo mais detalhes do meio. O Roo vê um placeholder onde o conteúdo é pulado.<0>Saiba mais" }, "outputCharacterLimit": { "label": "Limite de caracteres do terminal", - "description": "Número máximo de caracteres a serem incluídos na saída do terminal ao executar comandos. Este limite tem precedência sobre o limite de linhas para evitar problemas de memória com linhas extremamente longas. Quando excedido, a saída será truncada. <0>Saiba mais" + "description": "Substitui o limite de linhas para evitar problemas de memória, impondo um limite rígido no tamanho da saída. Se excedido, mantém o início e o fim e mostra um placeholder para o Roo onde o conteúdo é pulado. <0>Saiba mais" }, "shellIntegrationTimeout": { "label": "Tempo limite de integração do shell do terminal", - "description": "Tempo máximo de espera para a inicialização da integração do shell antes de executar comandos. Para usuários com tempos de inicialização de shell longos, este valor pode precisar ser aumentado se você vir erros \"Shell Integration Unavailable\" no terminal. <0>Saiba mais" + "description": "Quanto tempo esperar pela integração do shell do VS Code antes de executar comandos. Aumente se o seu shell demorar para iniciar ou se você vir erros de 'Integração do Shell Indisponível'. 
<0>Saiba mais" }, "shellIntegrationDisabled": { - "label": "Desativar integração do shell do terminal", - "description": "Ative isso se os comandos do terminal não estiverem funcionando corretamente ou se você vir erros de 'Shell Integration Unavailable'. Isso usa um método mais simples para executar comandos, ignorando alguns recursos avançados do terminal. <0>Saiba mais" + "label": "Usar Terminal Inline (recomendado)", + "description": "Execute comandos no Terminal Inline (chat) para contornar perfis/integração de shell para execuções mais rápidas e confiáveis. Quando desativado, o Roo usa o terminal do VS Code com seu perfil de shell, prompts e plugins. <0>Saiba mais" }, "commandDelay": { "label": "Atraso de comando do terminal", - "description": "Atraso em milissegundos para adicionar após a execução do comando. A configuração padrão de 0 desativa completamente o atraso. Isso pode ajudar a garantir que a saída do comando seja totalmente capturada em terminais com problemas de temporização. Na maioria dos terminais, isso é implementado definindo `PROMPT_COMMAND='sleep N'` e o PowerShell adiciona `start-sleep` ao final de cada comando. Originalmente era uma solução para o bug VSCode#237208 e pode não ser necessário. <0>Saiba mais" + "description": "Adiciona uma pequena pausa após cada comando para que o terminal do VS Code possa liberar toda a saída (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). Use apenas se você vir a saída final faltando; caso contrário, deixe em 0. <0>Saiba mais" }, "compressProgressBar": { - "label": "Comprimir saída de barras de progresso", - "description": "Quando ativado, processa a saída do terminal com retornos de carro (\\r) para simular como um terminal real exibiria o conteúdo. Isso remove os estados intermediários das barras de progresso, mantendo apenas o estado final, o que conserva espaço de contexto para informações mais relevantes. 
<0>Saiba mais" + "label": "Comprimir saída da barra de progresso", + "description": "Recolhe barras de progresso/spinners para que apenas o estado final seja mantido (economiza tokens). <0>Saiba mais" }, "powershellCounter": { - "label": "Ativar solução alternativa do contador PowerShell", - "description": "Quando ativado, adiciona um contador aos comandos PowerShell para garantir a execução correta dos comandos. Isso ajuda com terminais PowerShell que podem ter problemas com a captura de saída. <0>Saiba mais" + "label": "Ativar solução alternativa do contador do PowerShell", + "description": "Ative isso quando a saída do PowerShell estiver faltando ou duplicada; ele adiciona um pequeno contador a cada comando para estabilizar a saída. Mantenha desativado se a saída já parecer correta. <0>Saiba mais" }, "zshClearEolMark": { "label": "Limpar marca de fim de linha do ZSH", - "description": "Quando ativado, limpa a marca de fim de linha do ZSH definindo PROMPT_EOL_MARK=''. Isso evita problemas com a interpretação da saída de comandos quando termina com caracteres especiais como '%'. <0>Saiba mais" + "description": "Ative isso quando vir % perdidos no final das linhas ou a análise parecer errada; ele omite a marca de fim de linha (%) do Zsh. <0>Saiba mais" }, "zshOhMy": { - "label": "Ativar integração Oh My Zsh", - "description": "Quando ativado, define ITERM_SHELL_INTEGRATION_INSTALLED=Yes para habilitar os recursos de integração do shell Oh My Zsh. A aplicação desta configuração pode exigir a reinicialização do IDE. <0>Saiba mais" + "label": "Ativar integração com o Oh My Zsh", + "description": "Ative isso quando seu tema/plugins do Oh My Zsh esperarem integração com o shell; ele define ITERM_SHELL_INTEGRATION_INSTALLED=Yes. Desative para evitar a configuração dessa variável. 
<0>Saiba mais" }, "zshP10k": { - "label": "Ativar integração Powerlevel10k", - "description": "Quando ativado, define POWERLEVEL9K_TERM_SHELL_INTEGRATION=true para habilitar os recursos de integração do shell Powerlevel10k. <0>Saiba mais" + "label": "Ativar integração com o Powerlevel10k", + "description": "Ative isso ao usar a integração do shell Powerlevel10k. <0>Saiba mais" }, "zdotdir": { - "label": "Ativar gerenciamento do ZDOTDIR", - "description": "Quando ativado, cria um diretório temporário para o ZDOTDIR para lidar corretamente com a integração do shell zsh. Isso garante que a integração do shell do VSCode funcione corretamente com o zsh enquanto preserva sua configuração do zsh. <0>Saiba mais" + "label": "Ativar manipulação de ZDOTDIR", + "description": "Ative isso quando a integração do shell zsh falhar ou entrar em conflito com seus dotfiles. <0>Saiba mais" }, "inheritEnv": { "label": "Herdar variáveis de ambiente", - "description": "Quando ativado, o terminal herda variáveis de ambiente do processo pai do VSCode, como configurações de integração do shell definidas no perfil do usuário. Isso alterna diretamente a configuração global do VSCode `terminal.integrated.inheritEnv`. <0>Saiba mais" + "description": "Ative isso para herdar variáveis de ambiente do processo pai do VS Code. <0>Saiba mais" } }, "advancedSettings": { @@ -747,7 +767,7 @@ "advanced": { "diff": { "label": "Ativar edição através de diffs", - "description": "Quando ativado, o Kilo Code poderá editar arquivos mais rapidamente e rejeitará automaticamente escritas completas de arquivos truncados. 
Funciona melhor com o modelo mais recente Claude 4 Sonnet.", + "description": "Quando ativado, o Kilo Code poderá editar arquivos mais rapidamente e rejeitará automaticamente escritas completas de arquivos truncados", "strategy": { "label": "Estratégia de diff", "options": { @@ -776,10 +796,6 @@ "name": "Usar estratégia diff unificada experimental", "description": "Ativar a estratégia diff unificada experimental. Esta estratégia pode reduzir o número de novas tentativas causadas por erros do modelo, mas pode causar comportamento inesperado ou edições incorretas. Ative apenas se compreender os riscos e estiver disposto a revisar cuidadosamente todas as alterações." }, - "SEARCH_AND_REPLACE": { - "name": "Usar ferramenta de busca e substituição experimental", - "description": "Ativar a ferramenta de busca e substituição experimental, permitindo que o Kilo Code substitua várias instâncias de um termo de busca em uma única solicitação." - }, "INSERT_BLOCK": { "name": "Usar ferramenta de inserção de conteúdo experimental", "description": "Ativar a ferramenta de inserção de conteúdo experimental, permitindo que o Kilo Code insira conteúdo em números de linha específicos sem precisar criar um diff." @@ -866,8 +882,6 @@ "modelInfo": { "supportsImages": "Suporta imagens", "noImages": "Não suporta imagens", - "supportsComputerUse": "Suporta uso do computador", - "noComputerUse": "Não suporta uso do computador", "supportsPromptCache": "Suporta cache de prompts", "noPromptCache": "Não suporta cache de prompts", "contextWindow": "Janela de Contexto:", diff --git a/webview-ui/src/i18n/locales/ru/chat.json b/webview-ui/src/i18n/locales/ru/chat.json index f5cfb74e6f4..2e4189a6f46 100644 --- a/webview-ui/src/i18n/locales/ru/chat.json +++ b/webview-ui/src/i18n/locales/ru/chat.json @@ -151,6 +151,9 @@ "initializingWarning": "Точка сохранения еще инициализируется... 
Если это занимает слишком много времени, вы можете отключить точки сохранения в настройках и перезапустить задачу.", "menu": { "viewDiff": "Просмотреть различия", + "more": "Больше опций", + "viewDiffFromInit": "Просмотреть все изменения", + "viewDiffWithCurrent": "Просмотреть изменения с этой точки сохранения", "restore": "Восстановить точку сохранения", "restoreFiles": "Восстановить файлы", "restoreFilesDescription": "Восстанавливает файлы вашего проекта до состояния на момент этой точки.", @@ -273,6 +276,7 @@ "toggleAriaLabel": "Переключить авто-утверждение", "disabledAriaLabel": "Авто-утверждение отключено - сначала выберите опции", "triggerLabelOff": "Авто-утверждение выкл", + "triggerLabelOffShort": "Выкл", "triggerLabel_zero": "0 авто-утвержденных", "triggerLabel_one": "1 авто-утвержден", "triggerLabel_other": "{{count}} авто-утвержденных", @@ -287,6 +291,19 @@ "selectModel": "Выберите roo/code-supernova от провайдера Roo Code Cloud в Настройках для начала работы.", "goToSettingsButton": "Перейти к Настройкам" }, + "release": { + "heading": "Новое в расширении:", + "openRouterEmbeddings": "Поддержка моделей встраивания OpenRouter", + "chutesDynamic": "Chutes теперь динамически загружает последние модели", + "queuedMessagesFix": "Исправления для потерянных сообщений в очереди" + }, + "cloudAgents": { + "heading": "Новое в облаке:", + "prFixer": "Представляем облачного агента PR Fixer в дополнение к PR Reviewer.", + "prFixerDescription": "PR Fixer от Roo Code применяет высококачественные изменения к вашим PR прямо из GitHub. 
Вызовите его через комментарий к PR, и он прочитает всю историю комментариев, чтобы понять контекст, соглашения и компромиссы - затем реализует правильное исправление.", + "tryPrFixerButton": "Попробовать PR Fixer" + }, + "careers": "Кроме того, мы нанимаем!", "socialLinks": "Присоединяйтесь к нам в X, Discord, или r/RooCode 🚀" }, "reasoning": { diff --git a/webview-ui/src/i18n/locales/ru/common.json b/webview-ui/src/i18n/locales/ru/common.json index 069a912154a..be459556e04 100644 --- a/webview-ui/src/i18n/locales/ru/common.json +++ b/webview-ui/src/i18n/locales/ru/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}} месяцев назад", "year_ago": "год назад", "years_ago": "{{count}} лет назад" + }, + "errors": { + "wait_checkpoint_long_time": "Ожидание инициализации контрольной точки заняло {{timeout}} секунд. Если тебе не нужна эта функция, отключи её в настройках контрольных точек.", + "init_checkpoint_fail_long_time": "Инициализация контрольной точки заняла более {{timeout}} секунд, поэтому контрольные точки отключены для этой задачи. Ты можешь отключить контрольные точки или увеличить время ожидания в настройках контрольных точек." 
} } diff --git a/webview-ui/src/i18n/locales/ru/mcp.json b/webview-ui/src/i18n/locales/ru/mcp.json index 53401f62d1d..4908598b843 100644 --- a/webview-ui/src/i18n/locales/ru/mcp.json +++ b/webview-ui/src/i18n/locales/ru/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "Инструменты", "resources": "Ресурсы", - "errors": "Ошибки" + "logs": "Логи" }, "emptyState": { "noTools": "Инструменты не найдены", "noResources": "Ресурсы не найдены", - "noErrors": "Ошибки не найдены" + "noLogs": "Логов пока нет" }, "networkTimeout": { "label": "Тайм-аут сети", diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json index ef67e87856a..0a0c2029068 100644 --- a/webview-ui/src/i18n/locales/ru/settings.json +++ b/webview-ui/src/i18n/locales/ru/settings.json @@ -57,6 +57,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "Ключ API", "vercelAiGatewayApiKeyPlaceholder": "Введите свой API-ключ Vercel AI Gateway", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "Ключ API OpenRouter", + "openRouterApiKeyPlaceholder": "Введите свой ключ API OpenRouter", "mistralProvider": "Mistral", "mistralApiKeyLabel": "Ключ API:", "mistralApiKeyPlaceholder": "Введите свой API-ключ Mistral", @@ -126,7 +129,8 @@ "vercelAiGatewayApiKeyRequired": "Требуется API-ключ Vercel AI Gateway", "ollamaBaseUrlRequired": "Требуется базовый URL Ollama", "baseUrlRequired": "Требуется базовый URL", - "modelDimensionMinValue": "Размерность модели должна быть больше 0" + "modelDimensionMinValue": "Размерность модели должна быть больше 0", + "openRouterApiKeyRequired": "Требуется ключ API OpenRouter" }, "advancedConfigLabel": "Расширенная конфигурация", "searchMinScoreLabel": "Порог оценки поиска", @@ -309,6 +313,9 @@ "getZaiApiKey": "Получить Z AI API-ключ", "zaiEntrypoint": "Точка входа Z AI", "zaiEntrypointDescription": "Пожалуйста, выберите подходящую точку входа API в зависимости от вашего местоположения. 
Если вы находитесь в Китае, выберите open.bigmodel.cn. В противном случае выберите api.z.ai.", + "minimaxApiKey": "MiniMax API-ключ", + "getMiniMaxApiKey": "Получить MiniMax API-ключ", + "minimaxBaseUrl": "Точка входа MiniMax", "geminiApiKey": "Gemini API-ключ", "getGroqApiKey": "Получить Groq API-ключ", "groqApiKey": "Groq API-ключ", @@ -362,7 +369,7 @@ "enablePromptCachingTitle": "Включить кэширование подсказок для повышения производительности и снижения затрат для поддерживаемых моделей.", "cacheUsageNote": "Примечание: если вы не видите использование кэша, попробуйте выбрать другую модель, а затем вернуться к нужной.", "vscodeLmModel": "Языковая модель", - "vscodeLmWarning": "Внимание: это очень экспериментальная интеграция, поддержка провайдера может отличаться. Если возникает ошибка о неподдерживаемой модели — проблема на стороне провайдера.", + "vscodeLmWarning": "Внимание: Модели, доступные через API VS Code Language Model, могут быть обёрнуты или дополнительно дообучены поставщиком, поэтому их поведение может отличаться от прямого использования той же модели у типичного провайдера или роутера. Чтобы использовать модель из выпадающего списка «Language Model», сначала переключитесь на эту модель, затем нажмите «Принять» в запросе Copilot Chat; в противном случае возможна ошибка, например 400 «The requested model is not supported».", "geminiParameters": { "urlContext": { "title": "Включить контекст URL", @@ -439,7 +446,7 @@ }, "computerUse": { "label": "Использование компьютера", - "description": "Может ли эта модель взаимодействовать с браузером? (например, Claude 3.7 Sonnet)." + "description": "Может ли эта модель взаимодействовать с браузером?" 
}, "promptCache": { "label": "Кэширование подсказок", @@ -481,6 +488,7 @@ }, "reasoningEffort": { "label": "Усилия по рассуждению модели", + "none": "Нет", "minimal": "Минимальный (самый быстрый)", "high": "Высокие", "medium": "Средние", @@ -553,6 +561,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Таймаут инициализации контрольной точки (секунды)", + "description": "Максимальное время ожидания инициализации сервиса контрольных точек. По умолчанию 15 секунд. Диапазон: 10-60 секунд." + }, "enable": { "label": "Включить автоматические контрольные точки", "description": "Если включено, Kilo Code будет автоматически создавать контрольные точки во время выполнения задач, что упрощает просмотр изменений или возврат к предыдущим состояниям. <0>Подробнее" @@ -681,6 +693,14 @@ "label": "Максимальный общий размер изображений", "mb": "МБ", "description": "Максимальный совокупный лимит размера (в МБ) для всех изображений, обрабатываемых в одной операции read_file. При чтении нескольких изображений размер каждого изображения добавляется к общему. Если включение другого изображения превысит этот лимит, оно будет пропущено." + }, + "includeCurrentTime": { + "label": "Включить текущее время в контекст", + "description": "Если включено, текущее время и информация о часовом поясе будут включены в системную подсказку. Отключите, если модели прекращают работу из-за проблем со временем." + }, + "includeCurrentCost": { + "label": "Включить текущую стоимость в контекст", + "description": "Если включено, текущая стоимость использования API будет включена в системную подсказку. Отключите, если модели прекращают работу из-за проблем со стоимостью." } }, "terminal": { @@ -690,55 +710,55 @@ }, "advanced": { "label": "Настройки терминала: Расширенные", - "description": "Следующие параметры могут потребовать перезапуск терминала для применения настроек." + "description": "Эти настройки применяются только когда 'Использовать встроенный терминал' отключено. 
Они влияют только на терминал VS Code и могут потребовать перезапуска IDE." }, "outputLineLimit": { "label": "Лимит вывода терминала", - "description": "Максимальное количество строк, включаемых в вывод терминала при выполнении команд. При превышении строки из середины будут удаляться для экономии токенов. <0>Подробнее" + "description": "Сохраняет первые и последние строки и отбрасывает средние, чтобы остаться в пределах лимита. Уменьшите для экономии токенов; увеличьте, чтобы дать Roo больше деталей из середины. Roo видит заполнитель там, где контент пропущен.<0>Подробнее" }, "outputCharacterLimit": { "label": "Лимит символов терминала", - "description": "Максимальное количество символов для включения в вывод терминала при выполнении команд. Этот лимит имеет приоритет над лимитом строк, чтобы предотвратить проблемы с памятью из-за чрезвычайно длинных строк. При превышении лимита вывод будет усечен. <0>Узнать больше" + "description": "Переопределяет лимит строк для предотвращения проблем с памятью, устанавливая жёсткое ограничение на размер вывода. При превышении сохраняет начало и конец и показывает Roo заполнитель там, где контент пропущен. <0>Подробнее" }, "shellIntegrationTimeout": { - "label": "Таймаут интеграции оболочки терминала", - "description": "Максимальное время ожидания инициализации интеграции оболочки перед выполнением команд. Для пользователей с долгим стартом shell это значение можно увеличить, если появляются ошибки \"Shell Integration Unavailable\". <0>Подробнее" + "label": "Таймаут интеграции shell терминала", + "description": "Сколько ждать интеграции shell VS Code перед выполнением команд. Увеличьте, если ваш shell запускается медленно или вы видите ошибки 'Интеграция Shell Недоступна'. <0>Подробнее" }, "shellIntegrationDisabled": { - "label": "Отключить интеграцию оболочки терминала", - "description": "Включите это, если команды терминала не работают должным образом или вы видите ошибки 'Shell Integration Unavailable'. 
Это использует более простой метод выполнения команд, обходя некоторые расширенные функции терминала. <0>Подробнее" + "label": "Использовать встроенный терминал (рекомендуется)", + "description": "Выполняйте команды во встроенном терминале (чат), чтобы обойти профили/интеграцию shell для более быстрого и надёжного выполнения. Когда отключено, Roo использует терминал VS Code с вашим профилем shell, промптами и плагинами. <0>Подробнее" }, "commandDelay": { "label": "Задержка команды терминала", - "description": "Задержка в миллисекундах после выполнения команды. Значение по умолчанию 0 полностью отключает задержку. Это может помочь захватить весь вывод в терминалах с проблемами синхронизации. Обычно реализуется установкой `PROMPT_COMMAND='sleep N'`, в Powershell добавляется `start-sleep` в конец команды. Изначально было обходом бага VSCode #237208 и может не требоваться. <0>Подробнее" + "description": "Добавляет короткую паузу после каждой команды, чтобы терминал VS Code мог вывести весь output (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). Используйте только если видите отсутствующий tail output; иначе оставьте 0. <0>Подробнее" }, "compressProgressBar": { "label": "Сжимать вывод прогресс-бара", - "description": "Если включено, обрабатывает вывод терминала с возвратами каретки (\\r), имитируя отображение в реальном терминале. Промежуточные состояния прогресс-бара удаляются, остаётся только финальное, что экономит место в контексте. <0>Подробнее" + "description": "Сворачивает прогресс-бары/спиннеры, чтобы сохранялось только финальное состояние (экономит токены). <0>Подробнее" }, "powershellCounter": { "label": "Включить обходчик счётчика PowerShell", - "description": "Если включено, добавляет счётчик к командам PowerShell для корректного выполнения. Помогает при проблемах с захватом вывода в терминалах PowerShell. 
<0>Подробнее" + "description": "Включите, когда вывод PowerShell отсутствует или дублируется; добавляет маленький счётчик к каждой команде для стабилизации вывода. Оставьте выключенным, если вывод уже выглядит корректно. <0>Подробнее" }, "zshClearEolMark": { - "label": "Очищать метку конца строки ZSH", - "description": "Если включено, очищает PROMPT_EOL_MARK в zsh, чтобы избежать проблем с интерпретацией вывода, когда он заканчивается специальными символами типа '%'. <0>Подробнее" + "label": "Очистить метку EOL ZSH", + "description": "Включите, когда видите потерянные % в конце строк или парсинг выглядит неправильно; пропускает метку конца строки (%) Zsh. <0>Подробнее" }, "zshOhMy": { "label": "Включить интеграцию Oh My Zsh", - "description": "Если включено, устанавливает ITERM_SHELL_INTEGRATION_INSTALLED=Yes для поддержки функций интеграции Oh My Zsh. Применение этой настройки может потребовать перезапуска IDE. <0>Подробнее" + "description": "Включите, когда ваша тема/плагины Oh My Zsh ожидают интеграцию shell; устанавливает ITERM_SHELL_INTEGRATION_INSTALLED=Yes. Выключите, чтобы избежать установки этой переменной. <0>Подробнее" }, "zshP10k": { "label": "Включить интеграцию Powerlevel10k", - "description": "Если включено, устанавливает POWERLEVEL9K_TERM_SHELL_INTEGRATION=true для поддержки функций Powerlevel10k. <0>Подробнее" + "description": "Включите при использовании интеграции shell Powerlevel10k. <0>Подробнее" }, "zdotdir": { "label": "Включить обработку ZDOTDIR", - "description": "Если включено, создаёт временную директорию для ZDOTDIR для корректной интеграции zsh. Это обеспечивает корректную работу интеграции VSCode с zsh, сохраняя вашу конфигурацию. <0>Подробнее" + "description": "Включите, когда интеграция shell zsh не работает или конфликтует с вашими dotfiles. 
<0>Подробнее" }, "inheritEnv": { "label": "Наследовать переменные среды", - "description": "Если включено, терминал будет наследовать переменные среды от родительского процесса VSCode, такие как настройки интеграции оболочки, определённые в профиле пользователя. Напрямую переключает глобальную настройку VSCode `terminal.integrated.inheritEnv`. <0>Подробнее" + "description": "Включите для наследования переменных среды от родительского процесса VS Code. <0>Подробнее" } }, "advancedSettings": { @@ -747,7 +767,7 @@ "advanced": { "diff": { "label": "Включить редактирование через диффы", - "description": "Если включено, Kilo Code сможет быстрее редактировать файлы и автоматически отклонять усечённые полные записи. Лучше всего работает с последней моделью Claude 4 Sonnet.", + "description": "Если включено, Kilo Code сможет быстрее редактировать файлы и автоматически отклонять усечённые полные записи", "strategy": { "label": "Стратегия диффа", "options": { @@ -776,10 +796,6 @@ "name": "Использовать экспериментальную стратегию унифицированного диффа", "description": "Включает экспериментальную стратегию унифицированного диффа. Может уменьшить количество повторных попыток из-за ошибок модели, но может привести к неожиданному поведению или неверным правкам. Включайте только если готовы внимательно проверять все изменения." }, - "SEARCH_AND_REPLACE": { - "name": "Использовать экспериментальный инструмент поиска и замены", - "description": "Включает экспериментальный инструмент поиска и замены, позволяя Kilo Code заменять несколько вхождений за один запрос." - }, "INSERT_BLOCK": { "name": "Использовать экспериментальный инструмент вставки контента", "description": "Включает экспериментальный инструмент вставки контента, позволяя Kilo Code вставлять контент по номеру строки без создания диффа." 
@@ -866,8 +882,6 @@ "modelInfo": { "supportsImages": "Поддерживает изображения", "noImages": "Не поддерживает изображения", - "supportsComputerUse": "Поддерживает использование компьютера", - "noComputerUse": "Не поддерживает использование компьютера", "supportsPromptCache": "Поддерживает кэширование подсказок", "noPromptCache": "Не поддерживает кэширование подсказок", "contextWindow": "Контекстное окно:", diff --git a/webview-ui/src/i18n/locales/th/chat.json b/webview-ui/src/i18n/locales/th/chat.json index 638cad5f554..874cc1c1a59 100644 --- a/webview-ui/src/i18n/locales/th/chat.json +++ b/webview-ui/src/i18n/locales/th/chat.json @@ -163,6 +163,8 @@ "initializingWarning": "ยังคงเริ่มต้นจุดตรวจ... หากใช้เวลานานเกินไป คุณสามารถปิดจุดตรวจในการตั้งค่าและรีสตาร์ทงานของคุณ", "menu": { "viewDiff": "ดูความแตกต่าง", + "viewDiffFromInit": "ดูการเปลี่ยนแปลงทั้งหมด", + "viewDiffWithCurrent": "ดูการเปลี่ยนแปลงตั้งแต่จุดตรวจนี้", "restore": "คืนค่าจุดตรวจ", "restoreFiles": "คืนค่าไฟล์", "restoreFilesDescription": "คืนค่าไฟล์โปรเจ็กต์ของคุณกลับไปยังสแนปช็อตที่ถ่ายไว้ ณ จุดนี้", @@ -170,7 +172,8 @@ "confirm": "ยืนยัน", "cancel": "ยกเลิก", "cannotUndo": "ไม่สามารถยกเลิกการดำเนินการนี้ได้", - "restoreFilesAndTaskDescription": "คืนค่าไฟล์โปรเจ็กต์ของคุณกลับไปยังสแนปช็อตที่ถ่ายไว้ ณ จุดนี้และลบข้อความทั้งหมดหลังจากจุดนี้" + "restoreFilesAndTaskDescription": "คืนค่าไฟล์โปรเจ็กต์ของคุณกลับไปยังสแนปช็อตที่ถ่ายไว้ ณ จุดนี้และลบข้อความทั้งหมดหลังจากจุดนี้", + "more": "ตัวเลือกเพิ่มเติม" }, "current": "ปัจจุบัน" }, @@ -302,10 +305,11 @@ "triggerLabel_one": "อนุมัติอัตโนมัติ: 1", "triggerLabel_other": "อนุมัติอัตโนมัติ: {{count}}", "triggerLabelAll": "อนุมัติอัตโนมัติ: YOLO", - "triggerLabelOff": "ปิดการอนุมัติอัตโนมัติ" + "triggerLabelOff": "ปิดการอนุมัติอัตโนมัติ", + "triggerLabelOffShort": "ปิด" }, "announcement": { - "title": "🎉 เปิดตัว Kilo Code {{version}}", + "title": "🎉 เปิดตัว Roo Code {{version}}", "description": "Kilo Code {{version}} 
นำเสนอฟีเจอร์ใหม่ที่สำคัญและการปรับปรุงตามคำติชมของคุณ", "whatsNew": "มีอะไรใหม่", "feature1": "เปิดตัว Kilo Code Marketplace: ตลาดกลางเปิดให้บริการแล้ว! ค้นพบและติดตั้งโหมดและ MCP ได้ง่ายกว่าที่เคย", @@ -320,9 +324,23 @@ "feature": "โมเดลแฝงฟรีในเวลาจำกัด - โมเดลการให้เหตุผลที่รวดเร็วซึ่งเชี่ยวชาญในการเขียนโค้ดแบบ agentic ด้วย context window 262k พร้อมใช้งานผ่าน Roo Code Cloud", "note": "(หมายเหตุ: พรอมต์และผลลัพธ์จะถูกบันทึกโดยผู้สร้างโมเดลเพื่อปรับปรุงโมเดล)", "connectButton": "เชื่อมต่อกับ Roo Code Cloud", - "selectModel": "เลือก roo/sonic จากผู้ให้บริการ Roo Code Cloud ใน
    การตั้งค่า เพื่อเริ่มต้น", + "selectModel": "เลือก roo/code-supernova จากผู้ให้บริการ Roo Code Cloud ในการตั้งค่า เพื่อเริ่มต้น", "goToSettingsButton": "ไปที่การตั้งค่า" - } + }, + "release": { + "heading": "ใหม่ในส่วนขยาย:", + "openRouterEmbeddings": "รองรับโมเดล embedding ของ OpenRouter", + "chutesDynamic": "Chutes โหลดโมเดลล่าสุดแบบไดนามิก", + "queuedMessagesFix": "แก้ไขปัญหาข้อความในคิวที่สูญหาย" + }, + "cloudAgents": { + "heading": "ใหม่ใน Cloud:", + "prFixer": "ขอแนะนำ PR Fixer cloud agent เพื่อเสริม PR Reviewer", + "prFixerDescription": "PR Fixer ใช้การเปลี่ยนแปลงคุณภาพสูงกับ PR ของคุณ ตรงจาก GitHub เรียกใช้ผ่านความคิดเห็น PR และจะอ่านประวัติความคิดเห็นทั้งหมดเพื่อเข้าใจบริบท ข้อตกลง และการแลกเปลี่ยน - จากนั้นดำเนินการแก้ไขที่ถูกต้อง", + "tryPrFixerButton": "ลอง PR Fixer" + }, + "careers": "นอกจากนี้ เรากำลังรับสมัครงาน!", + "socialLinks": "เข้าร่วมกับเราบน X, Discord หรือ r/RooCode 🚀" }, "reasoning": { "thinking": "กำลังคิด", diff --git a/webview-ui/src/i18n/locales/th/common.json b/webview-ui/src/i18n/locales/th/common.json index 8d7a6375d51..cc03ba796c0 100644 --- a/webview-ui/src/i18n/locales/th/common.json +++ b/webview-ui/src/i18n/locales/th/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}} เดือนที่แล้ว", "year_ago": "ปีที่แล้ว", "years_ago": "{{count}} ปีที่แล้ว" + }, + "errors": { + "wait_checkpoint_long_time": "รอการเริ่มต้นจุดตรวจสอบ {{timeout}} วินาทีแล้ว หากคุณไม่ต้องการคุณสมบัติจุดตรวจสอบ โปรดปิดการใช้งานในการตั้งค่าจุดตรวจสอบ", + "init_checkpoint_fail_long_time": "การเริ่มต้นจุดตรวจสอบใช้เวลามากกว่า {{timeout}} วินาทีแล้ว จึงปิดการใช้งานจุดตรวจสอบสำหรับงานนี้ คุณสามารถปิดการใช้งานจุดตรวจสอบหรือขยายเวลารอได้ในการตั้งค่าจุดตรวจสอบ" } } diff --git a/webview-ui/src/i18n/locales/th/mcp.json b/webview-ui/src/i18n/locales/th/mcp.json index 1ed1aab06e9..efadf63f456 100644 --- a/webview-ui/src/i18n/locales/th/mcp.json +++ b/webview-ui/src/i18n/locales/th/mcp.json @@ -26,12 +26,12 @@ "tabs": { "tools": "เครื่องมือ", "resources": "ทรัพยากร", - 
"errors": "ข้อผิดพลาด" + "logs": "บันทึก" }, "emptyState": { "noTools": "ไม่พบเครื่องมือ", "noResources": "ไม่พบทรัพยากร", - "noErrors": "ไม่พบข้อผิดพลาด" + "noLogs": "ยังไม่มีบันทึก" }, "networkTimeout": { "label": "Network Timeout", diff --git a/webview-ui/src/i18n/locales/th/settings.json b/webview-ui/src/i18n/locales/th/settings.json index 26cc8df69c7..29fbd179d0e 100644 --- a/webview-ui/src/i18n/locales/th/settings.json +++ b/webview-ui/src/i18n/locales/th/settings.json @@ -72,6 +72,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "คีย์ API", "vercelAiGatewayApiKeyPlaceholder": "ป้อนคีย์ API Vercel AI Gateway ของคุณ", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "คีย์ API OpenRouter", + "openRouterApiKeyPlaceholder": "ป้อนคีย์ API OpenRouter ของคุณ", "openaiCompatibleProvider": "เข้ากันได้กับ OpenAI", "openAiKeyLabel": "คีย์ API OpenAI", "openAiKeyPlaceholder": "ป้อนคีย์ API OpenAI ของคุณ", @@ -137,6 +140,7 @@ "modelDimensionRequired": "จำเป็นต้องมีมิติโมเดล", "geminiApiKeyRequired": "จำเป็นต้องมีคีย์ API Gemini", "mistralApiKeyRequired": "จำเป็นต้องมีคีย์ API Mistral", + "openRouterApiKeyRequired": "จำเป็นต้องมีคีย์ API OpenRouter", "ollamaBaseUrlRequired": "จำเป็นต้องมี URL พื้นฐาน Ollama", "baseUrlRequired": "จำเป็นต้องมี URL พื้นฐาน", "modelDimensionMinValue": "มิติโมเดลต้องมากกว่า 0", @@ -309,6 +313,9 @@ "moonshotApiKey": "คีย์ API ของ Moonshot", "getMoonshotApiKey": "รับคีย์ API ของ Moonshot", "moonshotBaseUrl": "จุดเข้าใช้งาน Moonshot", + "minimaxApiKey": "คีย์ API ของ MiniMax", + "getMiniMaxApiKey": "รับคีย์ API ของ MiniMax", + "minimaxBaseUrl": "จุดเข้าใช้งาน MiniMax", "zaiEntrypoint": "จุดเข้าใช้งาน Z AI", "zaiEntrypointDescription": "โปรดเลือกจุดเข้าใช้งาน API ที่เหมาะสมตามตำแหน่งของคุณ หากคุณอยู่ในจีน ให้เลือก open.bigmodel.cn มิฉะนั้น ให้เลือก api.z.ai", "geminiApiKey": "คีย์ API ของ Gemini", @@ -484,10 +491,11 @@ }, "reasoningEffort": { "label": "ความพยายามในการให้เหตุผลของโมเดล", + "none": 
"ไม่มี", "minimal": "น้อยที่สุด (เร็วที่สุด)", - "high": "สูง", + "low": "ต่ำ", "medium": "ปานกลาง", - "low": "ต่ำ" + "high": "สูง" }, "verbosity": { "label": "ระดับความละเอียดของผลลัพธ์", @@ -584,6 +592,10 @@ } }, "checkpoints": { + "timeout": { + "label": "หมดเวลาการเริ่มต้นจุดตรวจสอบ (วินาที)", + "description": "เวลาสูงสุดที่จะรอการเริ่มต้นบริการจุดตรวจสอบ ค่าเริ่มต้นคือ 15 วินาที ช่วง: 10-60 วินาที" + }, "enable": { "label": "เปิดใช้งานจุดตรวจสอบอัตโนมัติ", "description": "เมื่อเปิดใช้งาน Kilo Code จะสร้างจุดตรวจสอบโดยอัตโนมัติระหว่างการทำงาน ทำให้ง่ายต่อการตรวจสอบการเปลี่ยนแปลงหรือย้อนกลับไปยังสถานะก่อนหน้า <0>เรียนรู้เพิ่มเติม" @@ -703,19 +715,15 @@ "lines": "บรรทัด", "always_full_read": "อ่านทั้งไฟล์เสมอ" }, - "imageFileSize": { + "maxImageFileSize": { "label": "ขนาดไฟล์รูปภาพสูงสุด", - "description": "ขนาดไฟล์รูปภาพสูงสุดเป็น MB ที่จะรวมในบริบท รูปภาพที่ใหญ่กว่านี้จะถูกปฏิเสธ", - "mb": "MB" + "mb": "MB", + "description": "ขนาดสูงสุด (เป็น MB) สำหรับไฟล์รูปภาพที่สามารถประมวลผลโดยเครื่องมืออ่านไฟล์" }, - "condensingThreshold": { - "label": "เกณฑ์การกระตุ้นการย่อ", - "selectProfile": "กำหนดค่าเกณฑ์สำหรับโปรไฟล์", - "defaultProfile": "ค่าเริ่มต้นสากล (ทุกโปรไฟล์)", - "defaultDescription": "เมื่อบริบทถึงเปอร์เซ็นต์นี้ จะถูกย่อโดยอัตโนมัติสำหรับทุกโปรไฟล์ เว้นแต่จะมีการตั้งค่าที่กำหนดเอง", - "profileDescription": "เกณฑ์ที่กำหนดเองสำหรับโปรไฟล์นี้เท่านั้น (แทนที่ค่าเริ่มต้นสากล)", - "inheritDescription": "โปรไฟล์นี้สืบทอดเกณฑ์เริ่มต้นสากล ({{threshold}}%)", - "usesGlobal": "(ใช้ค่าสากล {{threshold}}%)" + "maxTotalImageSize": { + "label": "ขนาดรูปภาพรวมสูงสุด", + "mb": "MB", + "description": "ขีดจำกัดขนาดสะสมสูงสุด (เป็น MB) สำหรับรูปภาพทั้งหมดที่ประมวลผลในการดำเนินการ read_file เดียว เมื่ออ่านรูปภาพหลายรูป ขนาดของแต่ละรูปจะถูกเพิ่มเข้าไปในผลรวม หากการรวมรูปภาพอื่นจะเกินขีดจำกัดนี้ รูปภาพนั้นจะถูกข้าม" }, "diagnostics": { "includeMessages": { @@ -730,18 +738,25 @@ }, "delayAfterWrite": { "label": "หน่วงเวลาหลังการเขียนเพื่อให้การวินิจฉัยตรวจจับปัญหาที่อาจเกิดขึ้น", 
- "description": "เวลาที่รอหลังจากการเขียนไฟล์ก่อนที่จะรวบรวมการวินิจฉัย (เป็นมิลลิวินาที) ค่าที่สูงขึ้นอาจปรับปรุงความแม่นยำแต่ทำให้การตรวจจับข้อผิดพลาดช้าลง" + "description": "เวลาที่รอหลังจากการเขียนไฟล์ก่อนดำเนินการต่อ ทำให้เครื่องมือวินิจฉัยสามารถประมวลผลการเปลี่ยนแปลงและตรวจจับปัญหาได้" } }, - "maxImageFileSize": { - "label": "ขนาดไฟล์รูปภาพสูงสุด", - "mb": "MB", - "description": "ขนาดสูงสุด (เป็น MB) สำหรับไฟล์รูปภาพที่สามารถรวมในคำขอได้ ไฟล์ที่ใหญ่กว่าจะถูกปรับขนาดโดยอัตโนมัติ" + "condensingThreshold": { + "label": "เกณฑ์การกระตุ้นการย่อ", + "selectProfile": "กำหนดค่าเกณฑ์สำหรับโปรไฟล์", + "defaultProfile": "ค่าเริ่มต้นสากล (ทุกโปรไฟล์)", + "defaultDescription": "เมื่อบริบทถึงเปอร์เซ็นต์นี้ จะถูกย่อโดยอัตโนมัติสำหรับทุกโปรไฟล์ เว้นแต่จะมีการตั้งค่าที่กำหนดเอง", + "profileDescription": "เกณฑ์ที่กำหนดเองสำหรับโปรไฟล์นี้เท่านั้น (แทนที่ค่าเริ่มต้นสากล)", + "inheritDescription": "โปรไฟล์นี้สืบทอดเกณฑ์เริ่มต้นสากล ({{threshold}}%)", + "usesGlobal": "(ใช้ค่าสากล {{threshold}}%)" }, - "maxTotalImageSize": { - "label": "ขนาดรูปภาพรวมสูงสุด", - "mb": "MB", - "description": "ขีดจำกัดขนาดสะสมสูงสุด (เป็น MB) สำหรับรูปภาพทั้งหมดในคำขอเดียว เมื่อเกินขีดจำกัด รูปภาพจะถูกปรับขนาดให้พอดีกับขีดจำกัดนี้" + "includeCurrentTime": { + "label": "รวมเวลาปัจจุบันในบริบท", + "description": "เมื่อเปิดใช้งาน เวลาปัจจุบันและข้อมูลเขตเวลาจะถูกรวมในพรอมต์ระบบ ปิดใช้งานหากโมเดลหยุดทำงานเนื่องจากข้อกังวลเรื่องเวลา" + }, + "includeCurrentCost": { + "label": "รวมค่าใช้จ่ายปัจจุบันในบริบท", + "description": "เมื่อเปิดใช้งาน ค่าใช้จ่ายการใช้งาน API ปัจจุบันจะถูกรวมในพรอมต์ระบบ ปิดใช้งานหากโมเดลหยุดทำงานเนื่องจากข้อกังวลเรื่องค่าใช้จ่าย" } }, "terminal": { @@ -751,55 +766,55 @@ }, "advanced": { "label": "การตั้งค่าเทอร์มินัล: ขั้นสูง", - "description": "ตัวเลือกต่อไปนี้อาจต้องรีสตาร์ทเทอร์มินัลเพื่อใช้การตั้งค่า" + "description": "การตั้งค่าเหล่านี้ใช้เฉพาะเมื่อ 'ใช้เทอร์มินัลแบบอินไลน์' ถูกปิดใช้งาน จะส่งผลต่อเทอร์มินัล VS Code เท่านั้นและอาจต้องรีสตาร์ท IDE" }, "outputLineLimit": { "label": 
"ขีดจำกัดเอาต์พุตเทอร์มินัล", - "description": "จำนวนบรรทัดสูงสุดที่จะรวมไว้ในเอาต์พุตเทอร์มินัลเมื่อรันคำสั่ง เมื่อเกินจำนวนบรรทัดจะถูกลบออกจากตรงกลางเพื่อประหยัดโทเค็น <0>เรียนรู้เพิ่มเติม" + "description": "เก็บบรรทัดแรกและบรรทัดสุดท้ายและลบตรงกลางเพื่อให้อยู่ภายใต้ขีดจำกัด ลดลงเพื่อประหยัดโทเค็น เพิ่มขึ้นเพื่อให้ Roo มีรายละเอียดตรงกลางมากขึ้น Roo จะเห็นตัวยึดตำแหน่งที่เนื้อหาถูกข้าม<0>เรียนรู้เพิ่มเติม" }, "outputCharacterLimit": { "label": "ขีดจำกัดอักขระเทอร์มินัล", - "description": "จำนวนอักขระสูงสุดที่จะรวมไว้ในเอาต์พุตเทอร์มินัลเมื่อรันคำสั่ง ขีดจำกัดนี้มีความสำคัญเหนือขีดจำกัดบรรทัดเพื่อป้องกันปัญหาหน่วยความจำจากบรรทัดที่ยาวมาก เมื่อเกินขีดจำกัด เอาต์พุตจะถูกตัดทอน <0>เรียนรู้เพิ่มเติม" + "description": "แทนที่ขีดจำกัดบรรทัดเพื่อป้องกันปัญหาหน่วยความจำโดยการบังคับใช้ขีดจำกัดสูงสุดของขนาดเอาต์พุต หากเกิน จะเก็บส่วนต้นและส่วนท้ายและแสดงตัวยึดตำแหน่งให้ Roo ที่เนื้อหาถูกข้าม <0>เรียนรู้เพิ่มเติม" }, "shellIntegrationTimeout": { "label": "หมดเวลาการรวมเชลล์เทอร์มินัล", - "description": "เวลารอสูงสุดสำหรับการรวมเชลล์เพื่อเริ่มต้นก่อนรันคำสั่ง สำหรับผู้ใช้ที่มีเวลาเริ่มต้นเชลล์นาน อาจต้องเพิ่มค่านี้หากคุณเห็นข้อผิดพลาด \"การรวมเชลล์ไม่พร้อมใช้งาน\" ในเทอร์มินัล <0>เรียนรู้เพิ่มเติม" + "description": "ระยะเวลารอการรวมเชลล์ของ VS Code ก่อนรันคำสั่ง เพิ่มขึ้นหากเชลล์ของคุณเริ่มต้นช้าหรือคุณเห็นข้อผิดพลาด 'Shell Integration Unavailable' <0>เรียนรู้เพิ่มเติม" }, "shellIntegrationDisabled": { - "label": "ปิดใช้งานการรวมเชลล์เทอร์มินัล", - "description": "เปิดใช้งานนี้หากคำสั่งเทอร์มินัลทำงานไม่ถูกต้อง หรือคุณเห็นข้อผิดพลาด 'การรวมเชลล์ไม่พร้อมใช้งาน' ซึ่งจะใช้วิธีที่ง่ายกว่าในการรันคำสั่ง โดยข้ามคุณสมบัติเทอร์มินัลขั้นสูงบางอย่าง <0>เรียนรู้เพิ่มเติม" + "label": "ใช้เทอร์มินัลแบบอินไลน์ (แนะนำ)", + "description": "รันคำสั่งในเทอร์มินัลแบบอินไลน์ (แชท) เพื่อข้ามโปรไฟล์/การรวมเชลล์สำหรับการรันที่เร็วและเชื่อถือได้มากขึ้น เมื่อปิดใช้งาน Roo จะใช้เทอร์มินัล VS Code พร้อมโปรไฟล์เชลล์ พรอมต์ และปลั๊กอินของคุณ <0>เรียนรู้เพิ่มเติม" }, "commandDelay": { "label": 
"หน่วงเวลาคำสั่งเทอร์มินัล", - "description": "หน่วงเวลาเป็นมิลลิวินาทีที่จะเพิ่มหลังการรันคำสั่ง การตั้งค่าเริ่มต้นที่ 0 จะปิดใช้งานการหน่วงเวลาโดยสมบูรณ์ ซึ่งจะช่วยให้แน่ใจว่าเอาต์พุตคำสั่งถูกจับภาพได้อย่างสมบูรณ์ในเทอร์มินัลที่มีปัญหาเรื่องเวลา ในเทอร์มินัลส่วนใหญ่จะใช้โดยการตั้งค่า `PROMPT_COMMAND='sleep N'` และ Powershell จะเพิ่ม `start-sleep` ต่อท้ายแต่ละคำสั่ง เดิมทีเป็นวิธีแก้ปัญหาสำหรับข้อผิดพลาด VSCode#237208 และอาจไม่จำเป็นต้องใช้อีกต่อไป <0>เรียนรู้เพิ่มเติม" + "description": "เพิ่มการหยุดชั่วคราวสั้นๆ หลังแต่ละคำสั่งเพื่อให้เทอร์มินัล VS Code สามารถล้างเอาต์พุตทั้งหมด (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep) ใช้เฉพาะเมื่อคุณเห็นเอาต์พุตส่วนท้ายหายไป มิฉะนั้นให้เว้นไว้ที่ 0 <0>เรียนรู้เพิ่มเติม" }, "compressProgressBar": { "label": "บีบอัดเอาต์พุตแถบความคืบหน้า", - "description": "เมื่อเปิดใช้งาน จะประมวลผลเอาต์พุตเทอร์มินัลด้วยการคืนแคร่ตลับหมึก (\\r) เพื่อจำลองวิธีที่เทอร์มินัลจริงจะแสดงเนื้อหา ซึ่งจะลบสถานะแถบความคืบหน้ากลางออก เหลือเพียงสถานะสุดท้าย ซึ่งจะช่วยประหยัดพื้นที่บริบทสำหรับข้อมูลที่เกี่ยวข้องมากขึ้น <0>เรียนรู้เพิ่มเติม" + "description": "ย่อแถบความคืบหน้า/สปินเนอร์เพื่อให้เก็บเฉพาะสถานะสุดท้าย (ประหยัดโทเค็น) <0>เรียนรู้เพิ่มเติม" }, "powershellCounter": { "label": "เปิดใช้งานวิธีแก้ปัญหาตัวนับ PowerShell", - "description": "เมื่อเปิดใช้งาน จะเพิ่มตัวนับในคำสั่ง PowerShell เพื่อให้แน่ใจว่าการรันคำสั่งถูกต้อง ซึ่งจะช่วยแก้ปัญหาเทอร์มินัล PowerShell ที่อาจมีปัญหาเรื่องการจับภาพเอาต์พุตคำสั่ง <0>เรียนรู้เพิ่มเติม" + "description": "เปิดใช้งานเมื่อเอาต์พุต PowerShell หายไปหรือซ้ำกัน จะเพิ่มตัวนับเล็กๆ ต่อท้ายแต่ละคำสั่งเพื่อทำให้เอาต์พุตเสถียร ปิดใช้งานหากเอาต์พุตดูถูกต้องอยู่แล้ว <0>เรียนรู้เพิ่มเติม" }, "zshClearEolMark": { "label": "ล้างเครื่องหมาย EOL ของ ZSH", - "description": "เมื่อเปิดใช้งาน จะล้างเครื่องหมายสิ้นสุดบรรทัดของ ZSH โดยการตั้งค่า PROMPT_EOL_MARK='' ซึ่งจะป้องกันปัญหาการตีความเอาต์พุตคำสั่งเมื่อเอาต์พุตลงท้ายด้วยอักขระพิเศษเช่น '%' <0>เรียนรู้เพิ่มเติม" + "description": "เปิดใช้งานเมื่อคุณเห็น % 
ที่หลงเหลืออยู่ที่ท้ายบรรทัดหรือการแยกวิเคราะห์ดูผิดพลาด จะละเว้นเครื่องหมายสิ้นสุดบรรทัดของ Zsh (%) <0>เรียนรู้เพิ่มเติม" }, "zshOhMy": { "label": "เปิดใช้งานการรวม Oh My Zsh", - "description": "เมื่อเปิดใช้งาน จะตั้งค่า ITERM_SHELL_INTEGRATION_INSTALLED=Yes เพื่อเปิดใช้งานคุณสมบัติการรวมเชลล์ Oh My Zsh การใช้การตั้งค่านี้อาจต้องรีสตาร์ท IDE <0>เรียนรู้เพิ่มเติม" + "description": "เปิดใช้งานเมื่อธีม/ปลั๊กอิน Oh My Zsh ของคุณคาดหวังการรวมเชลล์ จะตั้งค่า ITERM_SHELL_INTEGRATION_INSTALLED=Yes ปิดใช้งานเพื่อหลีกเลี่ยงการตั้งค่าตัวแปรนั้น <0>เรียนรู้เพิ่มเติม" }, "zshP10k": { "label": "เปิดใช้งานการรวม Powerlevel10k", - "description": "เมื่อเปิดใช้งาน จะตั้งค่า POWERLEVEL9K_TERM_SHELL_INTEGRATION=true เพื่อเปิดใช้งานคุณสมบัติการรวมเชลล์ Powerlevel10k <0>เรียนรู้เพิ่มเติม" + "description": "เปิดใช้งานเมื่อใช้การรวมเชลล์ Powerlevel10k <0>เรียนรู้เพิ่มเติม" }, "zdotdir": { "label": "เปิดใช้งานการจัดการ ZDOTDIR", - "description": "เมื่อเปิดใช้งาน จะสร้างไดเรกทอรีชั่วคราวสำหรับ ZDOTDIR เพื่อจัดการการรวมเชลล์ zsh อย่างถูกต้อง ซึ่งจะช่วยให้แน่ใจว่าการรวมเชลล์ VSCode ทำงานได้อย่างถูกต้องกับ zsh ในขณะที่ยังคงการกำหนดค่า zsh ของคุณไว้ <0>เรียนรู้เพิ่มเติม" + "description": "เปิดใช้งานเมื่อการรวมเชลล์ zsh ล้มเหลวหรือขัดแย้งกับ dotfiles ของคุณ <0>เรียนรู้เพิ่มเติม" }, "inheritEnv": { "label": "สืบทอดตัวแปรสภาพแวดล้อม", - "description": "เมื่อเปิดใช้งาน เทอร์มินัลจะสืบทอดตัวแปรสภาพแวดล้อมจากกระบวนการหลักของ VSCode เช่น การตั้งค่าการรวมเชลล์ที่กำหนดโดยโปรไฟล์ผู้ใช้ ซึ่งจะสลับการตั้งค่าส่วนกลางของ VSCode `terminal.integrated.inheritEnv` โดยตรง <0>เรียนรู้เพิ่มเติม" + "description": "เปิดใช้งานเพื่อสืบทอดตัวแปรสภาพแวดล้อมจากกระบวนการหลักของ VS Code <0>เรียนรู้เพิ่มเติม" } }, "advancedSettings": { @@ -808,7 +823,7 @@ "advanced": { "diff": { "label": "เปิดใช้งานการแก้ไขผ่าน diffs", - "description": "เมื่อเปิดใช้งาน Kilo Code จะสามารถแก้ไขไฟล์ได้เร็วขึ้นและจะปฏิเสธการเขียนไฟล์เต็มที่ถูกตัดทอนโดยอัตโนมัติ ทำงานได้ดีที่สุดกับโมเดล Claude 4 Sonnet ล่าสุด", + "description": "เมื่อเปิดใช้งาน 
Kilo Code จะสามารถแก้ไขไฟล์ได้เร็วขึ้นและจะปฏิเสธการเขียนไฟล์เต็มที่ถูกตัดทอนโดยอัตโนมัติ", "strategy": { "label": "กลยุทธ์ Diff", "options": { @@ -837,10 +852,6 @@ "name": "ใช้กลยุทธ์ unified diff แบบทดลอง", "description": "เปิดใช้งานกลยุทธ์ unified diff แบบทดลอง กลยุทธ์นี้อาจลดจำนวนการลองใหม่ที่เกิดจากข้อผิดพลาดของโมเดล แต่อาจทำให้เกิดพฤติกรรมที่ไม่คาดคิดหรือการแก้ไขที่ไม่ถูกต้อง เปิดใช้งานเฉพาะเมื่อคุณเข้าใจความเสี่ยงและยินดีที่จะตรวจสอบการเปลี่ยนแปลงทั้งหมดอย่างระมัดระวัง" }, - "SEARCH_AND_REPLACE": { - "name": "ใช้เครื่องมือค้นหาและแทนที่แบบทดลอง", - "description": "เปิดใช้งานเครื่องมือค้นหาและแทนที่แบบทดลอง ทำให้ Kilo Code สามารถแทนที่คำค้นหาหลายรายการในคำขอเดียว" - }, "INSERT_BLOCK": { "name": "ใช้เครื่องมือแทรกเนื้อหาแบบทดลอง", "description": "เปิดใช้งานเครื่องมือแทรกเนื้อหาแบบทดลอง ทำให้ Kilo Code สามารถแทรกเนื้อหาที่หมายเลขบรรทัดเฉพาะโดยไม่ต้องสร้าง diff" diff --git a/webview-ui/src/i18n/locales/tr/chat.json b/webview-ui/src/i18n/locales/tr/chat.json index 6f27a24a196..89be551fcfe 100644 --- a/webview-ui/src/i18n/locales/tr/chat.json +++ b/webview-ui/src/i18n/locales/tr/chat.json @@ -156,6 +156,9 @@ "initializingWarning": "Kontrol noktası hala başlatılıyor... 
Bu çok uzun sürerse, ayarlar bölümünden kontrol noktalarını devre dışı bırakabilir ve görevinizi yeniden başlatabilirsiniz.", "menu": { "viewDiff": "Farkları Görüntüle", + "more": "Daha fazla seçenek", + "viewDiffFromInit": "Tüm Değişiklikleri Görüntüle", + "viewDiffWithCurrent": "Bu Kontrol Noktasından Bu Yana Değişiklikleri Görüntüle", "restore": "Kontrol Noktasını Geri Yükle", "restoreFiles": "Dosyaları Geri Yükle", "restoreFilesDescription": "Projenizin dosyalarını bu noktada alınan bir anlık görüntüye geri yükler.", @@ -273,6 +276,7 @@ "toggleAriaLabel": "Otomatik onayı aç/kapat", "disabledAriaLabel": "Otomatik onay devre dışı - önce seçenekleri seçin", "triggerLabelOff": "Otomatik onay kapalı", + "triggerLabelOffShort": "Kapalı", "triggerLabel_zero": "0 otomatik onaylandı", "triggerLabel_one": "1 otomatik onaylandı", "triggerLabel_other": "{{count}} otomatik onaylandı", @@ -302,6 +306,19 @@ "selectModel": "Başlamak için Ayarlar'da Roo Code Cloud sağlayıcısından roo/code-supernova'yı seçin.", "goToSettingsButton": "Ayarlar'a Git" }, + "release": { + "heading": "Uzantıdaki yenilikler:", + "openRouterEmbeddings": "OpenRouter gömme modelleri desteği", + "chutesDynamic": "Chutes artık en son modelleri dinamik olarak yüklüyor", + "queuedMessagesFix": "Sıradaki mesajların kaybolması için düzeltmeler" + }, + "cloudAgents": { + "heading": "Cloud'daki yenilikler:", + "prFixer": "PR Reviewer'ı tamamlamak için PR Fixer bulut ajanını tanıtıyoruz.", + "prFixerDescription": "Roo Code'un PR Fixer'ı, PR'larınıza doğrudan GitHub'dan yüksek kaliteli değişiklikler uygular. 
Bir PR yorumu aracılığıyla çağırın ve bağlamı, anlaşmaları ve ödünleri anlamak için tüm yorum geçmişini okuyacak - sonra doğru düzeltmeyi uygulayacaktır.", + "tryPrFixerButton": "PR Fixer'ı Dene" + }, + "careers": "Ayrıca, işe alım yapıyoruz!", "socialLinks": "Bize X, Discord, veya r/RooCode'da katılın 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/tr/common.json b/webview-ui/src/i18n/locales/tr/common.json index e600ddf2719..77576759928 100644 --- a/webview-ui/src/i18n/locales/tr/common.json +++ b/webview-ui/src/i18n/locales/tr/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}} ay önce", "year_ago": "bir yıl önce", "years_ago": "{{count}} yıl önce" + }, + "errors": { + "wait_checkpoint_long_time": "{{timeout}} saniye boyunca kontrol noktası başlatılması beklendi. Bu özelliğe ihtiyacınız yoksa kontrol noktası ayarlarından kapatabilirsiniz.", + "init_checkpoint_fail_long_time": "Kontrol noktası başlatılması {{timeout}} saniyeden fazla sürdü, bu yüzden bu görev için kontrol noktaları devre dışı bırakıldı. Kontrol noktalarını kapatabilir veya kontrol noktası ayarlarından bekleme süresini artırabilirsiniz."
} } diff --git a/webview-ui/src/i18n/locales/tr/mcp.json b/webview-ui/src/i18n/locales/tr/mcp.json index 54380fecba0..5f1f7cd4940 100644 --- a/webview-ui/src/i18n/locales/tr/mcp.json +++ b/webview-ui/src/i18n/locales/tr/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "Araçlar", "resources": "Kaynaklar", - "errors": "Hatalar" + "logs": "Günlükler" }, "emptyState": { "noTools": "Araç bulunamadı", "noResources": "Kaynak bulunamadı", - "noErrors": "Hata bulunamadı" + "noLogs": "Henüz günlük yok" }, "networkTimeout": { "label": "Ağ Zaman Aşımı", diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json index 9f3474bba0c..bc76b597c2e 100644 --- a/webview-ui/src/i18n/locales/tr/settings.json +++ b/webview-ui/src/i18n/locales/tr/settings.json @@ -61,6 +61,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "API Anahtarı", "vercelAiGatewayApiKeyPlaceholder": "Vercel AI Gateway API anahtarınızı girin", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "OpenRouter API Anahtarı", + "openRouterApiKeyPlaceholder": "OpenRouter API anahtarınızı girin", "openaiCompatibleProvider": "OpenAI Uyumlu", "openAiKeyLabel": "OpenAI API Anahtarı", "openAiKeyPlaceholder": "OpenAI API anahtarınızı girin", @@ -127,7 +130,8 @@ "vercelAiGatewayApiKeyRequired": "Vercel AI Gateway API anahtarı gereklidir", "ollamaBaseUrlRequired": "Ollama temel URL'si gereklidir", "baseUrlRequired": "Temel URL'si gereklidir", - "modelDimensionMinValue": "Model boyutu 0'dan büyük olmalıdır" + "modelDimensionMinValue": "Model boyutu 0'dan büyük olmalıdır", + "openRouterApiKeyRequired": "OpenRouter API anahtarı gereklidir" }, "advancedConfigLabel": "Gelişmiş Yapılandırma", "searchMinScoreLabel": "Arama Skoru Eşiği", @@ -310,6 +314,9 @@ "getZaiApiKey": "Z AI API Anahtarı Al", "zaiEntrypoint": "Z AI Giriş Noktası", "zaiEntrypointDescription": "Konumunuza göre uygun API giriş noktasını seçin. 
Çin'de iseniz open.bigmodel.cn'yi seçin. Aksi takdirde api.z.ai'yi seçin.", + "minimaxApiKey": "MiniMax API Anahtarı", + "getMiniMaxApiKey": "MiniMax API Anahtarı Al", + "minimaxBaseUrl": "MiniMax Giriş Noktası", "geminiApiKey": "Gemini API Anahtarı", "getGroqApiKey": "Groq API Anahtarı Al", "groqApiKey": "Groq API Anahtarı", @@ -363,7 +370,7 @@ "enablePromptCachingTitle": "Desteklenen modeller için performansı artırmak ve maliyetleri azaltmak için istem önbelleğini etkinleştir.", "cacheUsageNote": "Not: Önbellek kullanımını görmüyorsanız, farklı bir model seçip ardından istediğiniz modeli tekrar seçmeyi deneyin.", "vscodeLmModel": "Dil Modeli", - "vscodeLmWarning": "Not: Bu çok deneysel bir entegrasyondur ve sağlayıcı desteği değişebilir. Bir modelin desteklenmediğine dair bir hata alırsanız, bu sağlayıcı tarafındaki bir sorundur.", + "vscodeLmWarning": "Not: VS Code Language Model API üzerinden erişilen modeller sağlayıcı tarafından sarılmış veya ince ayarlanmış olabilir; bu nedenle davranış, aynı modelin tipik bir sağlayıcı ya da yönlendirici üzerinden doğrudan kullanılmasından farklı olabilir. «Language Model» açılır menüsünden bir model kullanmak için önce o modele geçin ve ardından Copilot Chat isteminde «Kabul Et»e tıklayın; aksi takdirde 400 «The requested model is not supported» gibi bir hata görebilirsiniz.", "geminiParameters": { "urlContext": { "title": "URL bağlamını etkinleştir", @@ -440,7 +447,7 @@ }, "computerUse": { "label": "Bilgisayar Kullanımı", - "description": "Bu model bir tarayıcıyla etkileşim kurabilir mi? (örn. Claude 3.7 Sonnet)" + "description": "Bu model bir tarayıcıyla etkileşim kurabilir mi?" 
}, "promptCache": { "label": "İstem Önbelleği", @@ -482,6 +489,7 @@ }, "reasoningEffort": { "label": "Model Akıl Yürütme Çabası", + "none": "Yok", "minimal": "Minimal (en hızlı)", "high": "Yüksek", "medium": "Orta", @@ -554,6 +562,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Kontrol noktası başlatma zaman aşımı (saniye)", + "description": "Kontrol noktası servisini başlatmak için maksimum bekleme süresi. Varsayılan 15 saniye. Aralık: 10-60 saniye." + }, "enable": { "label": "Otomatik kontrol noktalarını etkinleştir", "description": "Etkinleştirildiğinde, Kilo Code görev yürütme sırasında otomatik olarak kontrol noktaları oluşturarak değişiklikleri gözden geçirmeyi veya önceki durumlara dönmeyi kolaylaştırır. <0>Daha fazla bilgi" @@ -682,6 +694,14 @@ "label": "Maksimum toplam görüntü boyutu", "mb": "MB", "description": "Tek bir read_file işleminde işlenen tüm görüntüler için maksimum kümülatif boyut sınırı (MB cinsinden). Birden çok görüntü okurken, her görüntünün boyutu toplama eklenir. Başka bir görüntü eklemek bu sınırı aşacaksa, atlanacaktır." + }, + "includeCurrentTime": { + "label": "Mevcut zamanı bağlama dahil et", + "description": "Etkinleştirildiğinde, mevcut zaman ve saat dilimi bilgileri sistem istemine dahil edilecektir. Modeller zaman endişeleri nedeniyle çalışmayı durdurursa bunu devre dışı bırakın." + }, + "includeCurrentCost": { + "label": "Mevcut maliyeti bağlama dahil et", + "description": "Etkinleştirildiğinde, mevcut API kullanım maliyeti sistem istemine dahil edilecektir. Modeller maliyet endişeleri nedeniyle çalışmayı durdurursa bunu devre dışı bırakın." } }, "terminal": { @@ -691,55 +711,55 @@ }, "advanced": { "label": "Terminal Ayarları: Gelişmiş", - "description": "Aşağıdaki seçeneklerin uygulanması için terminalin yeniden başlatılması gerekebilir." + "description": "Bu ayarlar yalnızca 'Satır İçi Terminal Kullan' devre dışı bırakıldığında geçerlidir. 
Sadece VS Code terminalini etkiler ve IDE'nin yeniden başlatılmasını gerektirebilir." }, "outputLineLimit": { - "label": "Terminal çıktısı sınırı", - "description": "Komutları yürütürken terminal çıktısına dahil edilecek maksimum satır sayısı. Aşıldığında, token tasarrufu sağlayarak satırlar ortadan kaldırılacaktır. <0>Daha fazla bilgi" + "label": "Terminal çıktı sınırı", + "description": "Sınırın altında kalmak için ilk ve son satırları tutar ve ortadakileri atar. Token tasarrufu için düşürün; Roo'ya daha fazla orta ayrıntı vermek için yükseltin. Roo, içeriğin atlandığı yerde bir yer tutucu görür. <0>Daha fazla bilgi edinin" }, "outputCharacterLimit": { "label": "Terminal karakter sınırı", - "description": "Komutları yürütürken terminal çıktısına dahil edilecek maksimum karakter sayısı. Bu sınır, aşırı uzun satırlardan kaynaklanan bellek sorunlarını önlemek için satır sınırına göre önceliklidir. Aşıldığında, çıktı kesilir. <0>Daha fazla bilgi edinin" + "description": "Çıktı boyutuna katı bir üst sınır uygulayarak bellek sorunlarını önlemek için satır sınırını geçersiz kılar. Aşılırsa, başlangıcı ve sonu tutar ve içeriğin atlandığı yerde Roo'ya bir yer tutucu gösterir. <0>Daha fazla bilgi edinin" }, "shellIntegrationTimeout": { - "label": "Terminal kabuk entegrasyonu zaman aşımı", - "description": "Komutları yürütmeden önce kabuk entegrasyonunun başlatılması için beklenecek maksimum süre. Kabuk başlatma süresi uzun olan kullanıcılar için, terminalde \"Shell Integration Unavailable\" hatalarını görürseniz bu değerin artırılması gerekebilir. <0>Daha fazla bilgi" + "label": "Terminal shell entegrasyon timeout", + "description": "Komut çalıştırmadan önce VS Code shell entegrasyonunu bekleme süresi. Shell yavaş başlıyorsa veya 'Shell Integration Unavailable' hatası görüyorsanız artırın. 
<0>Daha fazla bilgi edinin" }, "shellIntegrationDisabled": { - "label": "Terminal kabuk entegrasyonunu devre dışı bırak", - "description": "Terminal komutları düzgün çalışmıyorsa veya 'Shell Integration Unavailable' hataları görüyorsanız bunu etkinleştirin. Bu, bazı gelişmiş terminal özelliklerini atlayarak komutları çalıştırmak için daha basit bir yöntem kullanır. <0>Daha fazla bilgi" + "label": "Satır İçi Terminal Kullan (önerilir)", + "description": "Daha hızlı, daha güvenilir çalıştırmalar için kabuk profillerini/entegrasyonunu atlamak için Satır İçi Terminal'de (sohbet) komutları çalıştırın. Devre dışı bırakıldığında Roo, kabuk profiliniz, istemleriniz ve eklentilerinizle VS Code terminalini kullanır. <0>Daha fazla bilgi edinin" }, "commandDelay": { - "label": "Terminal komut gecikmesi", - "description": "Komut yürütmesinden sonra eklenecek gecikme süresi (milisaniye). 0 varsayılan ayarı gecikmeyi tamamen devre dışı bırakır. Bu, zamanlama sorunları olan terminallerde komut çıktısının tam olarak yakalanmasını sağlamaya yardımcı olabilir. Çoğu terminalde bu, `PROMPT_COMMAND='sleep N'` ayarlanarak uygulanır ve PowerShell her komutun sonuna `start-sleep` ekler. Başlangıçta VSCode hata#237208 için bir geçici çözümdü ve gerekli olmayabilir. <0>Daha fazla bilgi" + "label": "Terminal komut delay", + "description": "VS Code terminalin tüm outputu flush edebilmesi için her komuttan sonra kısa pause ekler (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). Sadece tail output eksikse kullan; yoksa 0'da bırak. <0>Daha fazla bilgi edinin" }, "compressProgressBar": { "label": "İlerleme çubuğu çıktısını sıkıştır", - "description": "Etkinleştirildiğinde, satır başı karakteri (\\r) içeren terminal çıktısını işleyerek gerçek bir terminalin içeriği nasıl göstereceğini simüle eder. Bu, ilerleme çubuğunun ara durumlarını kaldırır, yalnızca son durumu korur ve daha alakalı bilgiler için bağlam alanından tasarruf sağlar. 
<0>Daha fazla bilgi" + "description": "İlerleme çubukları/spinner'ları daraltır, sadece son durumu tutar (token tasarrufu). <0>Daha fazla bilgi edinin" }, "powershellCounter": { "label": "PowerShell sayaç geçici çözümünü etkinleştir", - "description": "Etkinleştirildiğinde, komutların doğru şekilde yürütülmesini sağlamak için PowerShell komutlarına bir sayaç ekler. Bu, çıktı yakalama sorunları yaşayabilecek PowerShell terminallerinde yardımcı olur. <0>Daha fazla bilgi" + "description": "PowerShell çıktısı eksik veya yineleniyorsa bunu açın; çıktıyı stabilize etmek için her komuta küçük bir sayaç ekler. Çıktı zaten doğru görünüyorsa bunu kapalı tutun. <0>Daha fazla bilgi edinin" }, "zshClearEolMark": { - "label": "ZSH satır sonu işaretini temizle", - "description": "Etkinleştirildiğinde, PROMPT_EOL_MARK='' ayarlanarak ZSH satır sonu işaretini temizler. Bu, '%' gibi özel karakterlerle biten komut çıktılarının yorumlanmasında sorun yaşanmasını önler. <0>Daha fazla bilgi" + "label": "ZSH EOL işaretini temizle", + "description": "Satırların sonunda başıboş % gördüğünüzde veya ayrıştırma yanlış göründüğünde bunu açın; Zsh'nin satır sonu işaretini (%) atlar. <0>Daha fazla bilgi edinin" }, "zshOhMy": { "label": "Oh My Zsh entegrasyonunu etkinleştir", - "description": "Etkinleştirildiğinde, Oh My Zsh kabuk entegrasyon özelliklerini etkinleştirmek için ITERM_SHELL_INTEGRATION_INSTALLED=Yes ayarlar. Bu ayarın uygulanması IDE'nin yeniden başlatılmasını gerektirebilir. <0>Daha fazla bilgi" + "description": "Oh My Zsh temanız/eklentileriniz kabuk entegrasyonu beklediğinde bunu açın; ITERM_SHELL_INTEGRATION_INSTALLED=Yes ayarlar. Bu değişkeni ayarlamaktan kaçınmak için bunu kapatın. <0>Daha fazla bilgi edinin" }, "zshP10k": { "label": "Powerlevel10k entegrasyonunu etkinleştir", - "description": "Etkinleştirildiğinde, Powerlevel10k kabuk entegrasyon özelliklerini etkinleştirmek için POWERLEVEL9K_TERM_SHELL_INTEGRATION=true ayarlar. 
<0>Daha fazla bilgi" + "description": "Powerlevel10k kabuk entegrasyonunu kullanırken bunu açın. <0>Daha fazla bilgi edinin" }, "zdotdir": { "label": "ZDOTDIR işlemeyi etkinleştir", - "description": "Etkinleştirildiğinde, zsh kabuğu entegrasyonunu düzgün şekilde işlemek için ZDOTDIR için geçici bir dizin oluşturur. Bu, zsh yapılandırmanızı korurken VSCode kabuk entegrasyonunun zsh ile düzgün çalışmasını sağlar. <0>Daha fazla bilgi" + "description": "zsh kabuk entegrasyonu başarısız olduğunda veya dotfiles'larınızla çakıştığında bunu açın. <0>Daha fazla bilgi edinin" }, "inheritEnv": { "label": "Ortam değişkenlerini devral", - "description": "Etkinleştirildiğinde, terminal VSCode üst işleminden ortam değişkenlerini devralır, örneğin kullanıcı profilinde tanımlanan kabuk entegrasyon ayarları gibi. Bu, VSCode'un global ayarı olan `terminal.integrated.inheritEnv` değerini doğrudan değiştirir. <0>Daha fazla bilgi" + "description": "Ana VS Code işleminden ortam değişkenlerini devralmak için bunu açın. <0>Daha fazla bilgi edinin" } }, "advancedSettings": { @@ -748,7 +768,7 @@ "advanced": { "diff": { "label": "Diff'ler aracılığıyla düzenlemeyi etkinleştir", - "description": "Etkinleştirildiğinde, Kilo Code dosyaları daha hızlı düzenleyebilecek ve kesik tam dosya yazımlarını otomatik olarak reddedecektir. En son Claude 4 Sonnet modeliyle en iyi şekilde çalışır.", + "description": "Etkinleştirildiğinde, Kilo Code dosyaları daha hızlı düzenleyebilecek ve kesik tam dosya yazımlarını otomatik olarak reddedecektir", "strategy": { "label": "Diff stratejisi", "options": { @@ -777,10 +797,6 @@ "name": "Deneysel birleştirilmiş diff stratejisini kullan", "description": "Deneysel birleştirilmiş diff stratejisini etkinleştir. Bu strateji, model hatalarından kaynaklanan yeniden deneme sayısını azaltabilir, ancak beklenmeyen davranışlara veya hatalı düzenlemelere neden olabilir. Yalnızca riskleri anlıyorsanız ve tüm değişiklikleri dikkatlice incelemeye istekliyseniz etkinleştirin." 
}, - "SEARCH_AND_REPLACE": { - "name": "Deneysel arama ve değiştirme aracını kullan", - "description": "Deneysel arama ve değiştirme aracını etkinleştir, Kilo Code'nun tek bir istekte bir arama teriminin birden fazla örneğini değiştirmesine olanak tanır." - }, "INSERT_BLOCK": { "name": "Deneysel içerik ekleme aracını kullan", "description": "Deneysel içerik ekleme aracını etkinleştir, Kilo Code'nun bir diff oluşturma gereği duymadan belirli satır numaralarına içerik eklemesine olanak tanır." @@ -867,8 +883,6 @@ "modelInfo": { "supportsImages": "Görüntüleri destekler", "noImages": "Görüntüleri desteklemez", - "supportsComputerUse": "Bilgisayar kullanımını destekler", - "noComputerUse": "Bilgisayar kullanımını desteklemez", "supportsPromptCache": "İstem önbelleğini destekler", "noPromptCache": "İstem önbelleğini desteklemez", "contextWindow": "Bağlam Penceresi:", diff --git a/webview-ui/src/i18n/locales/uk/chat.json b/webview-ui/src/i18n/locales/uk/chat.json index f7256af2dc9..fd859df6f69 100644 --- a/webview-ui/src/i18n/locales/uk/chat.json +++ b/webview-ui/src/i18n/locales/uk/chat.json @@ -163,6 +163,8 @@ "initializingWarning": "Все ще ініціалізується контрольна точка... Якщо це займає надто багато часу, ти можеш вимкнути контрольні точки в налаштуваннях і перезапустити своє завдання.", "menu": { "viewDiff": "Переглянути різницю", + "viewDiffFromInit": "Переглянути всі зміни", + "viewDiffWithCurrent": "Переглянути зміни з цієї контрольної точки", "restore": "Відновити контрольну точку", "restoreFiles": "Відновити файли", "restoreFilesDescription": "Відновлює файли твого проекту до знімка, зробленого в цій точці.", @@ -170,7 +172,8 @@ "confirm": "Підтвердити", "cancel": "Скасувати", "cannotUndo": "Цю дію не можна скасувати.", - "restoreFilesAndTaskDescription": "Відновлює файли твого проекту до знімка, зробленого в цій точці, і видаляє всі повідомлення після цієї точки." 
+ "restoreFilesAndTaskDescription": "Відновлює файли твого проекту до знімка, зробленого в цій точці, і видаляє всі повідомлення після цієї точки.", + "more": "Більше опцій" }, "current": "Поточна" }, @@ -302,27 +305,32 @@ "triggerLabel_one": "Автосхвалення: 1", "triggerLabel_other": "Автосхвалення: {{count}}", "triggerLabelAll": "Автосхвалення: YOLO", - "triggerLabelOff": "Автосхвалення вимкнено" + "triggerLabelOff": "Автосхвалення вимкнено", + "triggerLabelOffShort": "Вимк" }, "announcement": { - "title": "🎉 Випущено Kilo Code {{version}}", + "title": "🎉 Випущено Roo Code {{version}}", "stealthModel": { - "feature": "Обмежена в часі БЕЗКОШТОВНА прихована модель - Блискавично швидка модель міркування, яка відмінно справляється з агентним кодуванням з контекстним вікном 262k, доступна через Roo Code Cloud.", - "note": "(Примітка: запити та завершення записуються створювачем моделі для покращення моделі)", + "feature": "Обмежена в часі БЕЗКОШТОВНА прихована модель - Code Supernova: Тепер оновлена з контекстним вікном 1M токенів! Універсальна агентна модель кодування, яка підтримує введення зображень, доступна через Roo Code Cloud.", + "note": "(Примітка: запити та завершення записуються створювачем моделі та використовуються для покращення моделі)", "connectButton": "Підключитися до Roo Code Cloud", - "selectModel": "Виберіть roo/sonic від провайдера Roo Code Cloud в
    Налаштуваннях, щоб почати", + "selectModel": "Вибери roo/code-supernova від провайдера Roo Code Cloud в Налаштуваннях, щоб почати.", "goToSettingsButton": "Перейти до Налаштувань" }, - "description": "Kilo Code {{version}} приносить основні нові функції та покращення на основі твоїх відгуків.", - "whatsNew": "Що нового", - "feature1": "Запуск Kilo Code Marketplace: Маркетплейс тепер працює! Відкривай та встановлюй режими та MCP легше, ніж будь-коли.", - "feature2": "Моделі Gemini 2.5: Додано підтримку нових моделей Gemini 2.5 Pro, Flash та Flash Lite.", - "feature3": "Підтримка файлів Excel та інше: Додано підтримку файлів Excel (.xlsx) та численні виправлення помилок і покращення!", - "hideButton": "Приховати оголошення", - "detailsDiscussLinks": "Отримай більше деталей та обговори в Discord та Reddit 🚀", - "learnMore": "Дізнатися більше", - "visitCloudButton": "Відвідати Roo Code Cloud", - "socialLinks": "Слідкуйте за нами" + "release": { + "heading": "Нове в розширенні:", + "openRouterEmbeddings": "Підтримка моделей вбудовування OpenRouter", + "chutesDynamic": "Chutes тепер динамічно завантажує найновіші моделі", + "queuedMessagesFix": "Виправлення для повідомлень у черзі, які втрачалися" + }, + "cloudAgents": { + "heading": "Нове в Cloud:", + "prFixer": "Представляємо хмарного агента PR Fixer на доповнення до PR Reviewer.", + "prFixerDescription": "PR Fixer застосовує високоякісні зміни до твоїх PR прямо з GitHub. 
Викликай через коментар PR, і він прочитає всю історію коментарів, щоб зрозуміти контекст, домовленості та компроміси - потім впровадить правильне виправлення.", + "tryPrFixerButton": "Спробувати PR Fixer" + }, + "careers": "Також, ми наймаємо!", + "socialLinks": "Приєднуйся до нас на X, Discord, або r/RooCode 🚀" }, "reasoning": { "thinking": "Думаю", diff --git a/webview-ui/src/i18n/locales/uk/common.json b/webview-ui/src/i18n/locales/uk/common.json index d4795bcb592..dd9cd0cd6ab 100644 --- a/webview-ui/src/i18n/locales/uk/common.json +++ b/webview-ui/src/i18n/locales/uk/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}} місяців тому", "year_ago": "рік тому", "years_ago": "{{count}} років тому" + }, + "errors": { + "wait_checkpoint_long_time": "Очікування ініціалізації контрольної точки {{timeout}} секунд. Якщо тобі не потрібна функція контрольних точок, будь ласка, вимкни її в налаштуваннях контрольних точок.", + "init_checkpoint_fail_long_time": "Ініціалізація контрольної точки зайняла більше {{timeout}} секунд, тому контрольні точки вимкнено для цього завдання. Ти можеш вимкнути контрольні точки або збільшити час очікування в налаштуваннях контрольних точок." 
} } diff --git a/webview-ui/src/i18n/locales/uk/mcp.json b/webview-ui/src/i18n/locales/uk/mcp.json index ffc3a2c1080..a86598d0175 100644 --- a/webview-ui/src/i18n/locales/uk/mcp.json +++ b/webview-ui/src/i18n/locales/uk/mcp.json @@ -26,12 +26,12 @@ "tabs": { "tools": "Інструменти", "resources": "Ресурси", - "errors": "Помилки" + "logs": "Логи" }, "emptyState": { "noTools": "Інструменти не знайдено", "noResources": "Ресурси не знайдено", - "noErrors": "Помилки не знайдено" + "noLogs": "Логів ще немає" }, "networkTimeout": { "label": "Таймаут мережі", diff --git a/webview-ui/src/i18n/locales/uk/settings.json b/webview-ui/src/i18n/locales/uk/settings.json index 9f820636cbb..a1b7eebcca6 100644 --- a/webview-ui/src/i18n/locales/uk/settings.json +++ b/webview-ui/src/i18n/locales/uk/settings.json @@ -72,6 +72,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "Ключ API", "vercelAiGatewayApiKeyPlaceholder": "Введіть ваш ключ API Vercel AI Gateway", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "Ключ API OpenRouter", + "openRouterApiKeyPlaceholder": "Введіть ваш ключ API OpenRouter", "openaiCompatibleProvider": "Сумісний з OpenAI", "openAiKeyLabel": "Ключ API OpenAI", "openAiKeyPlaceholder": "Введіть ваш ключ API OpenAI", @@ -146,7 +149,8 @@ "ollamaBaseUrlRequired": "Базовий URL Ollama є обов'язковим", "baseUrlRequired": "Базовий URL є обов'язковим", "modelDimensionMinValue": "Розмірність моделі повинна бути більше 0", - "vercelAiGatewayApiKeyRequired": "Ключ API Vercel AI Gateway є обов'язковим" + "vercelAiGatewayApiKeyRequired": "Ключ API Vercel AI Gateway є обов'язковим", + "openRouterApiKeyRequired": "Ключ API OpenRouter є обов'язковим" }, "cancelling": "Скасування...", "cancelIndexingButton": "Скасувати індексацію" @@ -315,6 +319,9 @@ "moonshotApiKey": "Ключ API Moonshot", "getMoonshotApiKey": "Отримати ключ API Moonshot", "moonshotBaseUrl": "Точка входу Moonshot", + "minimaxApiKey": "Ключ API MiniMax", + 
"getMiniMaxApiKey": "Отримати ключ API MiniMax", + "minimaxBaseUrl": "Точка входу MiniMax", "zaiEntrypoint": "Точка входу Z AI", "zaiEntrypointDescription": "Будь ласка, виберіть відповідну точку входу API залежно від вашого місцезнаходження. Якщо ви в Китаї, виберіть open.bigmodel.cn. Інакше виберіть api.z.ai.", "geminiApiKey": "Ключ API Gemini", @@ -498,6 +505,7 @@ }, "reasoningEffort": { "label": "Зусилля з міркування моделі", + "none": "Немає", "minimal": "Мінімальний (найшвидший)", "high": "Високий", "medium": "Середній", @@ -594,6 +602,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Тайм-аут ініціалізації контрольної точки (секунди)", + "description": "Максимальний час очікування ініціалізації служби контрольних точок. За замовчуванням 15 секунд. Діапазон: 10-60 секунд." + }, "enable": { "label": "Увімкнути автоматичні контрольні точки", "description": "Якщо ввімкнено, Kilo Code автоматично створюватиме контрольні точки під час виконання завдання, що полегшить перегляд змін або повернення до попередніх станів. <0>Дізнатися більше" @@ -752,6 +764,14 @@ "label": "Максимальний загальний розмір зображень", "mb": "МБ", "description": "Максимальний кумулятивний ліміт розміру (у МБ) для всіх зображень в одному запиті. При перевищенні зображення будуть змінені на менший розмір, щоб поміститися в цей ліміт." + }, + "includeCurrentTime": { + "label": "Включати поточний час у контекст", + "description": "Якщо ввімкнено, поточний час та інформація про часовий пояс будуть включені в системну підказку. Вимкніть це, якщо моделі припиняють роботу через занепокоєння часом." + }, + "includeCurrentCost": { + "label": "Включати поточну вартість у контекст", + "description": "Якщо ввімкнено, поточна вартість використання API буде включена в системну підказку. Вимкніть це, якщо моделі припиняють роботу через занепокоєння вартістю." 
} }, "terminal": { @@ -847,10 +867,6 @@ "name": "Використовувати експериментальну уніфіковану стратегію diff", "description": "Увімкнути експериментальну уніфіковану стратегію diff. Ця стратегія може зменшити кількість повторних спроб, спричинених помилками моделі, але може викликати несподівану поведінку або неправильні редагування. Вмикайте лише якщо ти розумієш ризики та готовий ретельно переглядати всі зміни." }, - "SEARCH_AND_REPLACE": { - "name": "Використовувати експериментальний інструмент пошуку та заміни", - "description": "Увімкнути експериментальний інструмент пошуку та заміни, що дозволяє Kilo Code замінювати кілька екземплярів пошукового терміна за один запит." - }, "INSERT_BLOCK": { "name": "Використовувати експериментальний інструмент вставки вмісту", "description": "Увімкнути експериментальний інструмент вставки вмісту, що дозволяє Kilo Code вставляти вміст у певні номери рядків без необхідності створювати diff." diff --git a/webview-ui/src/i18n/locales/vi/chat.json b/webview-ui/src/i18n/locales/vi/chat.json index ce189e7bee6..8a654367a02 100644 --- a/webview-ui/src/i18n/locales/vi/chat.json +++ b/webview-ui/src/i18n/locales/vi/chat.json @@ -156,6 +156,9 @@ "initializingWarning": "Đang khởi tạo điểm kiểm tra... 
Nếu quá trình này mất quá nhiều thời gian, bạn có thể vô hiệu hóa điểm kiểm tra trong cài đặt và khởi động lại tác vụ của bạn.", "menu": { "viewDiff": "Xem khác biệt", + "more": "Thêm tùy chọn", + "viewDiffFromInit": "Xem tất cả các thay đổi", + "viewDiffWithCurrent": "Xem các thay đổi kể từ điểm kiểm tra này", "restore": "Khôi phục điểm kiểm tra", "restoreFiles": "Khôi phục tệp", "restoreFilesDescription": "Khôi phục các tệp dự án của bạn về bản chụp được thực hiện tại thời điểm này.", @@ -273,6 +276,7 @@ "toggleAriaLabel": "Bật/tắt tự động phê duyệt", "disabledAriaLabel": "Tự động phê duyệt đã tắt - trước tiên hãy chọn các tùy chọn", "triggerLabelOff": "Tự động phê duyệt tắt", + "triggerLabelOffShort": "Tắt", "triggerLabel_zero": "0 được tự động phê duyệt", "triggerLabel_one": "1 được tự động phê duyệt", "triggerLabel_other": "{{count}} được tự động phê duyệt", @@ -302,6 +306,19 @@ "selectModel": "Chọn roo/code-supernova từ nhà cung cấp Roo Code Cloud trong Cài đặt để bắt đầu.", "goToSettingsButton": "Đi tới Cài đặt" }, + "release": { + "heading": "Mới trong Extension:", + "openRouterEmbeddings": "Hỗ trợ các mô hình nhúng OpenRouter", + "chutesDynamic": "Chutes giờ đây tải các mô hình mới nhất một cách động", + "queuedMessagesFix": "Sửa lỗi tin nhắn trong hàng đợi bị mất" + }, + "cloudAgents": { + "heading": "Mới trên Cloud:", + "prFixer": "Giới thiệu agent đám mây PR Fixer để bổ sung cho PR Reviewer.", + "prFixerDescription": "PR Fixer của Roo Code áp dụng các thay đổi chất lượng cao cho PR của bạn, ngay từ GitHub. 
Gọi qua bình luận PR và nó sẽ đọc toàn bộ lịch sử bình luận để hiểu ngữ cảnh, thỏa thuận và đánh đổi - sau đó triển khai bản sửa lỗi phù hợp.", + "tryPrFixerButton": "Dùng thử PR Fixer" + }, + "careers": "Ngoài ra, chúng tôi đang tuyển dụng!", "socialLinks": "Tham gia với chúng tôi trên X, Discord, hoặc r/RooCode 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/vi/common.json b/webview-ui/src/i18n/locales/vi/common.json index eb71390e272..7bcb701c528 100644 --- a/webview-ui/src/i18n/locales/vi/common.json +++ b/webview-ui/src/i18n/locales/vi/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}} tháng trước", "year_ago": "một năm trước", "years_ago": "{{count}} năm trước" + }, + "errors": { + "wait_checkpoint_long_time": "Bạn đã chờ {{timeout}} giây để khởi tạo điểm kiểm tra. Nếu không cần chức năng này, hãy tắt nó trong cài đặt điểm kiểm tra.", + "init_checkpoint_fail_long_time": "Khởi tạo điểm kiểm tra mất hơn {{timeout}} giây, vì vậy các điểm kiểm tra đã bị vô hiệu hóa cho tác vụ này. Bạn có thể tắt các điểm kiểm tra hoặc tăng thời gian chờ trong cài đặt điểm kiểm tra." 
} } diff --git a/webview-ui/src/i18n/locales/vi/mcp.json b/webview-ui/src/i18n/locales/vi/mcp.json index c149c2b75dc..1be9ec36dae 100644 --- a/webview-ui/src/i18n/locales/vi/mcp.json +++ b/webview-ui/src/i18n/locales/vi/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "Công cụ", "resources": "Tài nguyên", - "errors": "Lỗi" + "logs": "Nhật ký" }, "emptyState": { "noTools": "Không tìm thấy công cụ", "noResources": "Không tìm thấy tài nguyên", - "noErrors": "Không tìm thấy lỗi" + "noLogs": "Chưa có nhật ký" }, "networkTimeout": { "label": "Thời gian chờ mạng", diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json index bf350901c97..3b94d20e364 100644 --- a/webview-ui/src/i18n/locales/vi/settings.json +++ b/webview-ui/src/i18n/locales/vi/settings.json @@ -60,6 +60,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "Khóa API", "vercelAiGatewayApiKeyPlaceholder": "Nhập khóa API Vercel AI Gateway của bạn", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "Khóa API OpenRouter", + "openRouterApiKeyPlaceholder": "Nhập khóa API OpenRouter của bạn", "openaiCompatibleProvider": "Tương thích OpenAI", "openAiKeyLabel": "Khóa API OpenAI", "openAiKeyPlaceholder": "Nhập khóa API OpenAI của bạn", @@ -126,7 +129,8 @@ "vercelAiGatewayApiKeyRequired": "Cần có khóa API Vercel AI Gateway", "ollamaBaseUrlRequired": "Yêu cầu URL cơ sở Ollama", "baseUrlRequired": "Yêu cầu URL cơ sở", - "modelDimensionMinValue": "Kích thước mô hình phải lớn hơn 0" + "modelDimensionMinValue": "Kích thước mô hình phải lớn hơn 0", + "openRouterApiKeyRequired": "Yêu cầu khóa API OpenRouter" }, "advancedConfigLabel": "Cấu hình nâng cao", "searchMinScoreLabel": "Ngưỡng điểm tìm kiếm", @@ -309,6 +313,9 @@ "getZaiApiKey": "Lấy khóa API Z AI", "zaiEntrypoint": "Điểm vào Z AI", "zaiEntrypointDescription": "Vui lòng chọn điểm vào API phù hợp dựa trên vị trí của bạn. Nếu bạn ở Trung Quốc, hãy chọn open.bigmodel.cn. 
Ngược lại, hãy chọn api.z.ai.", + "minimaxApiKey": "Khóa API MiniMax", + "getMiniMaxApiKey": "Lấy khóa API MiniMax", + "minimaxBaseUrl": "Điểm vào MiniMax", "geminiApiKey": "Khóa API Gemini", "getGroqApiKey": "Lấy khóa API Groq", "groqApiKey": "Khóa API Groq", @@ -362,7 +369,7 @@ "enablePromptCachingTitle": "Bật bộ nhớ đệm lời nhắc để cải thiện hiệu suất và giảm chi phí cho các mô hình được hỗ trợ.", "cacheUsageNote": "Lưu ý: Nếu bạn không thấy việc sử dụng bộ nhớ đệm, hãy thử chọn một mô hình khác và sau đó chọn lại mô hình mong muốn của bạn.", "vscodeLmModel": "Mô hình ngôn ngữ", - "vscodeLmWarning": "Lưu ý: Đây là tích hợp thử nghiệm và hỗ trợ nhà cung cấp có thể khác nhau. Nếu bạn nhận được lỗi về mô hình không được hỗ trợ, đó là vấn đề từ phía nhà cung cấp.", + "vscodeLmWarning": "Lưu ý: Các mô hình truy cập qua VS Code Language Model API có thể được nhà cung cấp bao bọc hoặc tinh chỉnh, vì vậy hành vi có thể khác so với khi dùng trực tiếp cùng mô hình từ nhà cung cấp hoặc router thông thường. Để dùng một mô hình trong menu «Language Model», trước tiên hãy chuyển sang mô hình đó rồi nhấp «Chấp nhận» trong lời nhắc Copilot Chat; nếu không bạn có thể gặp lỗi như 400 «The requested model is not supported».", "geminiParameters": { "urlContext": { "title": "Bật ngữ cảnh URL", @@ -439,7 +446,7 @@ }, "computerUse": { "label": "Sử dụng máy tính", - "description": "Mô hình này có khả năng tương tác với trình duyệt không? (ví dụ: Claude 3.7 Sonnet)." + "description": "Mô hình này có khả năng tương tác với trình duyệt không?" }, "promptCache": { "label": "Bộ nhớ đệm lời nhắc", @@ -481,6 +488,7 @@ }, "reasoningEffort": { "label": "Nỗ lực suy luận của mô hình", + "none": "Không", "minimal": "Tối thiểu (nhanh nhất)", "high": "Cao", "medium": "Trung bình", @@ -553,6 +561,10 @@ } }, "checkpoints": { + "timeout": { + "label": "Thời gian chờ khởi tạo điểm kiểm tra (giây)", + "description": "Thời gian tối đa chờ khởi tạo dịch vụ điểm kiểm tra. Mặc định là 15 giây. 
Khoảng: 10-60 giây." + }, "enable": { "label": "Bật điểm kiểm tra tự động", "description": "Khi được bật, Kilo Code sẽ tự động tạo các điểm kiểm tra trong quá trình thực hiện nhiệm vụ, giúp dễ dàng xem lại các thay đổi hoặc quay lại trạng thái trước đó. <0>Tìm hiểu thêm" @@ -681,6 +693,14 @@ "label": "Kích thước tổng tối đa của hình ảnh", "mb": "MB", "description": "Giới hạn kích thước tích lũy tối đa (tính bằng MB) cho tất cả hình ảnh được xử lý trong một thao tác read_file duy nhất. Khi đọc nhiều hình ảnh, kích thước của mỗi hình ảnh được cộng vào tổng. Nếu việc thêm một hình ảnh khác sẽ vượt quá giới hạn này, nó sẽ bị bỏ qua." + }, + "includeCurrentTime": { + "label": "Bao gồm thời gian hiện tại trong ngữ cảnh", + "description": "Khi được bật, thời gian hiện tại và thông tin múi giờ sẽ được bao gồm trong lời nhắc hệ thống. Tắt nếu các mô hình ngừng hoạt động do lo ngại về thời gian." + }, + "includeCurrentCost": { + "label": "Bao gồm chi phí hiện tại trong ngữ cảnh", + "description": "Khi được bật, chi phí sử dụng API hiện tại sẽ được bao gồm trong lời nhắc hệ thống. Tắt nếu các mô hình ngừng hoạt động do lo ngại về chi phí." } }, "terminal": { @@ -690,55 +710,55 @@ }, "advanced": { "label": "Cài đặt Terminal: Nâng cao", - "description": "Các tùy chọn sau có thể yêu cầu khởi động lại terminal để áp dụng cài đặt." + "description": "Các cài đặt này chỉ áp dụng khi 'Sử dụng Terminal nội tuyến' bị tắt. Chỉ ảnh hưởng đến terminal VS Code và có thể yêu cầu khởi động lại IDE." }, "outputLineLimit": { - "label": "Giới hạn đầu ra terminal", - "description": "Số dòng tối đa để đưa vào đầu ra terminal khi thực hiện lệnh. Khi vượt quá, các dòng sẽ bị xóa khỏi phần giữa, tiết kiệm token. <0>Tìm hiểu thêm" + "label": "Giới hạn đầu ra của terminal", + "description": "Giữ lại các dòng đầu tiên và cuối cùng và loại bỏ các dòng ở giữa để ở dưới giới hạn. Giảm xuống để tiết kiệm token; tăng lên để cung cấp cho Roo thêm chi tiết ở giữa. 
Roo thấy một trình giữ chỗ nơi nội dung bị bỏ qua. <0>Tìm hiểu thêm" }, "outputCharacterLimit": { - "label": "Giới hạn ký tự của terminal", - "description": "Số ký tự tối đa để bao gồm trong đầu ra của terminal khi thực thi lệnh. Giới hạn này được ưu tiên hơn giới hạn dòng để ngăn chặn các vấn đề về bộ nhớ do các dòng quá dài. Khi vượt quá, đầu ra sẽ bị cắt bớt. <0>Tìm hiểu thêm" + "label": "Giới hạn ký tự terminal", + "description": "Ghi đè giới hạn dòng để tránh vấn đề bộ nhớ bằng cách áp đặt giới hạn cứng cho kích thước đầu ra. Nếu vượt quá, giữ đầu và cuối, hiển thị placeholder cho Roo nơi nội dung bị bỏ qua. <0>Tìm hiểu thêm" }, "shellIntegrationTimeout": { - "label": "Thời gian chờ tích hợp shell terminal", - "description": "Thời gian tối đa để chờ tích hợp shell khởi tạo trước khi thực hiện lệnh. Đối với người dùng có thời gian khởi động shell dài, giá trị này có thể cần được tăng lên nếu bạn thấy lỗi \"Shell Integration Unavailable\" trong terminal. <0>Tìm hiểu thêm" + "label": "Timeout tích hợp shell terminal", + "description": "Thời gian đợi tích hợp shell VS Code trước khi chạy lệnh. Tăng nếu shell khởi động chậm hoặc thấy lỗi 'Shell Integration Unavailable'. <0>Tìm hiểu thêm" }, "shellIntegrationDisabled": { - "label": "Tắt tích hợp shell terminal", - "description": "Bật tùy chọn này nếu lệnh terminal không hoạt động chính xác hoặc bạn thấy lỗi 'Shell Integration Unavailable'. Tùy chọn này sử dụng phương pháp đơn giản hơn để chạy lệnh, bỏ qua một số tính năng terminal nâng cao. <0>Tìm hiểu thêm" + "label": "Sử dụng Terminal nội tuyến (được khuyến nghị)", + "description": "Chạy lệnh trong Terminal nội tuyến (trò chuyện) để bỏ qua hồ sơ/tích hợp shell để chạy nhanh hơn, đáng tin cậy hơn. Khi bị tắt, Roo sử dụng terminal VS Code với hồ sơ shell, lời nhắc và plugin của bạn. <0>Tìm hiểu thêm" }, "commandDelay": { - "label": "Độ trễ lệnh terminal", - "description": "Độ trễ tính bằng mili giây để thêm vào sau khi thực hiện lệnh. 
Cài đặt mặc định là 0 sẽ tắt hoàn toàn độ trễ. Điều này có thể giúp đảm bảo đầu ra lệnh được ghi lại đầy đủ trong các terminal có vấn đề về thời gian. Trong hầu hết các terminal, điều này được thực hiện bằng cách đặt `PROMPT_COMMAND='sleep N'` và PowerShell thêm `start-sleep` vào cuối mỗi lệnh. Ban đầu là giải pháp cho lỗi VSCode#237208 và có thể không cần thiết. <0>Tìm hiểu thêm" + "label": "Delay lệnh terminal", + "description": "Thêm khoảng dừng ngắn sau mỗi lệnh để VS Code terminal flush tất cả output (bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep). Chỉ dùng nếu thiếu tail output; nếu không để ở 0. <0>Tìm hiểu thêm" }, "compressProgressBar": { "label": "Nén đầu ra thanh tiến trình", - "description": "Khi được bật, xử lý đầu ra terminal với các ký tự carriage return (\\r) để mô phỏng cách terminal thật hiển thị nội dung. Điều này loại bỏ các trạng thái trung gian của thanh tiến trình, chỉ giữ lại trạng thái cuối cùng, giúp tiết kiệm không gian ngữ cảnh cho thông tin quan trọng hơn. <0>Tìm hiểu thêm" + "description": "Thu gọn các thanh tiến trình/vòng quay để chỉ giữ lại trạng thái cuối cùng (tiết kiệm token). <0>Tìm hiểu thêm" }, "powershellCounter": { - "label": "Bật giải pháp bộ đếm PowerShell", - "description": "Khi được bật, thêm một bộ đếm vào các lệnh PowerShell để đảm bảo thực thi lệnh chính xác. Điều này giúp ích với các terminal PowerShell có thể gặp vấn đề về ghi lại đầu ra. <0>Tìm hiểu thêm" + "label": "Bật workaround bộ đếm PowerShell", + "description": "Bật khi output PowerShell thiếu hoặc trùng lặp; thêm counter nhỏ vào mỗi lệnh để ổn định output. Tắt nếu output đã đúng. <0>Tìm hiểu thêm" }, "zshClearEolMark": { - "label": "Xóa dấu cuối dòng ZSH", - "description": "Khi được bật, xóa dấu cuối dòng ZSH bằng cách đặt PROMPT_EOL_MARK=''. Điều này ngăn chặn các vấn đề về diễn giải đầu ra lệnh khi kết thúc bằng các ký tự đặc biệt như '%'. 
<0>Tìm hiểu thêm" + "label": "Xóa dấu EOL của ZSH", + "description": "Bật tính năng này khi bạn thấy các ký tự % lạc lõng ở cuối dòng hoặc quá trình phân tích cú pháp có vẻ sai; nó sẽ bỏ qua dấu cuối dòng (%) của Zsh. <0>Tìm hiểu thêm" }, "zshOhMy": { "label": "Bật tích hợp Oh My Zsh", - "description": "Khi được bật, đặt ITERM_SHELL_INTEGRATION_INSTALLED=Yes để kích hoạt các tính năng tích hợp shell của Oh My Zsh. Việc áp dụng cài đặt này có thể yêu cầu khởi động lại IDE. <0>Tìm hiểu thêm" + "description": "Bật tính năng này khi chủ đề/plugin Oh My Zsh của bạn mong đợi tích hợp shell; nó sẽ đặt ITERM_SHELL_INTEGRATION_INSTALLED=Yes. Tắt tính năng này để tránh đặt biến đó. <0>Tìm hiểu thêm" }, "zshP10k": { "label": "Bật tích hợp Powerlevel10k", - "description": "Khi được bật, đặt POWERLEVEL9K_TERM_SHELL_INTEGRATION=true để kích hoạt các tính năng tích hợp shell của Powerlevel10k. <0>Tìm hiểu thêm" + "description": "Bật tính năng này khi sử dụng tích hợp shell Powerlevel10k. <0>Tìm hiểu thêm" }, "zdotdir": { "label": "Bật xử lý ZDOTDIR", - "description": "Khi được bật, tạo thư mục tạm thời cho ZDOTDIR để xử lý tích hợp shell zsh một cách chính xác. Điều này đảm bảo tích hợp shell VSCode hoạt động chính xác với zsh trong khi vẫn giữ nguyên cấu hình zsh của bạn. <0>Tìm hiểu thêm" + "description": "Bật tính năng này khi tích hợp shell zsh không thành công hoặc xung đột với các tệp dotfile của bạn. <0>Tìm hiểu thêm" }, "inheritEnv": { "label": "Kế thừa biến môi trường", - "description": "Khi được bật, terminal sẽ kế thừa các biến môi trường từ tiến trình cha của VSCode, như các cài đặt tích hợp shell được định nghĩa trong hồ sơ người dùng. Điều này trực tiếp chuyển đổi cài đặt toàn cục của VSCode `terminal.integrated.inheritEnv`. <0>Tìm hiểu thêm" + "description": "Bật tính năng này để kế thừa các biến môi trường từ quy trình mẹ của VS Code. 
<0>Tìm hiểu thêm" } }, "advancedSettings": { @@ -747,7 +767,7 @@ "advanced": { "diff": { "label": "Bật chỉnh sửa qua diff", - "description": "Khi được bật, Kilo Code sẽ có thể chỉnh sửa tệp nhanh hơn và sẽ tự động từ chối ghi toàn bộ tệp bị cắt ngắn. Hoạt động tốt nhất với mô hình Claude 4 Sonnet mới nhất.", + "description": "Khi được bật, Kilo Code sẽ có thể chỉnh sửa tệp nhanh hơn và sẽ tự động từ chối ghi toàn bộ tệp bị cắt ngắn", "strategy": { "label": "Chiến lược diff", "options": { @@ -776,10 +796,6 @@ "name": "Sử dụng chiến lược diff thống nhất thử nghiệm", "description": "Bật chiến lược diff thống nhất thử nghiệm. Chiến lược này có thể giảm số lần thử lại do lỗi mô hình nhưng có thể gây ra hành vi không mong muốn hoặc chỉnh sửa không chính xác. Chỉ bật nếu bạn hiểu rõ các rủi ro và sẵn sàng xem xét cẩn thận tất cả các thay đổi." }, - "SEARCH_AND_REPLACE": { - "name": "Sử dụng công cụ tìm kiếm và thay thế thử nghiệm", - "description": "Bật công cụ tìm kiếm và thay thế thử nghiệm, cho phép Kilo Code thay thế nhiều phiên bản của một thuật ngữ tìm kiếm trong một yêu cầu." - }, "INSERT_BLOCK": { "name": "Sử dụng công cụ chèn nội dung thử nghiệm", "description": "Bật công cụ chèn nội dung thử nghiệm, cho phép Kilo Code chèn nội dung tại số dòng cụ thể mà không cần tạo diff."
@@ -866,8 +882,6 @@ "modelInfo": { "supportsImages": "Hỗ trợ hình ảnh", "noImages": "Không hỗ trợ hình ảnh", - "supportsComputerUse": "Hỗ trợ sử dụng máy tính", - "noComputerUse": "Không hỗ trợ sử dụng máy tính", "supportsPromptCache": "Hỗ trợ bộ nhớ đệm lời nhắc", "noPromptCache": "Không hỗ trợ bộ nhớ đệm lời nhắc", "contextWindow": "Cửa sổ ngữ cảnh:", diff --git a/webview-ui/src/i18n/locales/zh-CN/chat.json b/webview-ui/src/i18n/locales/zh-CN/chat.json index 0568e9bbb3f..bdc9c674bcb 100644 --- a/webview-ui/src/i18n/locales/zh-CN/chat.json +++ b/webview-ui/src/i18n/locales/zh-CN/chat.json @@ -156,6 +156,9 @@ "initializingWarning": "正在初始化检查点...如果耗时过长,你可以在设置中禁用检查点并重新启动任务。", "menu": { "viewDiff": "查看差异", + "more": "更多选项", + "viewDiffFromInit": "查看所有更改", + "viewDiffWithCurrent": "查看自此检查点以来的更改", "restore": "恢复检查点", "restoreFiles": "恢复文件", "restoreFilesDescription": "将项目文件恢复到此检查点状态", @@ -273,6 +276,7 @@ "toggleAriaLabel": "切换自动批准", "disabledAriaLabel": "自动批准已禁用 - 请先选择选项", "triggerLabelOff": "自动批准已关闭", + "triggerLabelOffShort": "关闭", "triggerLabel_zero": "0 个自动批准", "triggerLabel_one": "1 个自动批准", "triggerLabel_other": "{{count}} 个自动批准", @@ -302,6 +306,19 @@ "selectModel": "在设置中从 Roo Code Cloud 提供商选择 roo/code-supernova 开始使用。", "goToSettingsButton": "前往设置" }, + "release": { + "heading": "扩展新功能:", + "openRouterEmbeddings": "支持 OpenRouter 嵌入模型", + "chutesDynamic": "Chutes 现在动态加载最新模型", + "queuedMessagesFix": "修复队列消息丢失问题" + }, + "cloudAgents": { + "heading": "云端新功能:", + "prFixer": "推出 PR Fixer 云端 Agent 以补充 PR 审查员。", + "prFixerDescription": "Roo Code 的 PR Fixer 直接在 GitHub 上为你的 PR 应用高质量更改。通过 PR 评论调用,它会读取整个评论历史以理解上下文、协议和权衡 - 然后实施正确的修复。", + "tryPrFixerButton": "试用 PR Fixer" + }, + "careers": "此外,我们正在招聘!", "socialLinks": "在 XDiscordr/RooCode 上关注我们 🚀" }, "browser": { diff --git a/webview-ui/src/i18n/locales/zh-CN/common.json b/webview-ui/src/i18n/locales/zh-CN/common.json index 5b5c9df1119..c9a298ab25a 100644 --- a/webview-ui/src/i18n/locales/zh-CN/common.json +++ 
b/webview-ui/src/i18n/locales/zh-CN/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}}个月前", "year_ago": "1年前", "years_ago": "{{count}}年前" + }, + "errors": { + "wait_checkpoint_long_time": "初始化存档点已等待 {{timeout}} 秒。如果你不需要存档点功能,请在存档点设置中关闭。", + "init_checkpoint_fail_long_time": "存档点初始化已超过 {{timeout}} 秒,因此本任务已禁用存档点。你可以关闭存档点或在存档点设置中延长等待时间。" } } diff --git a/webview-ui/src/i18n/locales/zh-CN/mcp.json b/webview-ui/src/i18n/locales/zh-CN/mcp.json index 336a31afc09..e9b484c1cb8 100644 --- a/webview-ui/src/i18n/locales/zh-CN/mcp.json +++ b/webview-ui/src/i18n/locales/zh-CN/mcp.json @@ -25,12 +25,12 @@ "tabs": { "tools": "工具", "resources": "资源", - "errors": "错误" + "logs": "日志" }, "emptyState": { "noTools": "未找到工具", "noResources": "未找到资源", - "noErrors": "未找到错误" + "noLogs": "暂无日志" }, "networkTimeout": { "label": "网络超时", diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json index ba5137a9d96..47b83559b28 100644 --- a/webview-ui/src/i18n/locales/zh-CN/settings.json +++ b/webview-ui/src/i18n/locales/zh-CN/settings.json @@ -63,6 +63,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "API 密钥", "vercelAiGatewayApiKeyPlaceholder": "输入您的 Vercel AI Gateway API 密钥", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "OpenRouter API 密钥", + "openRouterApiKeyPlaceholder": "输入您的 OpenRouter API 密钥", "mistralProvider": "Mistral", "mistralApiKeyLabel": "API 密钥:", "mistralApiKeyPlaceholder": "输入您的 Mistral API 密钥", @@ -127,7 +130,8 @@ "vercelAiGatewayApiKeyRequired": "需要 Vercel AI Gateway API 密钥", "ollamaBaseUrlRequired": "需要 Ollama 基础 URL", "baseUrlRequired": "需要基础 URL", - "modelDimensionMinValue": "模型维度必须大于 0" + "modelDimensionMinValue": "模型维度必须大于 0", + "openRouterApiKeyRequired": "需要 OpenRouter API 密钥" }, "advancedConfigLabel": "高级配置", "searchMinScoreLabel": "搜索分数阈值", @@ -306,6 +310,9 @@ "moonshotApiKey": "Moonshot API 密钥", "getMoonshotApiKey": "获取 Moonshot API 密钥", "moonshotBaseUrl":
"Moonshot 服务站点", + "minimaxApiKey": "MiniMax API 密钥", + "getMiniMaxApiKey": "获取 MiniMax API 密钥", + "minimaxBaseUrl": "MiniMax 服务站点", "zaiApiKey": "Z AI API 密钥", "getZaiApiKey": "获取 Z AI API 密钥", "zaiEntrypoint": "Z AI 服务站点", @@ -363,7 +370,7 @@ "enablePromptCachingTitle": "开启提示缓存可提升性能并节省成本", "cacheUsageNote": "提示:若未显示缓存使用情况,请切换模型后重新选择", "vscodeLmModel": "VSCode LM 模型", - "vscodeLmWarning": "注意:这是一个非常实验性的集成,提供商支持会有所不同。如果您收到有关不支持模型的错误,则这是提供商方面的问题。", + "vscodeLmWarning": "注意:通过 VS Code Language Model API 访问的模型可能由提供商进行封装或微调,因此其行为可能与直接从常见提供商或路由器使用同一模型时不同。要使用「Language Model」下拉列表中的模型,请先切换到该模型,然后在 Copilot Chat 提示中点击「接受」;否则可能会出现 400「The requested model is not supported」等错误。", "geminiParameters": { "urlContext": { "title": "启用 URL 上下文", @@ -440,7 +447,7 @@ }, "computerUse": { "label": "计算机功能调用", - "description": "此模型是否能够与浏览器交互?(例如 Claude 3.7 Sonnet)。" + "description": "此模型是否能够与浏览器交互?(例如 Claude Sonnet)。" }, "promptCache": { "label": "提示缓存", @@ -482,6 +489,7 @@ }, "reasoningEffort": { "label": "模型推理强度", + "none": "无", "minimal": "最小 (最快)", "high": "高", "medium": "中", @@ -554,6 +562,10 @@ } }, "checkpoints": { + "timeout": { + "label": "存档点初始化超时时间(秒)", + "description": "存档点服务初始化最长等待时间。默认 15 秒。范围:10-60 秒。" + }, "enable": { "label": "启用自动存档点", "description": "开启后自动创建任务存档点,方便回溯修改。 <0>了解更多" @@ -682,6 +694,14 @@ "label": "图片总大小上限", "mb": "MB", "description": "单次 read_file 操作中处理的所有图片的最大累计大小限制(MB)。读取多张图片时,每张图片的大小会累加到总大小中。如果包含另一张图片会超过此限制,则会跳过该图片。" + }, + "includeCurrentTime": { + "label": "在上下文中包含当前时间", + "description": "启用后,当前时间和时区信息将包含在系统提示中。如果模型因时间问题停止工作,请禁用此选项。" + }, + "includeCurrentCost": { + "label": "在上下文中包含当前成本", + "description": "启用后,当前 API 使用成本将包含在系统提示中。如果模型因成本问题停止工作,请禁用此选项。" } }, "terminal": { @@ -691,55 +711,55 @@ }, "advanced": { "label": "终端设置:高级", - "description": "以下选项可能需要重启终端才能应用设置" + "description": "这些设置仅在「使用内联终端」禁用时适用。仅影响 VS Code 终端,可能需要重启 IDE。" }, "outputLineLimit": { "label": "终端输出限制", - "description": "执行命令时在终端输出中包含的最大行数。超过时将从中间删除行,节省 token。 <0>了解更多" + 
"description": "保留首尾行并丢弃中间行以保持在限制内。降低可节省 token;提高可为 Roo 提供更多中间细节。Roo 看到内容被跳过的占位符。<0>了解更多" }, "outputCharacterLimit": { "label": "终端字符限制", - "description": "执行命令时在终端输出中包含的最大字符数。此限制优先于行数限制,以防止因行过长而导致的内存问题。超出后,输出将被截断。 <0>了解更多" + "description": "通过强制限制输出大小来覆盖行限制以防止内存问题。如果超出,保留开头和结尾并向 Roo 显示内容被跳过的占位符。<0>了解更多" }, "shellIntegrationTimeout": { - "label": "终端初始化等待时间", - "description": "执行命令前等待 Shell 集成初始化的最长时间。对于 Shell 启动时间较长的用户,如果在终端中看到\"Shell Integration Unavailable\"错误,可能需要增加此值。 <0>了解更多" + "label": "终端 shell 集成超时", + "description": "运行命令前等待 VS Code shell 集成的时间。如果 shell 启动缓慢或看到 'Shell Integration Unavailable' 错误,请提高此值。<0>了解更多" }, "shellIntegrationDisabled": { - "label": "禁用终端 Shell 集成", - "description": "如果终端命令无法正常工作或看到 'Shell Integration Unavailable' 错误,请启用此项。这将使用更简单的方法运行命令,绕过一些高级终端功能。 <0>了解更多" + "label": "使用内联终端(推荐)", + "description": "在内联终端(聊天)中运行命令以绕过 shell 配置文件/集成,实现更快、更可靠的运行。禁用时,Roo 使用 VS Code 终端及您的 shell 配置文件、提示和插件。<0>了解更多" }, "commandDelay": { "label": "终端命令延迟", - "description": "命令执行后添加的延迟时间(毫秒)。默认设置为 0 时完全禁用延迟。这可以帮助确保在有计时问题的终端中完全捕获命令输出。在大多数终端中,这是通过设置 `PROMPT_COMMAND='sleep N'` 实现的,而 PowerShell 会在每个命令末尾添加 `start-sleep`。最初是为了解决 VSCode 错误#237208,现在可能不再需要。 <0>了解更多" + "description": "在每个命令后添加短暂暂停,以便 VS Code 终端刷新所有输出(bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep)。仅在看到缺少尾部输出时使用;否则保持为 0。<0>了解更多" }, "compressProgressBar": { "label": "压缩进度条输出", - "description": "启用后,将处理包含回车符 (\\r) 的终端输出,模拟真实终端显示内容的方式。这会移除进度条的中间状态,只保留最终状态,为更重要的信息节省上下文空间。 <0>了解更多" + "description": "折叠进度条/旋转器,仅保留最终状态(节省 token)。<0>了解更多" }, "powershellCounter": { "label": "启用 PowerShell 计数器解决方案", - "description": "启用后,会在 PowerShell 命令中添加计数器以确保命令正确执行。这有助于解决可能存在输出捕获问题的 PowerShell 终端。 <0>了解更多" + "description": "当 PowerShell 输出丢失或重复时启用此选项;它会为每个命令附加一个小计数器以稳定输出。如果输出已正常,请保持关闭。<0>了解更多" }, "zshClearEolMark": { - "label": "清除 ZSH 行尾标记", - "description": "启用后,通过设置 PROMPT_EOL_MARK='' 清除 ZSH 行尾标记。这可以防止命令输出以特殊字符(如 '%')结尾时的解析问题。 <0>了解更多" + "label": "清除 ZSH EOL 标记", + "description": "当您在行尾看到零散的 % 或解析看起来错误时启用此选项;它会省略 
Zsh 的行尾标记(%)。<0>了解更多" }, "zshOhMy": { "label": "启用 Oh My Zsh 集成", - "description": "启用后,设置 ITERM_SHELL_INTEGRATION_INSTALLED=Yes 以启用 Oh My Zsh shell 集成功能。应用此设置可能需要重启 IDE。 <0>了解更多" + "description": "当您的 Oh My Zsh 主题/插件期望 shell 集成时启用此选项;它会设置 ITERM_SHELL_INTEGRATION_INSTALLED=Yes。关闭此选项以避免设置该变量。<0>了解更多" }, "zshP10k": { "label": "启用 Powerlevel10k 集成", - "description": "启用后,设置 POWERLEVEL9K_TERM_SHELL_INTEGRATION=true 以启用 Powerlevel10k shell 集成功能。 <0>了解更多" + "description": "使用 Powerlevel10k shell 集成时启用此选项。<0>了解更多" }, "zdotdir": { "label": "启用 ZDOTDIR 处理", - "description": "启用后将创建临时目录用于 ZDOTDIR,以正确处理 zsh shell 集成。这确保 VSCode shell 集成能与 zsh 正常工作,同时保留您的 zsh 配置。 <0>了解更多" + "description": "当 zsh shell 集成失败或与您的 dotfiles 冲突时启用此选项。<0>了解更多" }, "inheritEnv": { "label": "继承环境变量", - "description": "启用后,终端将从 VSCode 父进程继承环境变量,如用户配置文件中定义的 shell 集成设置。这直接切换 VSCode 全局设置 `terminal.integrated.inheritEnv`。 <0>了解更多" + "description": "启用此选项以从父 VS Code 进程继承环境变量。<0>了解更多" } }, "advancedSettings": { @@ -748,7 +768,7 @@ "advanced": { "diff": { "label": "启用diff更新", - "description": "启用后,Kilo Code 将能够通过差异算法写入,避免模型输出完整文件,以降低Token消耗。与最新的 Claude 4 Sonnet 模型配合最佳。", + "description": "启用后,Kilo Code 将能够通过差异算法写入,避免模型输出完整文件,以降低Token消耗", "strategy": { "label": "Diff 策略", "options": { @@ -777,10 +797,6 @@ "name": "启用diff更新工具", "description": "可减少因模型错误导致的重复尝试,但可能引发意外操作。启用前请确保理解风险并会仔细检查所有修改。" }, - "SEARCH_AND_REPLACE": { - "name": "启用搜索和替换工具", - "description": "启用实验性搜索和替换工具,允许 Kilo Code 在一个请求中替换搜索词的多个实例。" - }, "INSERT_BLOCK": { "name": "启用插入内容工具", "description": "允许 Kilo Code 在特定行号插入内容,无需处理差异。" @@ -871,8 +887,6 @@ "modelInfo": { "supportsImages": "支持图像", "noImages": "不支持图像", - "supportsComputerUse": "支持计算机功能调用", - "noComputerUse": "不支持计算机功能调用", "supportsPromptCache": "支持提示缓存", "noPromptCache": "不支持提示缓存", "contextWindow": "上下文窗口:", diff --git a/webview-ui/src/i18n/locales/zh-TW/chat.json b/webview-ui/src/i18n/locales/zh-TW/chat.json index 7f4790822fe..cd12f4e8e11 100644 --- 
a/webview-ui/src/i18n/locales/zh-TW/chat.json +++ b/webview-ui/src/i18n/locales/zh-TW/chat.json @@ -162,6 +162,9 @@ "initializingWarning": "正在初始化檢查點... 如果耗時過長,您可以在設定中停用檢查點並重新啟動工作。", "menu": { "viewDiff": "檢視差異", + "more": "更多選項", + "viewDiffFromInit": "檢視所有變更", + "viewDiffWithCurrent": "檢視自此檢查點以來的變更", "restore": "還原檢查點", "restoreFiles": "還原檔案", "restoreFilesDescription": "將您的專案檔案還原到此時的快照。", @@ -297,6 +300,7 @@ "toggleAriaLabel": "切換自動批准", "disabledAriaLabel": "自動批准已禁用 - 請先選擇選項", "triggerLabelOff": "自動批准已關閉", + "triggerLabelOffShort": "關閉", "triggerLabel_zero": "0 個自動核准", "triggerLabel_one": "1 個自動核准", "triggerLabel_other": "{{count}} 個自動核准", @@ -311,6 +315,19 @@ "selectModel": "在設定中從 Roo Code Cloud 提供商選擇 roo/code-supernova 開始使用。", "goToSettingsButton": "前往設定" }, + "release": { + "heading": "擴充功能的新功能:", + "openRouterEmbeddings": "支援 OpenRouter 嵌入模型", + "chutesDynamic": "Chutes 現在動態載入最新模型", + "queuedMessagesFix": "修復佇列訊息遺失問題" + }, + "cloudAgents": { + "heading": "雲端的新功能:", + "prFixer": "推出 PR Fixer 雲端代理以補充 PR Reviewer。", + "prFixerDescription": "Roo Code 的 PR Fixer 直接從 GitHub 為您的 PR 套用高品質變更。透過 PR 留言呼叫,它會讀取整個留言歷史以理解內容、協議和權衡 - 然後實作正確的修復。", + "tryPrFixerButton": "試用 PR Fixer" + }, + "careers": "此外,我們正在招募!", "socialLinks": "在 XDiscordr/RooCode 上關注我們 🚀" }, "reasoning": { diff --git a/webview-ui/src/i18n/locales/zh-TW/common.json b/webview-ui/src/i18n/locales/zh-TW/common.json index ad13b1393bb..d6b934b0cdd 100644 --- a/webview-ui/src/i18n/locales/zh-TW/common.json +++ b/webview-ui/src/i18n/locales/zh-TW/common.json @@ -114,5 +114,9 @@ "months_ago": "{{count}} 個月前", "year_ago": "1 年前", "years_ago": "{{count}} 年前" + }, + "errors": { + "wait_checkpoint_long_time": "初始化存檔點已等待 {{timeout}} 秒。如果你不需要存檔點功能,請在存檔點設定中關閉。", + "init_checkpoint_fail_long_time": "存檔點初始化已超過 {{timeout}} 秒,因此此工作已停用存檔點。你可以關閉存檔點或在存檔點設定中延長等待時間。" } } diff --git a/webview-ui/src/i18n/locales/zh-TW/mcp.json b/webview-ui/src/i18n/locales/zh-TW/mcp.json index 3f8ab656bd7..14690df3c80 100644 --- 
a/webview-ui/src/i18n/locales/zh-TW/mcp.json +++ b/webview-ui/src/i18n/locales/zh-TW/mcp.json @@ -26,12 +26,12 @@ "tabs": { "tools": "工具", "resources": "資源", - "errors": "錯誤" + "logs": "日誌" }, "emptyState": { "noTools": "找不到工具", "noResources": "找不到資源", - "noErrors": "找不到錯誤" + "noLogs": "暫無日誌" }, "networkTimeout": { "label": "網路逾時", diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json index a017e872615..133a89bfcae 100644 --- a/webview-ui/src/i18n/locales/zh-TW/settings.json +++ b/webview-ui/src/i18n/locales/zh-TW/settings.json @@ -58,6 +58,9 @@ "vercelAiGatewayProvider": "Vercel AI Gateway", "vercelAiGatewayApiKeyLabel": "API 金鑰", "vercelAiGatewayApiKeyPlaceholder": "輸入您的 Vercel AI Gateway API 金鑰", + "openRouterProvider": "OpenRouter", + "openRouterApiKeyLabel": "OpenRouter API 金鑰", + "openRouterApiKeyPlaceholder": "輸入您的 OpenRouter API 金鑰", "mistralProvider": "Mistral", "mistralApiKeyLabel": "API 金鑰:", "mistralApiKeyPlaceholder": "輸入您的 Mistral API 金鑰", @@ -127,7 +130,8 @@ "vercelAiGatewayApiKeyRequired": "需要 Vercel AI Gateway API 金鑰", "ollamaBaseUrlRequired": "需要 Ollama 基礎 URL", "baseUrlRequired": "需要基礎 URL", - "modelDimensionMinValue": "模型維度必須大於 0" + "modelDimensionMinValue": "模型維度必須大於 0", + "openRouterApiKeyRequired": "需要 OpenRouter API 金鑰" }, "advancedConfigLabel": "進階設定", "searchMinScoreLabel": "搜尋分數閾值", @@ -310,6 +314,9 @@ "getZaiApiKey": "取得 Z AI API 金鑰", "zaiEntrypoint": "Z AI 服務端點", "zaiEntrypointDescription": "請根據您的位置選擇適當的 API 服務端點。如果您在中國,請選擇 open.bigmodel.cn。否則,請選擇 api.z.ai。", + "minimaxApiKey": "MiniMax API 金鑰", + "getMiniMaxApiKey": "取得 MiniMax API 金鑰", + "minimaxBaseUrl": "MiniMax 服務端點", "geminiApiKey": "Gemini API 金鑰", "getGroqApiKey": "取得 Groq API 金鑰", "groqApiKey": "Groq API 金鑰", @@ -363,7 +370,7 @@ "enablePromptCachingTitle": "啟用提示快取以提升支援的模型效能並降低成本。", "cacheUsageNote": "注意:如果您沒有看到快取使用情況,請嘗試選擇其他模型,然後重新選擇您想要的模型。", "vscodeLmModel": "語言模型", - "vscodeLmWarning":
"注意:此整合功能仍處於實驗階段,各供應商的支援程度可能不同。如果出現模型不支援的錯誤,通常是供應商方面的問題。", + "vscodeLmWarning": "注意:透過 VS Code Language Model API 存取的模型可能由供應商封裝或微調,因此其行為可能與直接從一般供應商或路由器使用相同模型時不同。要使用「Language Model」下拉式選單中的模型,請先切換到該模型,然後在 Copilot Chat 提示中點擊「接受」;否則可能會出現 400「The requested model is not supported」等錯誤。", "geminiParameters": { "urlContext": { "title": "啟用 URL 上下文", @@ -440,7 +447,7 @@ }, "computerUse": { "label": "電腦使用", - "description": "此模型是否能夠與瀏覽器互動?(例如 Claude 3.7 Sonnet)" + "description": "此模型是否能夠與瀏覽器互動?(例如 Claude Sonnet)" }, "promptCache": { "label": "提示快取", @@ -482,6 +489,7 @@ }, "reasoningEffort": { "label": "模型推理強度", + "none": "無", "minimal": "最小 (最快)", "high": "高", "medium": "中", @@ -554,6 +562,10 @@ } }, "checkpoints": { + "timeout": { + "label": "檢查點初始化逾時(秒)", + "description": "檢查點服務初始化的最長等待時間。預設為 15 秒。範圍:10-60 秒。" + }, "enable": { "label": "啟用自動檢查點", "description": "啟用後,Kilo Code 將在工作執行期間自動建立檢查點,使審核變更或回到早期狀態變得容易。 <0>了解更多" @@ -682,6 +694,14 @@ "label": "圖片總大小上限", "mb": "MB", "description": "單次 read_file 操作中處理的所有圖片的最大累計大小限制(MB)。讀取多張圖片時,每張圖片的大小會累加到總大小中。如果包含另一張圖片會超過此限制,則會跳過該圖片。" + }, + "includeCurrentTime": { + "label": "在上下文中包含目前時間", + "description": "啟用後,目前時間和時區資訊將包含在系統提示中。如果模型因時間問題停止工作,請停用此選項。" + }, + "includeCurrentCost": { + "label": "在上下文中包含目前成本", + "description": "啟用後,目前 API 使用成本將包含在系統提示中。如果模型因成本問題停止工作,請停用此選項。" } }, "terminal": { @@ -691,55 +711,55 @@ }, "advanced": { "label": "終端機設定:進階", - "description": "以下選項可能需要重新啟動終端機才能套用設定" + "description": "這些設定僅在「使用內嵌終端機」停用時適用。僅影響 VS Code 終端機,可能需要重啟 IDE。" }, "outputLineLimit": { - "label": "終端機輸出行數限制", - "description": "執行命令時終端機輸出的最大行數。超過此限制時,會從中間移除多餘的行數,以節省 token 用量。 <0>了解更多" + "label": "終端機輸出限制", + "description": "保留首尾行並丟棄中間行以保持在限制內。降低可節省 token;提高可為 Roo 提供更多中間細節。Roo 看到內容被跳過的佔位符。<0>了解更多" }, "outputCharacterLimit": { "label": "終端機字元限制", - "description": "執行指令時在終端輸出中包含的最大字元數。此限制優先於行數限制,以防止因行過長而導致的記憶體問題。超過後,輸出將被截斷。 <0>了解更多" + "description": "透過強制限制輸出大小來覆寫行限制以防止記憶體問題。如果超出,保留開頭和結尾並向 Roo 顯示內容被跳過的佔位符。<0>了解更多" }, "shellIntegrationTimeout": { 
- "label": "終端機 Shell 整合逾時", - "description": "執行命令前等待 Shell 整合初始化的最長時間。如果您的 Shell 啟動較慢,且終端機出現「Shell 整合無法使用」的錯誤訊息,可能需要提高此數值。 <0>了解更多" + "label": "終端機 shell 整合逾時", + "description": "執行命令前等待 VS Code shell 整合的時間。如果 shell 啟動緩慢或看到 'Shell Integration Unavailable' 錯誤,請提高此值。<0>了解更多" }, "shellIntegrationDisabled": { - "label": "停用終端機 Shell 整合", - "description": "如果終端機指令無法正常運作或看到 'Shell Integration Unavailable' 錯誤,請啟用此項。這會使用較簡單的方法執行指令,繞過一些進階終端機功能。 <0>了解更多" + "label": "使用內嵌終端機(建議)", + "description": "在內嵌終端機(聊天)中執行命令以繞過 shell 設定檔/整合,實現更快、更可靠的執行。停用時,Roo 使用 VS Code 終端機及您的 shell 設定檔、提示和外掛程式。<0>了解更多" }, "commandDelay": { "label": "終端機命令延遲", - "description": "命令執行後添加的延遲時間(毫秒)。預設值為 0 時完全停用延遲。這可以幫助確保在有計時問題的終端機中完整擷取命令輸出。在大多數終端機中,這是透過設定 `PROMPT_COMMAND='sleep N'` 實現的,而 PowerShell 會在每個命令結尾加入 `start-sleep`。最初是為了解決 VSCode 錯誤#237208,現在可能不再需要。 <0>了解更多" + "description": "在每個命令後新增短暫暫停,以便 VS Code 終端機刷新所有輸出(bash/zsh: PROMPT_COMMAND sleep; PowerShell: start-sleep)。僅在看到缺少尾部輸出時使用;否則保持為 0。<0>了解更多" }, "compressProgressBar": { "label": "壓縮進度條輸出", - "description": "啟用後,將處理包含歸位字元 (\\r) 的終端機輸出,模擬真實終端機顯示內容的方式。這會移除進度條的中間狀態,只保留最終狀態,為更重要的資訊節省上下文空間。 <0>了解更多" + "description": "折疊進度條/旋轉器,僅保留最終狀態(節省 token)。<0>了解更多" }, "powershellCounter": { "label": "啟用 PowerShell 計數器解決方案", - "description": "啟用後,會在 PowerShell 命令中加入計數器以確保命令正確執行。這有助於解決可能存在輸出擷取問題的 PowerShell 終端機。 <0>了解更多" + "description": "當 PowerShell 輸出遺失或重複時啟用此選項;它會為每個命令附加一個小計數器以穩定輸出。如果輸出已正常,請保持關閉。<0>了解更多" }, "zshClearEolMark": { - "label": "清除 ZSH 行尾標記", - "description": "啟用後,透過設定 PROMPT_EOL_MARK='' 清除 ZSH 行尾標記。這可以防止命令輸出以特殊字元(如 '%')結尾時的解析問題。 <0>了解更多" + "label": "清除 ZSH EOL 標記", + "description": "當您在行尾看到零散的 % 或解析看起來錯誤時啟用此選項;它會省略 Zsh 的行尾標記(%)。<0>了解更多" }, "zshOhMy": { "label": "啟用 Oh My Zsh 整合", - "description": "啟用後,設定 ITERM_SHELL_INTEGRATION_INSTALLED=Yes 以啟用 Oh My Zsh shell 整合功能。套用此設定可能需要重新啟動 IDE。 <0>了解更多" + "description": "當您的 Oh My Zsh 主題/外掛程式期望 shell 整合時啟用此選項;它會設定 ITERM_SHELL_INTEGRATION_INSTALLED=Yes。關閉此選項以避免設定該變數。<0>了解更多" }, "zshP10k": { "label": "啟用 
Powerlevel10k 整合", - "description": "啟用後,設定 POWERLEVEL9K_TERM_SHELL_INTEGRATION=true 以啟用 Powerlevel10k shell 整合功能。 <0>了解更多" + "description": "使用 Powerlevel10k shell 整合時啟用此選項。<0>了解更多" }, "zdotdir": { "label": "啟用 ZDOTDIR 處理", - "description": "啟用後將建立暫存目錄用於 ZDOTDIR,以正確處理 zsh shell 整合。這確保 VSCode shell 整合能與 zsh 正常運作,同時保留您的 zsh 設定。 <0>了解更多" + "description": "當 zsh shell 整合失敗或與您的 dotfiles 衝突時啟用此選項。<0>了解更多" }, "inheritEnv": { "label": "繼承環境變數", - "description": "啟用後,終端機將從 VSCode 父程序繼承環境變數,如使用者設定檔中定義的 shell 整合設定。這直接切換 VSCode 全域設定 `terminal.integrated.inheritEnv`。 <0>了解更多" + "description": "啟用此選項以從父 VS Code 程序繼承環境變數。<0>了解更多" } }, "advancedSettings": { @@ -748,7 +768,7 @@ "advanced": { "diff": { "label": "透過差異比對編輯", - "description": "啟用後,Kilo Code 可更快速地編輯檔案,並自動拒絕不完整的整檔覆寫。搭配最新的 Claude 4 Sonnet 模型效果最佳。", + "description": "啟用後,Kilo Code 可更快速地編輯檔案,並自動拒絕不完整的整檔覆寫", "strategy": { "label": "差異比對策略", "options": { @@ -777,10 +797,6 @@ "name": "使用實驗性統一差異比對策略", "description": "啟用實驗性的統一差異比對策略。此策略可能減少因模型錯誤而導致的重試次數,但也可能導致意外行為或錯誤的編輯。請務必了解風險,並願意仔細檢查所有變更後再啟用。" }, - "SEARCH_AND_REPLACE": { - "name": "使用實驗性搜尋與取代工具", - "description": "啟用實驗性的搜尋與取代工具,允許 Kilo Code 在單一請求中取代多個符合的內容。" - }, "INSERT_BLOCK": { "name": "使用實驗性插入內容工具", "description": "啟用實驗性的插入內容工具,允許 Kilo Code 直接在指定行號插入內容,而無需產生差異比對。" @@ -867,8 +883,6 @@ "modelInfo": { "supportsImages": "支援影像", "noImages": "不支援影像", - "supportsComputerUse": "支援電腦使用", - "noComputerUse": "不支援電腦使用", "supportsPromptCache": "支援提示快取", "noPromptCache": "不支援提示快取", "contextWindow": "上下文視窗:", diff --git a/webview-ui/src/utils/__tests__/validate.test.ts b/webview-ui/src/utils/__tests__/validate.test.ts index 14c1057eb39..128f132c676 100644 --- a/webview-ui/src/utils/__tests__/validate.test.ts +++ b/webview-ui/src/utils/__tests__/validate.test.ts @@ -24,7 +24,7 @@ describe("Model Validation Functions", () => { outputPrice: 5.0, }, }, - "kilocode-openrouter": { + kilocode: { "valid-model": { maxTokens: 8192, contextWindow: 200000, @@ -63,10 +63,11 @@ describe("Model 
Validation Functions", () => { huggingface: {}, // kilocode_change start ovhcloud: {}, - chutes: {}, gemini: {}, inception: {}, // kilocode_change end + roo: {}, + chutes: {}, } const allowAllOrganization: OrganizationAllowList = {