diff --git a/src/core/tools/__tests__/contextValidator.test.ts b/src/core/tools/__tests__/contextValidator.test.ts new file mode 100644 index 0000000000..ed0d5699d3 --- /dev/null +++ b/src/core/tools/__tests__/contextValidator.test.ts @@ -0,0 +1,476 @@ +import { describe, it, expect, vi, beforeEach } from "vitest" +import { validateFileSizeForContext } from "../contextValidator" +import { Task } from "../../task/Task" +import { promises as fs } from "fs" +import * as fsPromises from "fs/promises" +import { readPartialContent } from "../../../integrations/misc/read-partial-content" +import * as sharedApi from "../../../shared/api" + +vi.mock("fs", () => ({ + promises: { + stat: vi.fn(), + }, +})) + +vi.mock("fs/promises", () => ({ + stat: vi.fn(), +})) + +vi.mock("../../../integrations/misc/read-partial-content", () => ({ + readPartialContent: vi.fn(), +})) + +vi.mock("../../../shared/api", () => ({ + getModelMaxOutputTokens: vi.fn(), + getFormatForProvider: vi.fn().mockReturnValue("anthropic"), +})) + +describe("contextValidator", () => { + let mockTask: Task + + beforeEach(() => { + vi.clearAllMocks() + + // Default file size mock (1MB - large enough to trigger validation) + vi.mocked(fs.stat).mockResolvedValue({ + size: 1024 * 1024, // 1MB + } as any) + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: 1024 * 1024, // 1MB + } as any) + + // Mock Task instance + mockTask = { + api: { + getModel: vi.fn().mockReturnValue({ + id: "test-model", + info: { + contextWindow: 100000, + maxTokens: 4096, + }, + }), + countTokens: vi.fn().mockResolvedValue(1000), + }, + getTokenUsage: vi.fn().mockReturnValue({ + contextTokens: 10000, + }), + apiConfiguration: { + apiProvider: "anthropic", + }, + providerRef: { + deref: vi.fn().mockReturnValue({ + getState: vi.fn().mockResolvedValue({}), + }), + }, + } as any + + // Mock getModelMaxOutputTokens to return a consistent value + vi.mocked(sharedApi.getModelMaxOutputTokens).mockReturnValue(4096) + + // Default 
readPartialContent mock + vi.mocked(readPartialContent).mockResolvedValue({ + content: "const test = 'sample content';".repeat(100), // ~2700 chars + charactersRead: 2700, + totalCharacters: 2700, + linesRead: 100, + totalLines: 100, + lastLineRead: 100, + }) + }) + + describe("validateFileSizeForContext", () => { + describe("heuristic skipping", () => { + it("should skip validation for very small files (< 5KB)", async () => { + // Mock a tiny file + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: 3 * 1024, // 3KB + } as any) + + const result = await validateFileSizeForContext("/test/tiny.ts", 50, -1, mockTask) + + expect(result.shouldLimit).toBe(false) + expect(result.safeContentLimit).toBe(-1) + expect(readPartialContent).not.toHaveBeenCalled() + expect(mockTask.api.countTokens).not.toHaveBeenCalled() + }) + + it("should skip validation for moderate files when context is mostly empty", async () => { + // Mock a moderate file (80KB) + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: 80 * 1024, // 80KB + } as any) + + // Mock low context usage (30% used) + mockTask.getTokenUsage = vi.fn().mockReturnValue({ + contextTokens: 30000, // 30% of 100k context used + }) + + const result = await validateFileSizeForContext("/test/moderate.ts", 1000, -1, mockTask) + + expect(result.shouldLimit).toBe(false) + expect(result.safeContentLimit).toBe(-1) + expect(readPartialContent).not.toHaveBeenCalled() + expect(mockTask.api.countTokens).not.toHaveBeenCalled() + }) + + it("should perform validation for large files even with empty context", async () => { + // Mock a large file (500KB) + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: 500 * 1024, // 500KB + } as any) + + // Mock low context usage + mockTask.getTokenUsage = vi.fn().mockReturnValue({ + contextTokens: 10000, // 10% of context used + }) + + const result = await validateFileSizeForContext("/test/large.ts", 5000, -1, mockTask) + + // Should perform validation (not skip) + 
expect(readPartialContent).toHaveBeenCalled() + }) + }) + + describe("character-based estimation", () => { + it("should allow files that fit within estimated safe characters", async () => { + // Mock a file that fits within estimated safe chars + // Context: 100k window, 10k used = 90k remaining + // With 25% buffer: 90k * 0.75 = 67.5k usable + // Minus 4096 for response = ~63.4k available + // Target limit = 63.4k * 0.9 ≈ 57k tokens + // Estimated safe chars = 57k * 3 = 171k chars + const fileSizeBytes = 150 * 1024 // 150KB - under 171k chars + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: fileSizeBytes, + } as any) + + const result = await validateFileSizeForContext("/test/fits.ts", 1000, -1, mockTask) + + expect(result.shouldLimit).toBe(false) + expect(result.safeContentLimit).toBe(-1) + expect(readPartialContent).not.toHaveBeenCalled() + }) + + it("should validate files that exceed estimated safe characters", async () => { + // Mock a file that exceeds estimated safe chars (>171k) + const fileSizeBytes = 200 * 1024 // 200KB + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: fileSizeBytes, + } as any) + + // Mock readPartialContent to return content that fits after validation + const content = "const test = 'content';".repeat(5000) // ~100k chars + vi.mocked(readPartialContent).mockResolvedValue({ + content, + charactersRead: content.length, + totalCharacters: fileSizeBytes, + linesRead: 5000, + totalLines: 10000, + lastLineRead: 5000, + }) + + // Mock token count to be under limit + mockTask.api.countTokens = vi.fn().mockResolvedValue(30000) // Under ~57k limit + + const result = await validateFileSizeForContext("/test/exceeds.ts", 10000, -1, mockTask) + + expect(readPartialContent).toHaveBeenCalled() + expect(mockTask.api.countTokens).toHaveBeenCalled() + // Only part of the file was read (~100k of 200k chars), so the read is limited + expect(result.shouldLimit).toBe(true) // Limited because we didn't read the full file + 
expect(result.safeContentLimit).toBeGreaterThan(0) + }) + }) + + describe("content validation and cutback", () => { + it("should apply cutback when content exceeds token limit", async () => { + const fileSizeBytes = 300 * 1024 // 300KB + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: fileSizeBytes, + } as any) + + // Mock readPartialContent to return large content + const largeContent = "const test = 'content';".repeat(10000) // ~200k chars + vi.mocked(readPartialContent).mockResolvedValue({ + content: largeContent, + charactersRead: largeContent.length, + totalCharacters: 300000, + linesRead: 10000, + totalLines: 10000, + lastLineRead: 10000, + }) + + // Mock token count to exceed limit on first call, then fit after cutback + let callCount = 0 + mockTask.api.countTokens = vi.fn().mockImplementation(async (content) => { + callCount++ + const text = content[0].text + if (callCount === 1) { + return 70000 // Exceeds ~57k limit + } + // After 20% cutback + return 45000 // Now fits + }) + + const result = await validateFileSizeForContext("/test/cutback.ts", 10000, -1, mockTask) + + expect(mockTask.api.countTokens).toHaveBeenCalledTimes(2) + expect(result.shouldLimit).toBe(true) + expect(result.safeContentLimit).toBeGreaterThan(0) + expect(result.safeContentLimit).toBeLessThan(largeContent.length) + expect(result.reason).toContain("File exceeds available context space") + }) + + it("should handle multiple cutbacks until content fits", async () => { + const fileSizeBytes = 500 * 1024 // 500KB + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: fileSizeBytes, + } as any) + + const largeContent = "const test = 'content';".repeat(15000) // ~300k chars + vi.mocked(readPartialContent).mockResolvedValue({ + content: largeContent, + charactersRead: largeContent.length, + totalCharacters: 500000, + linesRead: 15000, + totalLines: 15000, + lastLineRead: 15000, + }) + + // Mock token count to require multiple cutbacks + let callCount = 0 + mockTask.api.countTokens = 
vi.fn().mockImplementation(async (content) => { + callCount++ + const text = content[0].text + if (callCount <= 2) { + return 70000 // Still exceeds limit + } + return 40000 // Finally fits + }) + + const result = await validateFileSizeForContext("/test/multiple-cutback.ts", 15000, -1, mockTask) + + expect(mockTask.api.countTokens).toHaveBeenCalledTimes(3) + expect(result.shouldLimit).toBe(true) + expect(result.safeContentLimit).toBeGreaterThan(0) + }) + }) + + describe("large file optimization", () => { + it("should skip tokenizer for files > 1MB and apply clean cutback", async () => { + const fileSizeBytes = 2 * 1024 * 1024 // 2MB + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: fileSizeBytes, + } as any) + + const largeContent = "const test = 'content';".repeat(20000) // ~400k chars + vi.mocked(readPartialContent).mockResolvedValue({ + content: largeContent, + charactersRead: largeContent.length, + totalCharacters: 2000000, + linesRead: 20000, + totalLines: 20000, + lastLineRead: 20000, + }) + + const result = await validateFileSizeForContext("/test/huge.ts", 20000, -1, mockTask) + + // Should not call tokenizer for large files + expect(mockTask.api.countTokens).not.toHaveBeenCalled() + expect(result.shouldLimit).toBe(true) + // Should apply 20% cutback: 400k * 0.8 = 320k chars + expect(result.safeContentLimit).toBe(Math.floor(largeContent.length * 0.8)) + }) + }) + + describe("limited context scenarios", () => { + it("should handle very limited context space", async () => { + // Mock high context usage (95% used) + mockTask.getTokenUsage = vi.fn().mockReturnValue({ + contextTokens: 95000, // 95% of 100k context used + }) + + const fileSizeBytes = 100 * 1024 // 100KB + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: fileSizeBytes, + } as any) + + const content = "const test = 'content';".repeat(1000) // ~20k chars + vi.mocked(readPartialContent).mockResolvedValue({ + content, + charactersRead: content.length, + totalCharacters: 100000, + 
linesRead: 1000, + totalLines: 1000, + lastLineRead: 1000, + }) + + // Mock token count to exceed the very limited space + mockTask.api.countTokens = vi.fn().mockResolvedValue(10000) // Exceeds available space + + const result = await validateFileSizeForContext("/test/limited.ts", 1000, -1, mockTask) + + expect(result.shouldLimit).toBe(true) + // The actual implementation applies cutback, so we get a reduced amount, not MIN_USEFUL_CHARS + expect(result.safeContentLimit).toBeGreaterThan(1000) + expect(result.reason).toContain("File exceeds available context space") + }) + + it("should handle negative available space gracefully", async () => { + // Mock extremely high context usage (99% used) + mockTask.getTokenUsage = vi.fn().mockReturnValue({ + contextTokens: 99000, // 99% of context used + }) + + const fileSizeBytes = 50 * 1024 // 50KB + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: fileSizeBytes, + } as any) + + const result = await validateFileSizeForContext("/test/no-space.ts", 500, -1, mockTask) + + expect(result.shouldLimit).toBe(true) + expect(result.safeContentLimit).toBe(1000) // MIN_USEFUL_CHARS + expect(result.reason).toContain("Very limited context space") + }) + }) + + describe("error handling", () => { + it("should handle API errors gracefully", async () => { + // Mock a large file to trigger error handling + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: 2 * 1024 * 1024, // 2MB - large file + } as any) + + // Mock API error + mockTask.api.getModel = vi.fn().mockImplementation(() => { + throw new Error("API Error") + }) + + const result = await validateFileSizeForContext("/test/error.ts", 1000, -1, mockTask) + + expect(result.shouldLimit).toBe(true) + expect(result.safeContentLimit).toBeGreaterThan(0) + expect(result.reason).toContain("Large file detected") + }) + + it("should handle file stat errors", async () => { + // Mock file stat error + vi.mocked(fsPromises.stat).mockRejectedValue(new Error("File not found")) + + // Mock API 
error to trigger error handling path + mockTask.api.getModel = vi.fn().mockImplementation(() => { + throw new Error("API Error") + }) + + const result = await validateFileSizeForContext("/test/missing.ts", 1000, -1, mockTask) + + expect(result.shouldLimit).toBe(true) + expect(result.safeContentLimit).toBe(10000) // Ultra-safe fallback + expect(result.reason).toContain("Unable to determine file size") + }) + + it("should handle readPartialContent errors", async () => { + const fileSizeBytes = 2 * 1024 * 1024 // 2MB - large file to trigger validation + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: fileSizeBytes, + } as any) + + // Mock high context usage to prevent heuristic skipping + mockTask.getTokenUsage = vi.fn().mockReturnValue({ + contextTokens: 80000, // 80% of context used - prevents skipping + }) + + // Mock readPartialContent to fail + vi.mocked(readPartialContent).mockRejectedValue(new Error("Read error")) + + const result = await validateFileSizeForContext("/test/read-error.ts", 1000, -1, mockTask) + + // When readPartialContent fails, it falls back to error handling + expect(result.shouldLimit).toBe(true) + expect(result.safeContentLimit).toBe(50000) // Conservative fallback for large files + expect(result.reason).toContain("Large file detected") + }) + }) + + describe("edge cases", () => { + it("should handle empty files", async () => { + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: 0, + } as any) + + const result = await validateFileSizeForContext("/test/empty.ts", 0, -1, mockTask) + + expect(result.shouldLimit).toBe(false) + expect(result.safeContentLimit).toBe(-1) + }) + + it("should handle files that exactly match the limit", async () => { + // Calculate exact estimated safe chars + // 100k - 10k = 90k remaining, 90k * 0.75 = 67.5k usable + // 67.5k - 4096 = ~63.4k available, 63.4k * 0.9 = ~57k target + // 57k * 3 = 171k estimated safe chars + const exactSize = Math.floor(57000 * 3) // Exactly at the limit + 
vi.mocked(fsPromises.stat).mockResolvedValue({ + size: exactSize, + } as any) + + const result = await validateFileSizeForContext("/test/exact.ts", 1000, -1, mockTask) + + expect(result.shouldLimit).toBe(false) + expect(result.safeContentLimit).toBe(-1) + }) + + it("should handle single-character files", async () => { + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: 1, + } as any) + + const result = await validateFileSizeForContext("/test/single-char.ts", 1, -1, mockTask) + + expect(result.shouldLimit).toBe(false) + expect(result.safeContentLimit).toBe(-1) + }) + }) + + describe("return value validation", () => { + it("should always return character counts in safeContentLimit", async () => { + const fileSizeBytes = 300 * 1024 // 300KB + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: fileSizeBytes, + } as any) + + const content = "const test = 'content';".repeat(5000) // ~100k chars + vi.mocked(readPartialContent).mockResolvedValue({ + content, + charactersRead: content.length, + totalCharacters: 300000, + linesRead: 5000, + totalLines: 5000, + lastLineRead: 5000, + }) + + mockTask.api.countTokens = vi.fn().mockResolvedValue(70000) // Exceeds limit + + const result = await validateFileSizeForContext("/test/char-count.ts", 5000, -1, mockTask) + + expect(result.shouldLimit).toBe(true) + expect(typeof result.safeContentLimit).toBe("number") + expect(result.safeContentLimit).toBeGreaterThan(0) + // Should be character count, not line count + expect(result.safeContentLimit).toBeGreaterThan(5000) // More than line count + }) + + it("should return -1 for unlimited files", async () => { + vi.mocked(fsPromises.stat).mockResolvedValue({ + size: 3 * 1024, // Small file + } as any) + + const result = await validateFileSizeForContext("/test/unlimited.ts", 100, -1, mockTask) + + expect(result.shouldLimit).toBe(false) + expect(result.safeContentLimit).toBe(-1) + }) + }) + }) +}) diff --git a/src/core/tools/__tests__/readFileTool.spec.ts 
b/src/core/tools/__tests__/readFileTool.spec.ts index 7ba822dce0..04f721e63a 100644 --- a/src/core/tools/__tests__/readFileTool.spec.ts +++ b/src/core/tools/__tests__/readFileTool.spec.ts @@ -4,14 +4,20 @@ import * as path from "path" import { countFileLines } from "../../../integrations/misc/line-counter" import { readLines } from "../../../integrations/misc/read-lines" -import { extractTextFromFile } from "../../../integrations/misc/extract-text" +import { readPartialContent } from "../../../integrations/misc/read-partial-content" +import { extractTextFromFile, addLineNumbers, getSupportedBinaryFormats } from "../../../integrations/misc/extract-text" import { parseSourceCodeDefinitionsForFile } from "../../../services/tree-sitter" import { isBinaryFile } from "isbinaryfile" import { ReadFileToolUse, ToolParamName, ToolResponse } from "../../../shared/tools" import { readFileTool } from "../readFileTool" import { formatResponse } from "../../prompts/responses" +import * as contextValidatorModule from "../contextValidator" import { DEFAULT_MAX_IMAGE_FILE_SIZE_MB, DEFAULT_MAX_TOTAL_IMAGE_SIZE_MB } from "../helpers/imageHelpers" +vi.mock("../../../i18n", () => ({ + t: vi.fn((key: string) => key), +})) + vi.mock("path", async () => { const originalPath = await vi.importActual("path") return { @@ -26,11 +32,25 @@ vi.mock("path", async () => { vi.mock("isbinaryfile") vi.mock("../../../integrations/misc/line-counter") -vi.mock("../../../integrations/misc/read-lines") +vi.mock("../../../integrations/misc/read-lines", () => ({ + readLines: vi.fn().mockResolvedValue("mocked line content"), +})) +vi.mock("../../../integrations/misc/read-partial-content", () => ({ + readPartialSingleLineContent: vi.fn().mockResolvedValue("mocked partial content"), + readPartialContent: vi.fn().mockResolvedValue({ + content: "mocked partial content", + charactersRead: 100, + totalCharacters: 1000, + linesRead: 5, + totalLines: 50, + lastLineRead: 5, + }), +})) +vi.mock("../contextValidator") 
// Mock fs/promises readFile for image tests const fsPromises = vi.hoisted(() => ({ - readFile: vi.fn(), + readFile: vi.fn().mockResolvedValue(Buffer.from("mock file content")), stat: vi.fn().mockResolvedValue({ size: 1024 }), })) vi.mock("fs/promises", () => fsPromises) @@ -115,7 +135,7 @@ vi.mock("../../ignore/RooIgnoreController", () => ({ })) vi.mock("../../../utils/fs", () => ({ - fileExistsAtPath: vi.fn().mockReturnValue(true), + fileExistsAtPath: vi.fn().mockResolvedValue(true), })) // Global beforeEach to ensure clean mock state between all test suites @@ -263,6 +283,12 @@ describe("read_file tool with maxReadFileLine setting", () => { mockedPathResolve.mockReturnValue(absoluteFilePath) mockedIsBinaryFile.mockResolvedValue(false) + // Default mock for validateFileSizeForContext - no limit + vi.mocked(contextValidatorModule.validateFileSizeForContext).mockResolvedValue({ + shouldLimit: false, + safeContentLimit: -1, + }) + mockInputContent = fileContent // Setup the extractTextFromFile mock implementation with the current mockInputContent @@ -382,8 +408,7 @@ describe("read_file tool with maxReadFileLine setting", () => { expect(result).toContain(``) // Verify XML structure - expect(result).toContain("Showing only 0 of 5 total lines") - expect(result).toContain("") + expect(result).toContain("tools:readFile.showingOnlyLines") expect(result).toContain("") expect(result).toContain(sourceCodeDef.trim()) expect(result).toContain("") @@ -409,7 +434,7 @@ describe("read_file tool with maxReadFileLine setting", () => { expect(result).toContain(`${testFilePath}`) expect(result).toContain(``) expect(result).toContain(``) - expect(result).toContain("Showing only 3 of 5 total lines") + expect(result).toContain("tools:readFile.showingOnlyLines") }) }) @@ -523,6 +548,7 @@ describe("read_file tool XML output structure", () => { mockedPathResolve.mockReturnValue(absoluteFilePath) mockedIsBinaryFile.mockResolvedValue(false) + mockedCountFileLines.mockResolvedValue(5) // 
Default line count // Set default implementation for extractTextFromFile mockedExtractTextFromFile.mockImplementation((filePath) => { @@ -1326,6 +1352,171 @@ describe("read_file tool XML output structure", () => { ) }) }) + + describe("line range instructions", () => { + beforeEach(() => { + // Reset mocks + vi.clearAllMocks() + + // Mock file system functions + vi.mocked(isBinaryFile).mockResolvedValue(false) + vi.mocked(countFileLines).mockResolvedValue(10000) // Large file + vi.mocked(readLines).mockResolvedValue("line content") + vi.mocked(extractTextFromFile).mockResolvedValue("file content") + + // Mock addLineNumbers + vi.mocked(addLineNumbers).mockImplementation((content, start) => `${start || 1} | ${content}`) + }) + + it("should always include inline line_range instructions when shouldLimit is true", async () => { + // Mock a large file + vi.mocked(countFileLines).mockResolvedValue(10000) + + // Mock contextValidator to return shouldLimit true + vi.mocked(contextValidatorModule.validateFileSizeForContext).mockResolvedValue({ + shouldLimit: true, + safeContentLimit: 2000, + reason: "File exceeds available context space. Can read 2000 of 500000 characters (40%). 
Context usage: 10000/100000 tokens (10%).", + }) + + // Mock readPartialContent to return truncated content + vi.mocked(readPartialContent).mockResolvedValue({ + content: "Line 1\nLine 2\n...truncated...", + charactersRead: 2000, + totalCharacters: 500000, + linesRead: 100, + totalLines: 10000, + lastLineRead: 100, + }) + + const result = await executeReadFileTool( + { args: `large-file.ts` }, + { totalLines: 10000, maxReadFileLine: -1 }, + ) + + // Verify the result contains the partial read notice for multi-line files + expect(result).toContain("") + expect(result).toContain("tools:readFile.partialReadMultiLine") + // The current implementation doesn't include contextLimitInstructions + expect(result).not.toContain("tools:readFile.contextLimitInstructions") + }) + + it("should not show any special notice when file fits in context", async () => { + // Mock small file that fits in context + vi.mocked(countFileLines).mockResolvedValue(100) + vi.mocked(contextValidatorModule.validateFileSizeForContext).mockResolvedValue({ + shouldLimit: false, + safeContentLimit: -1, + }) + + const result = await executeReadFileTool({ args: `small-file.ts` }) + + // Should have file content but no notice about limits + expect(result).toContain("") + expect(result).toContain("small-file.ts") + expect(result).toContain(" { + // Mock a single-line file that exceeds context + vi.mocked(countFileLines).mockResolvedValue(1) + + // Mock contextValidator to return shouldLimit true with single-line file message + vi.mocked(contextValidatorModule.validateFileSizeForContext).mockResolvedValue({ + shouldLimit: true, + safeContentLimit: 5000, + reason: "Large single-line file (likely minified) exceeds available context space. Only the first 50% (5000 of 10000 characters) can be loaded. 
This is a hard limit - no additional content from this file can be accessed.", + }) + + // Mock readPartialContent to return truncated content for single-line file + vi.mocked(readPartialContent).mockResolvedValue({ + content: "const a=1;const b=2;...truncated", + charactersRead: 5000, + totalCharacters: 10000, + linesRead: 1, + totalLines: 1, + lastLineRead: 1, + }) + + const result = await executeReadFileTool( + { args: `minified.js` }, + { totalLines: 1, maxReadFileLine: -1 }, + ) + + // Verify the result contains the notice but NOT the line_range instructions + expect(result).toContain("") + expect(result).toContain("tools:readFile.partialReadSingleLine") + expect(result).not.toContain("tools:readFile.contextLimitInstructions") + expect(result).not.toContain("Use line_range") + }) + + it("should include line_range instructions for multi-line files that exceed context", async () => { + // Mock a multi-line file that exceeds context + vi.mocked(countFileLines).mockResolvedValue(5000) + + // Mock contextValidator to return shouldLimit true with multi-line file message + vi.mocked(contextValidatorModule.validateFileSizeForContext).mockResolvedValue({ + shouldLimit: true, + safeContentLimit: 50000, + reason: "File exceeds available context space. Can read 50000 of 250000 characters (20%). 
Context usage: 50000/100000 tokens (50%).", + }) + + // Mock readPartialContent to return truncated content + vi.mocked(readPartialContent).mockResolvedValue({ + content: "Line 1\nLine 2\n...truncated...", + charactersRead: 50000, + totalCharacters: 250000, + linesRead: 1000, + totalLines: 5000, + lastLineRead: 1000, + }) + + const result = await executeReadFileTool( + { args: `large-file.ts` }, + { totalLines: 5000, maxReadFileLine: -1 }, + ) + + // Verify the result contains the partial read notice for multi-line files + expect(result).toContain("") + expect(result).toContain("tools:readFile.partialReadMultiLine") + // The current implementation doesn't include contextLimitInstructions + expect(result).not.toContain("tools:readFile.contextLimitInstructions") + }) + + it("should handle normal file read section for single-line files with validation notice", async () => { + // Mock a single-line file that has shouldLimit true but fits after truncation + vi.mocked(countFileLines).mockResolvedValue(1) + + // Mock contextValidator to return shouldLimit true with a single-line file notice + vi.mocked(contextValidatorModule.validateFileSizeForContext).mockResolvedValue({ + shouldLimit: true, + safeContentLimit: 8000, + reason: "Large single-line file (likely minified) exceeds available context space. 
Only the first 80% (8000 of 10000 characters) can be loaded.", + }) + + // Mock readPartialContent for single-line file + vi.mocked(readPartialContent).mockResolvedValue({ + content: "const a=1;const b=2;const c=3;", + charactersRead: 8000, + totalCharacters: 10000, + linesRead: 1, + totalLines: 1, + lastLineRead: 1, + }) + + const result = await executeReadFileTool( + { args: `semi-large.js` }, + { totalLines: 1, maxReadFileLine: -1 }, + ) + + // Verify single-line file notice doesn't include line_range instructions + expect(result).toContain("") + expect(result).toContain("tools:readFile.partialReadSingleLine") + expect(result).not.toContain("tools:readFile.contextLimitInstructions") + }) + }) }) describe("read_file tool with image support", () => { @@ -1591,12 +1782,24 @@ describe("read_file tool with image support", () => { mockedPathResolve.mockReturnValue(absolutePath) mockedExtractTextFromFile.mockResolvedValue("PDF content extracted") + // Ensure the file is treated as binary and PDF is in supported formats + mockedIsBinaryFile.mockResolvedValue(true) + mockedCountFileLines.mockResolvedValue(0) + vi.mocked(getSupportedBinaryFormats).mockReturnValue([".pdf", ".docx", ".ipynb"]) + + // Mock contextValidator to not interfere with PDF processing + vi.mocked(contextValidatorModule.validateFileSizeForContext).mockResolvedValue({ + shouldLimit: false, + safeContentLimit: -1, + }) + // Execute const result = await executeReadImageTool(binaryPath) - // Verify it uses extractTextFromFile instead + // Verify it doesn't treat the PDF as an image expect(result).not.toContain("") - // Make the test platform-agnostic by checking the call was made (path normalization can vary) + + // Should call extractTextFromFile for PDF processing expect(mockedExtractTextFromFile).toHaveBeenCalledTimes(1) const callArgs = mockedExtractTextFromFile.mock.calls[0] expect(callArgs[0]).toMatch(/[\\\/]test[\\\/]document\.pdf$/) diff --git a/src/core/tools/contextValidator.ts 
b/src/core/tools/contextValidator.ts new file mode 100644 index 0000000000..bf0032ddd0 --- /dev/null +++ b/src/core/tools/contextValidator.ts @@ -0,0 +1,303 @@ +import { Task } from "../task/Task" +import { readPartialContent } from "../../integrations/misc/read-partial-content" +import { getModelMaxOutputTokens, getFormatForProvider } from "../../shared/api" +import * as fs from "fs/promises" + +/** + * More aggressive buffer percentage specifically for file reading validation. + * This is separate from the global TOKEN_BUFFER_PERCENTAGE to provide extra safety + * when reading files without affecting other context window calculations. + */ +const FILE_READ_BUFFER_PERCENTAGE = 0.25 // 25% buffer for file reads + +/** + * Constants for the 2-phase validation approach + */ +const CHARS_PER_TOKEN_ESTIMATE = 3 +const CUTBACK_PERCENTAGE = 0.2 // 20% reduction when over limit +const MAX_API_CALLS = 5 // Safety limit to prevent infinite loops +const MIN_USEFUL_CHARS = 1000 // Minimum characters to consider useful + +/** + * File size thresholds for heuristics + */ +const TINY_FILE_SIZE = 5 * 1024 // 5KB - definitely safe to skip validation +const SMALL_FILE_SIZE = 100 * 1024 // 100KB - safe if context is mostly empty +const LARGE_FILE_SIZE = 1024 * 1024 // 1MB - skip tokenizer for speed, use cutback percentage + +export interface ContextValidationResult { + shouldLimit: boolean + safeContentLimit: number // Always represents character count + reason?: string +} + +interface ContextInfo { + currentlyUsed: number + contextWindow: number + availableTokensForFile: number + targetTokenLimit: number +} + +/** + * Gets runtime context information from the task + */ +async function getContextInfo(cline: Task): Promise { + const modelInfo = cline.api.getModel().info + const { contextTokens: currentContextTokens } = cline.getTokenUsage() + const contextWindow = modelInfo.contextWindow + + // Get the model-specific max output tokens + const modelId = cline.api.getModel().id + const 
apiProvider = cline.apiConfiguration.apiProvider + const settings = await cline.providerRef.deref()?.getState() + const format = getFormatForProvider(apiProvider) + const maxResponseTokens = getModelMaxOutputTokens({ modelId, model: modelInfo, settings, format }) + + // Calculate available space + const currentlyUsed = currentContextTokens || 0 + const remainingContext = contextWindow - currentlyUsed + const usableRemainingContext = Math.floor(remainingContext * (1 - FILE_READ_BUFFER_PERCENTAGE)) + const reservedForResponse = Math.min(maxResponseTokens || 0, usableRemainingContext) + const availableTokensForFile = usableRemainingContext - reservedForResponse + const targetTokenLimit = Math.floor(availableTokensForFile * 0.9) + + return { + currentlyUsed, + contextWindow, + availableTokensForFile, + targetTokenLimit, + } +} + +/** + * Determines if we should skip the expensive token-based validation. + * Returns true if we're confident the file can be read without limits. + * Prioritizes accuracy - only skips when very confident. 
+ */ +async function shouldSkipValidation(filePath: string, cline: Task): Promise { + try { + // Get file size + const stats = await fs.stat(filePath) + const fileSizeBytes = stats.size + + // Very small files by size are definitely safe to skip validation + if (fileSizeBytes < TINY_FILE_SIZE) { + console.log( + `[shouldSkipValidation] Skipping validation for ${filePath} - small file size (${(fileSizeBytes / 1024).toFixed(1)}KB)`, + ) + return true + } + + // For larger files, check if context is mostly empty + const modelInfo = cline.api.getModel().info + const { contextTokens: currentContextTokens } = cline.getTokenUsage() + const contextWindow = modelInfo.contextWindow + const contextUsagePercent = (currentContextTokens || 0) / contextWindow + + // If context is mostly empty (< 50% used) and file is not too big, + // we can skip validation as there's plenty of room + if (contextUsagePercent < 0.5 && fileSizeBytes < SMALL_FILE_SIZE) { + console.log( + `[shouldSkipValidation] Skipping validation for ${filePath} - context mostly empty (${Math.round(contextUsagePercent * 100)}% used) and file is moderate size`, + ) + return true + } + } catch (error) { + // If we can't check file size or context state, don't skip validation + console.warn(`[shouldSkipValidation] Could not check file size or context state: ${error}`) + } + + return false +} + +/** + * Validates content with actual API and applies cutback if needed + */ +async function validateAndCutbackContent( + content: string, + targetTokenLimit: number, + cline: Task, +): Promise<{ finalContent: string; actualTokens: number; didCutback: boolean }> { + let finalContent = content + let apiCallCount = 0 + let actualTokens = 0 + let didCutback = false + + while (apiCallCount < MAX_API_CALLS) { + apiCallCount++ + + // Make the actual API call to count tokens + actualTokens = await cline.api.countTokens([{ type: "text", text: finalContent }]) + + console.log( + `[validateFileSizeForContext] API call ${apiCallCount}: 
${actualTokens} tokens for ${finalContent.length} chars`, + ) + + if (actualTokens <= targetTokenLimit) { + // We're under the limit, we're done! + break + } + + // We're over the limit - cut back by CUTBACK_PERCENTAGE + const targetLength = Math.floor(finalContent.length * (1 - CUTBACK_PERCENTAGE)) + + // Safety check + if (targetLength === 0 || targetLength === finalContent.length) { + break + } + + finalContent = finalContent.substring(0, targetLength) + didCutback = true + } + + return { finalContent, actualTokens, didCutback } +} + +/** + * Handles error cases with conservative fallback + */ +async function handleValidationError( + filePath: string, + currentMaxReadFileLine: number, + error: unknown, +): Promise { + console.warn(`[validateFileSizeForContext] Error accessing runtime state: ${error}`) + + // In error cases, we can't check context state, so use simple file size heuristics + try { + const stats = await fs.stat(filePath) + const fileSizeBytes = stats.size + + // Very small files are safe + if (fileSizeBytes < TINY_FILE_SIZE) { + return { shouldLimit: false, safeContentLimit: -1 } + } + + // For larger files, apply a conservative character limit + if (fileSizeBytes > 1024 * 1024) { + // > 1MB + return { + shouldLimit: true, + safeContentLimit: 50000, // 50K chars as a safe fallback + reason: "Large file detected. Limited to 50,000 characters to prevent context overflow (runtime state unavailable).", + } + } + } catch (statError) { + // If we can't even stat the file, proceed with very conservative defaults + console.warn(`[validateFileSizeForContext] Could not stat file: ${statError}`) + return { + shouldLimit: true, + safeContentLimit: 10000, // 10K chars as ultra-safe fallback + reason: "Unable to determine file size. Limited to 10,000 characters as a precaution.", + } + } + + return { shouldLimit: false, safeContentLimit: -1 } +} + +/** + * Validates if a file can be safely read based on its size and current runtime context state. 
+ * Uses a 2-phase approach: character-based estimation followed by actual token validation. + * Returns a safe character limit to prevent context overflow. + */ +export async function validateFileSizeForContext( + filePath: string, + totalLines: number, + currentMaxReadFileLine: number, + cline: Task, +): Promise { + try { + // Check if we can skip validation + if (await shouldSkipValidation(filePath, cline)) { + return { shouldLimit: false, safeContentLimit: -1 } + } + + // Get context information + const contextInfo = await getContextInfo(cline) + + // Phase 1: Estimate safe character limit based on available tokens + const estimatedSafeChars = contextInfo.targetTokenLimit * CHARS_PER_TOKEN_ESTIMATE + + // Get file size to check if we need to limit + const stats = await fs.stat(filePath) + const fileSizeBytes = stats.size + + // If file is smaller than our estimated safe chars, it should fit + if (fileSizeBytes <= estimatedSafeChars) { + console.log( + `[validateFileSizeForContext] File fits within estimated safe chars (${fileSizeBytes} <= ${estimatedSafeChars})`, + ) + return { shouldLimit: false, safeContentLimit: -1 } + } + + // File is larger than estimated safe chars, need to validate with actual content + console.log( + `[validateFileSizeForContext] File exceeds estimated safe chars (${fileSizeBytes} > ${estimatedSafeChars}), validating with actual content`, + ) + + // Phase 2: Read content up to estimated limit and validate with actual API + const partialResult = await readPartialContent(filePath, estimatedSafeChars) + + // For large files, skip tokenizer validation for speed and apply clean cutback percentage + let finalContent: string + let actualTokens: number + let didCutback: boolean + + if (fileSizeBytes > LARGE_FILE_SIZE) { + // Skip tokenizer for speed reasons on large files, apply clean cutback + const cutbackChars = Math.floor(partialResult.content.length * (1 - CUTBACK_PERCENTAGE)) + finalContent = partialResult.content.substring(0, 
cutbackChars) + actualTokens = 0 // Not calculated for large files + didCutback = cutbackChars < partialResult.content.length + + console.log( + `[validateFileSizeForContext] Large file (${(fileSizeBytes / 1024 / 1024).toFixed(1)}MB) - skipping tokenizer for speed, applying ${Math.round(CUTBACK_PERCENTAGE * 100)}% cutback: ${partialResult.content.length} -> ${finalContent.length} chars`, + ) + } else { + // Use tokenizer validation for smaller files + const validation = await validateAndCutbackContent( + partialResult.content, + contextInfo.targetTokenLimit, + cline, + ) + finalContent = validation.finalContent + actualTokens = validation.actualTokens + didCutback = validation.didCutback + } + + // Calculate final safe character limit + const finalSafeChars = finalContent.length + + // Ensure we provide at least a minimum useful amount + const safeContentLimit = Math.max(MIN_USEFUL_CHARS, finalSafeChars) + + // Log final statistics + console.log(`[validateFileSizeForContext] Final: ${safeContentLimit} chars, ${actualTokens} tokens`) + + // Special case: if we can't read any meaningful content + if (safeContentLimit === MIN_USEFUL_CHARS && finalSafeChars < MIN_USEFUL_CHARS) { + const percentageRead = Math.round((safeContentLimit / fileSizeBytes) * 100) + return { + shouldLimit: true, + safeContentLimit, + reason: `Very limited context space. Can only read ${safeContentLimit} characters (${percentageRead}% of file). Context: ${contextInfo.currentlyUsed}/${contextInfo.contextWindow} tokens used (${Math.round((contextInfo.currentlyUsed / contextInfo.contextWindow) * 100)}%). 
Consider using search_files or line_range for specific sections.`, + } + } + + // If we read the entire file without exceeding the limit, no limitation needed + if (!didCutback && partialResult.charactersRead === fileSizeBytes) { + return { shouldLimit: false, safeContentLimit: -1 } + } + + // Calculate percentage read for the notice + const percentageRead = Math.round((safeContentLimit / fileSizeBytes) * 100) + + return { + shouldLimit: true, + safeContentLimit, + reason: `File exceeds available context space. Can read ${safeContentLimit} of ${fileSizeBytes} characters (${percentageRead}%). Context usage: ${contextInfo.currentlyUsed}/${contextInfo.contextWindow} tokens (${Math.round((contextInfo.currentlyUsed / contextInfo.contextWindow) * 100)}%).`, + } + } catch (error) { + return handleValidationError(filePath, currentMaxReadFileLine, error) + } +} diff --git a/src/core/tools/readFileTool.ts b/src/core/tools/readFileTool.ts index 01427f4d9d..63ca692fd7 100644 --- a/src/core/tools/readFileTool.ts +++ b/src/core/tools/readFileTool.ts @@ -2,6 +2,7 @@ import path from "path" import { isBinaryFile } from "isbinaryfile" import { Task } from "../task/Task" +import { validateFileSizeForContext } from "./contextValidator" import { ClineSayTool } from "../../shared/ExtensionMessage" import { formatResponse } from "../prompts/responses" import { t } from "../../i18n" @@ -11,6 +12,7 @@ import { isPathOutsideWorkspace } from "../../utils/pathUtils" import { getReadablePath } from "../../utils/path" import { countFileLines } from "../../integrations/misc/line-counter" import { readLines } from "../../integrations/misc/read-lines" +import { readPartialContent } from "../../integrations/misc/read-partial-content" import { extractTextFromFile, addLineNumbers, getSupportedBinaryFormats } from "../../integrations/misc/extract-text" import { parseSourceCodeDefinitionsForFile } from "../../services/tree-sitter" import { parseXml } from "../../utils/xml" @@ -456,6 +458,13 @@ export 
async function readFileTool( try { const [totalLines, isBinary] = await Promise.all([countFileLines(fullPath), isBinaryFile(fullPath)]) + // Preemptive file size validation to prevent context overflow + const validation = await validateFileSizeForContext(fullPath, totalLines, maxReadFileLine, cline) + let validationNotice = "" + + // Apply validation if maxReadFileLine is -1 (unlimited) + const shouldApplyValidation = validation.shouldLimit && maxReadFileLine === -1 + // Handle binary files (but allow specific file types that extractTextFromFile can handle) if (isBinary) { const fileExtension = path.extname(relPath).toLowerCase() @@ -550,7 +559,7 @@ export async function readFileTool( try { const defResult = await parseSourceCodeDefinitionsForFile(fullPath, cline.rooIgnoreController) if (defResult) { - let xmlInfo = `Showing only ${maxReadFileLine} of ${totalLines} total lines. Use line_range if you need to read more lines\n` + let xmlInfo = `${t("tools:readFile.showingOnlyLines", { shown: 0, total: totalLines })}\n` updateFileResult(relPath, { xmlContent: `${relPath}\n${defResult}\n${xmlInfo}`, }) @@ -567,7 +576,66 @@ export async function readFileTool( continue } - // Handle files exceeding line threshold + // Handle files with validation limits (character-based reading) + if (shouldApplyValidation) { + const result = await readPartialContent(fullPath, validation.safeContentLimit) + + // Generate line range attribute based on what was read + const lineRangeAttr = result.linesRead === 1 ? 
` lines="1"` : ` lines="1-${result.lastLineRead}"` + + const content = addLineNumbers(result.content, 1) + let xmlInfo = `\n${content}\n` + + try { + const defResult = await parseSourceCodeDefinitionsForFile(fullPath, cline.rooIgnoreController) + if (defResult) { + xmlInfo += `${defResult}\n` + } + + // Generate notice based on what was read + const percentRead = Math.round((result.charactersRead / result.totalCharacters) * 100) + if (result.linesRead === 1) { + // Single-line file + const notice = t("tools:readFile.partialReadSingleLine", { + charactersRead: result.charactersRead, + totalCharacters: result.totalCharacters, + percentRead, + }) + xmlInfo += `${notice}\n` + } else { + // Multi-line file + const nextLineStart = result.lastLineRead + 1 + const suggestedLineEnd = Math.min(result.lastLineRead + 1000, result.totalLines) + const notice = t("tools:readFile.partialReadMultiLine", { + charactersRead: result.charactersRead, + totalCharacters: result.totalCharacters, + percentRead, + lastLineRead: result.lastLineRead, + totalLines: result.totalLines, + path: relPath, + nextLineStart, + suggestedLineEnd, + }) + xmlInfo += `${notice}\n` + } + + const finalXml = `${relPath}\n${xmlInfo}` + updateFileResult(relPath, { + xmlContent: finalXml, + }) + } catch (error) { + if (error instanceof Error && error.message.startsWith("Unsupported language:")) { + console.warn(`[read_file] Warning: ${error.message}`) + } else { + console.error( + `[read_file] Unhandled error: ${error instanceof Error ? 
error.message : String(error)}`, + ) + } + } + continue + } + + // Handle files with line limits (maxReadFileLine > 0) if (maxReadFileLine > 0 && totalLines > maxReadFileLine) { const content = addLineNumbers(await readLines(fullPath, maxReadFileLine - 1, 0)) const lineRangeAttr = ` lines="1-${maxReadFileLine}"` @@ -578,7 +646,8 @@ export async function readFileTool( if (defResult) { xmlInfo += `${defResult}\n` } - xmlInfo += `Showing only ${maxReadFileLine} of ${totalLines} total lines. Use line_range if you need to read more lines\n` + xmlInfo += `${t("tools:readFile.showingOnlyLines", { shown: maxReadFileLine, total: totalLines })}\n` + updateFileResult(relPath, { xmlContent: `${relPath}\n${xmlInfo}`, }) @@ -594,8 +663,9 @@ export async function readFileTool( continue } - // Handle normal file read + // Handle normal file read (no limits) const content = await extractTextFromFile(fullPath) + const lineRangeAttr = ` lines="1-${totalLines}"` let xmlInfo = totalLines > 0 ? `\n${content}\n` : `` diff --git a/src/i18n/locales/ca/tools.json b/src/i18n/locales/ca/tools.json index 0f10b6fc2a..63e8655961 100644 --- a/src/i18n/locales/ca/tools.json +++ b/src/i18n/locales/ca/tools.json @@ -3,8 +3,11 @@ "linesRange": " (línies {{start}}-{{end}})", "definitionsOnly": " (només definicions)", "maxLines": " (màxim {{max}} línies)", + "showingOnlyLines": "Mostrant només {{shown}} de {{total}} línies totals. Utilitza line_range si necessites llegir més línies", "imageTooLarge": "El fitxer d'imatge és massa gran ({{size}} MB). La mida màxima permesa és {{max}} MB.", - "imageWithSize": "Fitxer d'imatge ({{size}} KB)" + "imageWithSize": "Fitxer d'imatge ({{size}} KB)", + "partialReadSingleLine": "Llegits {{charactersRead}} de {{totalCharacters}} caràcters ({{percentRead}}%) d'aquest fitxer d'una sola línia. 
Aquesta és una lectura parcial - el contingut restant no es pot accedir a causa de limitacions de context.", + "partialReadMultiLine": "Llegits {{charactersRead}} de {{totalCharacters}} caràcters ({{percentRead}}%), fins a la línia {{lastLineRead}} de {{totalLines}}. Per llegir seccions específiques d'aquest fitxer, utilitza el següent format:\n\n\n \n {{path}}\n start-end\n \n\n\n\nPer exemple, per llegir les línies {{nextLineStart}}-{{suggestedLineEnd}}:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo sembla estar atrapat en un bucle, intentant la mateixa acció ({{toolName}}) repetidament. Això podria indicar un problema amb la seva estratègia actual. Considera reformular la tasca, proporcionar instruccions més específiques o guiar-lo cap a un enfocament diferent.", "codebaseSearch": { diff --git a/src/i18n/locales/de/tools.json b/src/i18n/locales/de/tools.json index ecf372a50b..6c5691e61c 100644 --- a/src/i18n/locales/de/tools.json +++ b/src/i18n/locales/de/tools.json @@ -3,8 +3,11 @@ "linesRange": " (Zeilen {{start}}-{{end}})", "definitionsOnly": " (nur Definitionen)", "maxLines": " (maximal {{max}} Zeilen)", + "showingOnlyLines": "Zeige nur {{shown}} von {{total}} Zeilen insgesamt. Verwende line_range, wenn du mehr Zeilen lesen musst", "imageTooLarge": "Die Bilddatei ist zu groß ({{size}} MB). Die maximal erlaubte Größe beträgt {{max}} MB.", - "imageWithSize": "Bilddatei ({{size}} KB)" + "imageWithSize": "Bilddatei ({{size}} KB)", + "partialReadSingleLine": "{{charactersRead}} von {{totalCharacters}} Zeichen ({{percentRead}}%) aus dieser einzeiligen Datei gelesen. Dies ist ein partieller Lesevorgang - der verbleibende Inhalt kann aufgrund von Kontextbeschränkungen nicht zugegriffen werden.", + "partialReadMultiLine": "{{charactersRead}} von {{totalCharacters}} Zeichen ({{percentRead}}%) gelesen, bis Zeile {{lastLineRead}} von {{totalLines}}. 
Um bestimmte Abschnitte dieser Datei zu lesen, verwende das folgende Format:\n\n\n \n {{path}}\n start-end\n \n\n\n\nZum Beispiel, um die Zeilen {{nextLineStart}}-{{suggestedLineEnd}} zu lesen:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo scheint in einer Schleife festzustecken und versucht wiederholt dieselbe Aktion ({{toolName}}). Dies könnte auf ein Problem mit der aktuellen Strategie hindeuten. Überlege dir, die Aufgabe umzuformulieren, genauere Anweisungen zu geben oder Roo zu einem anderen Ansatz zu führen.", "codebaseSearch": { diff --git a/src/i18n/locales/en/tools.json b/src/i18n/locales/en/tools.json index 5b88affae6..060653e68a 100644 --- a/src/i18n/locales/en/tools.json +++ b/src/i18n/locales/en/tools.json @@ -3,6 +3,9 @@ "linesRange": " (lines {{start}}-{{end}})", "definitionsOnly": " (definitions only)", "maxLines": " (max {{max}} lines)", + "showingOnlyLines": "Showing only {{shown}} of {{total}} total lines. Use line_range if you need to read more lines", + "partialReadSingleLine": "Read {{charactersRead}} of {{totalCharacters}} characters ({{percentRead}}%) from this single-line file. This is a partial read - the remaining content cannot be accessed due to context limitations.", + "partialReadMultiLine": "Read {{charactersRead}} of {{totalCharacters}} characters ({{percentRead}}%), up to line {{lastLineRead}} of {{totalLines}}. To read specific sections of this file, use the following format:\n\n\n \n {{path}}\n start-end\n \n\n\n\nFor example, to read lines {{nextLineStart}}-{{suggestedLineEnd}}:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n", "imageTooLarge": "Image file is too large ({{size}} MB). 
The maximum allowed size is {{max}} MB.", "imageWithSize": "Image file ({{size}} KB)" }, diff --git a/src/i18n/locales/es/tools.json b/src/i18n/locales/es/tools.json index 6fd1cc2122..5d22692014 100644 --- a/src/i18n/locales/es/tools.json +++ b/src/i18n/locales/es/tools.json @@ -3,8 +3,11 @@ "linesRange": " (líneas {{start}}-{{end}})", "definitionsOnly": " (solo definiciones)", "maxLines": " (máximo {{max}} líneas)", + "showingOnlyLines": "Mostrando solo {{shown}} de {{total}} líneas totales. Usa line_range si necesitas leer más líneas", "imageTooLarge": "El archivo de imagen es demasiado grande ({{size}} MB). El tamaño máximo permitido es {{max}} MB.", - "imageWithSize": "Archivo de imagen ({{size}} KB)" + "imageWithSize": "Archivo de imagen ({{size}} KB)", + "partialReadSingleLine": "Leídos {{charactersRead}} de {{totalCharacters}} caracteres ({{percentRead}}%) de este archivo de una sola línea. Esta es una lectura parcial - el contenido restante no se puede acceder debido a limitaciones de contexto.", + "partialReadMultiLine": "Leídos {{charactersRead}} de {{totalCharacters}} caracteres ({{percentRead}}%), hasta la línea {{lastLineRead}} de {{totalLines}}. Para leer secciones específicas de este archivo, usa el siguiente formato:\n\n\n \n {{path}}\n start-end\n \n\n\n\nPor ejemplo, para leer las líneas {{nextLineStart}}-{{suggestedLineEnd}}:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo parece estar atrapado en un bucle, intentando la misma acción ({{toolName}}) repetidamente. Esto podría indicar un problema con su estrategia actual. 
Considera reformular la tarea, proporcionar instrucciones más específicas o guiarlo hacia un enfoque diferente.", "codebaseSearch": { diff --git a/src/i18n/locales/fr/tools.json b/src/i18n/locales/fr/tools.json index b6d7accebb..37000efc63 100644 --- a/src/i18n/locales/fr/tools.json +++ b/src/i18n/locales/fr/tools.json @@ -3,8 +3,11 @@ "linesRange": " (lignes {{start}}-{{end}})", "definitionsOnly": " (définitions uniquement)", "maxLines": " (max {{max}} lignes)", + "showingOnlyLines": "Affichage de seulement {{shown}} sur {{total}} lignes totales. Utilise line_range si tu as besoin de lire plus de lignes", "imageTooLarge": "Le fichier image est trop volumineux ({{size}} MB). La taille maximale autorisée est {{max}} MB.", - "imageWithSize": "Fichier image ({{size}} Ko)" + "imageWithSize": "Fichier image ({{size}} Ko)", + "partialReadSingleLine": "Lu {{charactersRead}} sur {{totalCharacters}} caractères ({{percentRead}}%) de ce fichier d'une seule ligne. Ceci est une lecture partielle - le contenu restant ne peut pas être accédé en raison de limitations de contexte.", + "partialReadMultiLine": "Lu {{charactersRead}} sur {{totalCharacters}} caractères ({{percentRead}}%), jusqu'à la ligne {{lastLineRead}} sur {{totalLines}}. Pour lire des sections spécifiques de ce fichier, utilise le format suivant :\n\n\n \n {{path}}\n start-end\n \n\n\n\nPar exemple, pour lire les lignes {{nextLineStart}}-{{suggestedLineEnd}} :\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo semble être bloqué dans une boucle, tentant la même action ({{toolName}}) de façon répétée. Cela pourrait indiquer un problème avec sa stratégie actuelle. 
Envisage de reformuler la tâche, de fournir des instructions plus spécifiques ou de le guider vers une approche différente.", "codebaseSearch": { diff --git a/src/i18n/locales/hi/tools.json b/src/i18n/locales/hi/tools.json index cbfbd7aef7..793e92ce62 100644 --- a/src/i18n/locales/hi/tools.json +++ b/src/i18n/locales/hi/tools.json @@ -3,8 +3,11 @@ "linesRange": " (पंक्तियाँ {{start}}-{{end}})", "definitionsOnly": " (केवल परिभाषाएँ)", "maxLines": " (अधिकतम {{max}} पंक्तियाँ)", + "showingOnlyLines": "कुल {{total}} पंक्तियों में से केवल {{shown}} दिखा रहे हैं। यदि आपको अधिक पंक्तियाँ पढ़नी हैं तो line_range का उपयोग करें", "imageTooLarge": "छवि फ़ाइल बहुत बड़ी है ({{size}} MB)। अधिकतम अनुमतित आकार {{max}} MB है।", - "imageWithSize": "छवि फ़ाइल ({{size}} KB)" + "imageWithSize": "छवि फ़ाइल ({{size}} KB)", + "partialReadSingleLine": "इस एकल-पंक्ति फ़ाइल से {{charactersRead}} में से {{totalCharacters}} वर्ण ({{percentRead}}%) पढ़े गए। यह एक आंशिक पठन है - शेष सामग्री संदर्भ सीमाओं के कारण पहुंच योग्य नहीं है।", + "partialReadMultiLine": "{{charactersRead}} में से {{totalCharacters}} वर्ण ({{percentRead}}%) पढ़े गए, {{totalLines}} में से पंक्ति {{lastLineRead}} तक। इस फ़ाइल के विशिष्ट अनुभागों को पढ़ने के लिए, निम्नलिखित प्रारूप का उपयोग करें:\n\n\n \n {{path}}\n start-end\n \n\n\n\nउदाहरण के लिए, पंक्तियां {{nextLineStart}}-{{suggestedLineEnd}} पढ़ने के लिए:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo एक लूप में फंसा हुआ लगता है, बार-बार एक ही क्रिया ({{toolName}}) को दोहरा रहा है। यह उसकी वर्तमान रणनीति में किसी समस्या का संकेत हो सकता है। कार्य को पुनः परिभाषित करने, अधिक विशिष्ट निर्देश देने, या उसे एक अलग दृष्टिकोण की ओर मार्गदर्शित करने पर विचार करें।", "codebaseSearch": { diff --git a/src/i18n/locales/id/tools.json b/src/i18n/locales/id/tools.json index 3eb8854eff..fc55cda4ec 100644 --- a/src/i18n/locales/id/tools.json +++ b/src/i18n/locales/id/tools.json @@ -3,8 +3,11 @@ "linesRange": " (baris 
{{start}}-{{end}})", "definitionsOnly": " (hanya definisi)", "maxLines": " (maks {{max}} baris)", + "showingOnlyLines": "Menampilkan hanya {{shown}} dari {{total}} total baris. Gunakan line_range jika kamu perlu membaca lebih banyak baris", "imageTooLarge": "File gambar terlalu besar ({{size}} MB). Ukuran maksimum yang diizinkan adalah {{max}} MB.", - "imageWithSize": "File gambar ({{size}} KB)" + "imageWithSize": "File gambar ({{size}} KB)", + "partialReadSingleLine": "Membaca {{charactersRead}} dari {{totalCharacters}} karakter ({{percentRead}}%) dari file satu baris ini. Ini adalah pembacaan parsial - konten yang tersisa tidak dapat diakses karena keterbatasan konteks.", + "partialReadMultiLine": "Membaca {{charactersRead}} dari {{totalCharacters}} karakter ({{percentRead}}%), hingga baris {{lastLineRead}} dari {{totalLines}}. Untuk membaca bagian tertentu dari file ini, gunakan format berikut:\n\n\n \n {{path}}\n start-end\n \n\n\n\nContoh, untuk membaca baris {{nextLineStart}}-{{suggestedLineEnd}}:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo tampaknya terjebak dalam loop, mencoba aksi yang sama ({{toolName}}) berulang kali. Ini mungkin menunjukkan masalah dengan strategi saat ini. Pertimbangkan untuk mengubah frasa tugas, memberikan instruksi yang lebih spesifik, atau mengarahkannya ke pendekatan yang berbeda.", "codebaseSearch": { diff --git a/src/i18n/locales/it/tools.json b/src/i18n/locales/it/tools.json index 35b114a719..2f0b635594 100644 --- a/src/i18n/locales/it/tools.json +++ b/src/i18n/locales/it/tools.json @@ -3,8 +3,11 @@ "linesRange": " (righe {{start}}-{{end}})", "definitionsOnly": " (solo definizioni)", "maxLines": " (max {{max}} righe)", + "showingOnlyLines": "Mostrando solo {{shown}} di {{total}} righe totali. Usa line_range se hai bisogno di leggere più righe", "imageTooLarge": "Il file immagine è troppo grande ({{size}} MB). 
La dimensione massima consentita è {{max}} MB.", - "imageWithSize": "File immagine ({{size}} KB)" + "imageWithSize": "File immagine ({{size}} KB)", + "partialReadSingleLine": "Letti {{charactersRead}} di {{totalCharacters}} caratteri ({{percentRead}}%) da questo file a riga singola. Questa è una lettura parziale - il contenuto rimanente non può essere accessibile a causa di limitazioni di contesto.", + "partialReadMultiLine": "Letti {{charactersRead}} di {{totalCharacters}} caratteri ({{percentRead}}%), fino alla riga {{lastLineRead}} di {{totalLines}}. Per leggere sezioni specifiche di questo file, usa il seguente formato:\n\n\n \n {{path}}\n start-end\n \n\n\n\nAd esempio, per leggere le righe {{nextLineStart}}-{{suggestedLineEnd}}:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo sembra essere bloccato in un ciclo, tentando ripetutamente la stessa azione ({{toolName}}). Questo potrebbe indicare un problema con la sua strategia attuale. 
Considera di riformulare l'attività, fornire istruzioni più specifiche o guidarlo verso un approccio diverso.", "codebaseSearch": { diff --git a/src/i18n/locales/ja/tools.json b/src/i18n/locales/ja/tools.json index 257d5aa201..316ea96e9e 100644 --- a/src/i18n/locales/ja/tools.json +++ b/src/i18n/locales/ja/tools.json @@ -3,8 +3,11 @@ "linesRange": " ({{start}}-{{end}}行目)", "definitionsOnly": " (定義のみ)", "maxLines": " (最大{{max}}行)", + "showingOnlyLines": "全{{total}}行中{{shown}}行のみ表示しています。より多くの行を読む必要がある場合はline_rangeを使用してください", "imageTooLarge": "画像ファイルが大きすぎます({{size}} MB)。最大許可サイズは {{max}} MB です。", - "imageWithSize": "画像ファイル({{size}} KB)" + "imageWithSize": "画像ファイル({{size}} KB)", + "partialReadSingleLine": "この単一行ファイルから{{charactersRead}}文字中{{totalCharacters}}文字({{percentRead}}%)を読み取りました。これは部分的な読み取りです - コンテキストの制限により、残りのコンテンツにはアクセスできません。", + "partialReadMultiLine": "{{charactersRead}}文字中{{totalCharacters}}文字({{percentRead}}%)を読み取りました。{{totalLines}}行中{{lastLineRead}}行目まで。このファイルの特定のセクションを読み取るには、次の形式を使用してください:\n\n\n \n {{path}}\n start-end\n \n\n\n\n例えば、{{nextLineStart}}-{{suggestedLineEnd}}行を読み取るには:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Rooが同じ操作({{toolName}})を繰り返し試みるループに陥っているようです。これは現在の方法に問題がある可能性を示しています。タスクの言い換え、より具体的な指示の提供、または別のアプローチへの誘導を検討してください。", "codebaseSearch": { diff --git a/src/i18n/locales/ko/tools.json b/src/i18n/locales/ko/tools.json index 94b6d8c377..3c3fd549f3 100644 --- a/src/i18n/locales/ko/tools.json +++ b/src/i18n/locales/ko/tools.json @@ -3,8 +3,11 @@ "linesRange": " ({{start}}-{{end}}행)", "definitionsOnly": " (정의만)", "maxLines": " (최대 {{max}}행)", + "showingOnlyLines": "전체 {{total}}행 중 {{shown}}행만 표시하고 있습니다. 더 많은 행을 읽으려면 line_range를 사용하세요", "imageTooLarge": "이미지 파일이 너무 큽니다 ({{size}} MB). 
최대 허용 크기는 {{max}} MB입니다.", - "imageWithSize": "이미지 파일 ({{size}} KB)" + "imageWithSize": "이미지 파일 ({{size}} KB)", + "partialReadSingleLine": "이 단일 행 파일에서 {{totalCharacters}}자 중 {{charactersRead}}자 ({{percentRead}}%)를 읽었습니다. 이는 부분 읽기입니다 - 컨텍스트 제한으로 인해 나머지 내용에 액세스할 수 없습니다.", + "partialReadMultiLine": "{{totalCharacters}}자 중 {{charactersRead}}자 ({{percentRead}}%)를 읽었습니다. {{totalLines}}행 중 {{lastLineRead}}행까지입니다. 이 파일의 특정 섹션을 읽으려면 다음 형식을 사용하세요:\n\n\n \n {{path}}\n start-end\n \n\n\n\n예를 들어, {{nextLineStart}}-{{suggestedLineEnd}}행을 읽으려면:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo가 같은 동작({{toolName}})을 반복적으로 시도하면서 루프에 갇힌 것 같습니다. 이는 현재 전략에 문제가 있을 수 있음을 나타냅니다. 작업을 다시 표현하거나, 더 구체적인 지침을 제공하거나, 다른 접근 방식으로 안내해 보세요.", "codebaseSearch": { diff --git a/src/i18n/locales/nl/tools.json b/src/i18n/locales/nl/tools.json index 449cd54583..0772961c4f 100644 --- a/src/i18n/locales/nl/tools.json +++ b/src/i18n/locales/nl/tools.json @@ -3,8 +3,11 @@ "linesRange": " (regels {{start}}-{{end}})", "definitionsOnly": " (alleen definities)", "maxLines": " (max {{max}} regels)", + "showingOnlyLines": "Toont alleen {{shown}} van {{total}} totale regels. Gebruik line_range als je meer regels wilt lezen", "imageTooLarge": "Afbeeldingsbestand is te groot ({{size}} MB). De maximaal toegestane grootte is {{max}} MB.", - "imageWithSize": "Afbeeldingsbestand ({{size}} KB)" + "imageWithSize": "Afbeeldingsbestand ({{size}} KB)", + "partialReadSingleLine": "{{charactersRead}} van {{totalCharacters}} tekens ({{percentRead}}%) gelezen van dit bestand met één regel. Dit is een gedeeltelijke lezing - de resterende inhoud is niet toegankelijk vanwege contextbeperkingen.", + "partialReadMultiLine": "{{charactersRead}} van {{totalCharacters}} tekens ({{percentRead}}%) gelezen, tot regel {{lastLineRead}} van {{totalLines}}. 
Om specifieke secties van dit bestand te lezen, gebruik je het volgende formaat:\n\n\n \n {{path}}\n start-end\n \n\n\n\nBijvoorbeeld, om regels {{nextLineStart}}-{{suggestedLineEnd}} te lezen:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo lijkt vast te zitten in een lus, waarbij hij herhaaldelijk dezelfde actie ({{toolName}}) probeert. Dit kan duiden op een probleem met de huidige strategie. Overweeg de taak te herformuleren, specifiekere instructies te geven of Roo naar een andere aanpak te leiden.", "codebaseSearch": { diff --git a/src/i18n/locales/pl/tools.json b/src/i18n/locales/pl/tools.json index 979b2f54ae..9258dfe2f3 100644 --- a/src/i18n/locales/pl/tools.json +++ b/src/i18n/locales/pl/tools.json @@ -3,8 +3,11 @@ "linesRange": " (linie {{start}}-{{end}})", "definitionsOnly": " (tylko definicje)", "maxLines": " (maks. {{max}} linii)", + "showingOnlyLines": "Pokazuję tylko {{shown}} z {{total}} wszystkich linii. Użyj line_range jeśli potrzebujesz przeczytać więcej linii", "imageTooLarge": "Plik obrazu jest zbyt duży ({{size}} MB). Maksymalny dozwolony rozmiar to {{max}} MB.", - "imageWithSize": "Plik obrazu ({{size}} KB)" + "imageWithSize": "Plik obrazu ({{size}} KB)", + "partialReadSingleLine": "Przeczytano {{charactersRead}} z {{totalCharacters}} znaków ({{percentRead}}%) z tego jednoliniowego pliku. To jest częściowy odczyt - pozostała zawartość nie może być dostępna z powodu ograniczeń kontekstu.", + "partialReadMultiLine": "Przeczytano {{charactersRead}} z {{totalCharacters}} znaków ({{percentRead}}%), do linii {{lastLineRead}} z {{totalLines}}. 
Aby przeczytać określone sekcje tego pliku, użyj następującego formatu:\n\n\n \n {{path}}\n start-end\n \n\n\n\nNa przykład, aby przeczytać linie {{nextLineStart}}-{{suggestedLineEnd}}:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Wygląda na to, że Roo utknął w pętli, wielokrotnie próbując wykonać tę samą akcję ({{toolName}}). Może to wskazywać na problem z jego obecną strategią. Rozważ przeformułowanie zadania, podanie bardziej szczegółowych instrukcji lub nakierowanie go na inne podejście.", "codebaseSearch": { diff --git a/src/i18n/locales/pt-BR/tools.json b/src/i18n/locales/pt-BR/tools.json index 4e3296fd4a..5bcf94f559 100644 --- a/src/i18n/locales/pt-BR/tools.json +++ b/src/i18n/locales/pt-BR/tools.json @@ -3,8 +3,11 @@ "linesRange": " (linhas {{start}}-{{end}})", "definitionsOnly": " (apenas definições)", "maxLines": " (máx. {{max}} linhas)", + "showingOnlyLines": "Mostrando apenas {{shown}} de {{total}} linhas totais. Use line_range se precisar ler mais linhas", "imageTooLarge": "Arquivo de imagem é muito grande ({{size}} MB). O tamanho máximo permitido é {{max}} MB.", - "imageWithSize": "Arquivo de imagem ({{size}} KB)" + "imageWithSize": "Arquivo de imagem ({{size}} KB)", + "partialReadSingleLine": "Lidos {{charactersRead}} de {{totalCharacters}} caracteres ({{percentRead}}%) deste arquivo de linha única. Esta é uma leitura parcial - o conteúdo restante não pode ser acessado devido a limitações de contexto.", + "partialReadMultiLine": "Lidos {{charactersRead}} de {{totalCharacters}} caracteres ({{percentRead}}%), até a linha {{lastLineRead}} de {{totalLines}}. 
Para ler seções específicas deste arquivo, use o seguinte formato:\n\n\n \n {{path}}\n start-end\n \n\n\n\nPor exemplo, para ler as linhas {{nextLineStart}}-{{suggestedLineEnd}}:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo parece estar preso em um loop, tentando a mesma ação ({{toolName}}) repetidamente. Isso pode indicar um problema com sua estratégia atual. Considere reformular a tarefa, fornecer instruções mais específicas ou guiá-lo para uma abordagem diferente.", "codebaseSearch": { diff --git a/src/i18n/locales/ru/tools.json b/src/i18n/locales/ru/tools.json index d74918f058..fe9e3996ed 100644 --- a/src/i18n/locales/ru/tools.json +++ b/src/i18n/locales/ru/tools.json @@ -3,8 +3,11 @@ "linesRange": " (строки {{start}}-{{end}})", "definitionsOnly": " (только определения)", "maxLines": " (макс. {{max}} строк)", + "showingOnlyLines": "Показано только {{shown}} из {{total}} общих строк. Используй line_range если нужно прочитать больше строк", "imageTooLarge": "Файл изображения слишком большой ({{size}} МБ). Максимально допустимый размер {{max}} МБ.", - "imageWithSize": "Файл изображения ({{size}} КБ)" + "imageWithSize": "Файл изображения ({{size}} КБ)", + "partialReadSingleLine": "Прочитано {{charactersRead}} из {{totalCharacters}} символов ({{percentRead}}%) из этого однострочного файла. Это частичное чтение - оставшееся содержимое недоступно из-за ограничений контекста.", + "partialReadMultiLine": "Прочитано {{charactersRead}} из {{totalCharacters}} символов ({{percentRead}}%), до строки {{lastLineRead}} из {{totalLines}}. 
Чтобы прочитать определенные разделы этого файла, используйте следующий формат:\n\n\n \n {{path}}\n start-end\n \n\n\n\nНапример, чтобы прочитать строки {{nextLineStart}}-{{suggestedLineEnd}}:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Похоже, что Roo застрял в цикле, многократно пытаясь выполнить одно и то же действие ({{toolName}}). Это может указывать на проблему с его текущей стратегией. Попробуйте переформулировать задачу, предоставить более конкретные инструкции или направить его к другому подходу.", "codebaseSearch": { diff --git a/src/i18n/locales/tr/tools.json b/src/i18n/locales/tr/tools.json index 5341a23cb1..985af11823 100644 --- a/src/i18n/locales/tr/tools.json +++ b/src/i18n/locales/tr/tools.json @@ -3,8 +3,11 @@ "linesRange": " (satır {{start}}-{{end}})", "definitionsOnly": " (sadece tanımlar)", "maxLines": " (maks. {{max}} satır)", + "showingOnlyLines": "Toplam {{total}} satırdan sadece {{shown}} tanesi gösteriliyor. Daha fazla satır okumak için line_range kullan", "imageTooLarge": "Görüntü dosyası çok büyük ({{size}} MB). İzin verilen maksimum boyut {{max}} MB.", - "imageWithSize": "Görüntü dosyası ({{size}} KB)" + "imageWithSize": "Görüntü dosyası ({{size}} KB)", + "partialReadSingleLine": "Bu tek satırlık dosyadan {{totalCharacters}} karakterden {{charactersRead}} karakter ({{percentRead}}%) okundu. Bu kısmi bir okuma - kalan içeriğe bağlam sınırlamaları nedeniyle erişilemiyor.", + "partialReadMultiLine": "{{totalCharacters}} karakterden {{charactersRead}} karakter ({{percentRead}}%) okundu, {{totalLines}} satırdan {{lastLineRead}}. satıra kadar. 
Bu dosyanın belirli bölümlerini okumak için aşağıdaki formatı kullan:\n\n\n \n {{path}}\n start-end\n \n\n\n\nÖrneğin, {{nextLineStart}}-{{suggestedLineEnd}} satırlarını okumak için:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo bir döngüye takılmış gibi görünüyor, aynı eylemi ({{toolName}}) tekrar tekrar deniyor. Bu, mevcut stratejisinde bir sorun olduğunu gösterebilir. Görevi yeniden ifade etmeyi, daha spesifik talimatlar vermeyi veya onu farklı bir yaklaşıma yönlendirmeyi düşünün.", "codebaseSearch": { diff --git a/src/i18n/locales/vi/tools.json b/src/i18n/locales/vi/tools.json index 4c5080a146..574d2b346e 100644 --- a/src/i18n/locales/vi/tools.json +++ b/src/i18n/locales/vi/tools.json @@ -3,8 +3,11 @@ "linesRange": " (dòng {{start}}-{{end}})", "definitionsOnly": " (chỉ định nghĩa)", "maxLines": " (tối đa {{max}} dòng)", + "showingOnlyLines": "Chỉ hiển thị {{shown}} trong tổng số {{total}} dòng. Sử dụng line_range nếu bạn cần đọc thêm dòng", "imageTooLarge": "Tệp hình ảnh quá lớn ({{size}} MB). Kích thước tối đa cho phép là {{max}} MB.", - "imageWithSize": "Tệp hình ảnh ({{size}} KB)" + "imageWithSize": "Tệp hình ảnh ({{size}} KB)", + "partialReadSingleLine": "Đã đọc {{charactersRead}} trong số {{totalCharacters}} ký tự ({{percentRead}}%) từ tệp một dòng này. Đây là việc đọc một phần - nội dung còn lại không thể truy cập được do giới hạn ngữ cảnh.", + "partialReadMultiLine": "Đã đọc {{charactersRead}} trong số {{totalCharacters}} ký tự ({{percentRead}}%), đến dòng {{lastLineRead}} trong tổng số {{totalLines}} dòng. Để đọc các phần cụ thể của tệp này, hãy sử dụng định dạng sau:\n\n\n \n {{path}}\n start-end\n \n\n\n\nVí dụ, để đọc các dòng {{nextLineStart}}-{{suggestedLineEnd}}:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo dường như đang bị mắc kẹt trong một vòng lặp, liên tục cố gắng thực hiện cùng một hành động ({{toolName}}). 
Điều này có thể cho thấy vấn đề với chiến lược hiện tại. Hãy cân nhắc việc diễn đạt lại nhiệm vụ, cung cấp hướng dẫn cụ thể hơn, hoặc hướng Roo theo một cách tiếp cận khác.", "codebaseSearch": { diff --git a/src/i18n/locales/zh-CN/tools.json b/src/i18n/locales/zh-CN/tools.json index c0c93d8436..d8961e7268 100644 --- a/src/i18n/locales/zh-CN/tools.json +++ b/src/i18n/locales/zh-CN/tools.json @@ -3,8 +3,11 @@ "linesRange": " (第 {{start}}-{{end}} 行)", "definitionsOnly": " (仅定义)", "maxLines": " (最多 {{max}} 行)", + "showingOnlyLines": "仅显示 {{shown}} 行,共 {{total}} 行。如需阅读更多行请使用 line_range", "imageTooLarge": "图片文件过大 ({{size}} MB)。允许的最大大小为 {{max}} MB。", - "imageWithSize": "图片文件 ({{size}} KB)" + "imageWithSize": "图片文件 ({{size}} KB)", + "partialReadSingleLine": "已读取此单行文件中 {{totalCharacters}} 个字符中的 {{charactersRead}} 个字符 ({{percentRead}}%)。这是部分读取 - 由于上下文限制,无法访问剩余内容。", + "partialReadMultiLine": "已读取 {{totalCharacters}} 个字符中的 {{charactersRead}} 个字符 ({{percentRead}}%),读取到第 {{lastLineRead}} 行,共 {{totalLines}} 行。要读取此文件的特定部分,请使用以下格式:\n\n\n \n {{path}}\n start-end\n \n\n\n\n例如,要读取第 {{nextLineStart}}-{{suggestedLineEnd}} 行:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo 似乎陷入循环,反复尝试同一操作 ({{toolName}})。这可能表明当前策略存在问题。请考虑重新描述任务、提供更具体的指示或引导其尝试不同的方法。", "codebaseSearch": { diff --git a/src/i18n/locales/zh-TW/tools.json b/src/i18n/locales/zh-TW/tools.json index b736448c20..6daecf2b1e 100644 --- a/src/i18n/locales/zh-TW/tools.json +++ b/src/i18n/locales/zh-TW/tools.json @@ -3,8 +3,11 @@ "linesRange": " (第 {{start}}-{{end}} 行)", "definitionsOnly": " (僅定義)", "maxLines": " (最多 {{max}} 行)", + "showingOnlyLines": "僅顯示 {{shown}} 行,共 {{total}} 行。如需閱讀更多行請使用 line_range", "imageTooLarge": "圖片檔案過大 ({{size}} MB)。允許的最大大小為 {{max}} MB。", - "imageWithSize": "圖片檔案 ({{size}} KB)" + "imageWithSize": "圖片檔案 ({{size}} KB)", + "partialReadSingleLine": "已讀取此單行檔案中 {{totalCharacters}} 個字元中的 {{charactersRead}} 個字元 ({{percentRead}}%)。這是部分讀取 - 由於內容限制,無法存取剩餘內容。", + 
"partialReadMultiLine": "已讀取 {{totalCharacters}} 個字元中的 {{charactersRead}} 個字元 ({{percentRead}}%),讀取到第 {{lastLineRead}} 行,共 {{totalLines}} 行。要讀取此檔案的特定部分,請使用以下格式:\n\n\n \n {{path}}\n 開始-結束\n \n\n\n\n例如,要讀取第 {{nextLineStart}}-{{suggestedLineEnd}} 行:\n\n\n \n {{path}}\n {{nextLineStart}}-{{suggestedLineEnd}}\n \n\n" }, "toolRepetitionLimitReached": "Roo 似乎陷入循環,反覆嘗試同一操作 ({{toolName}})。這可能表明目前策略存在問題。請考慮重新描述工作、提供更具體的指示或引導其嘗試不同的方法。", "codebaseSearch": { diff --git a/src/integrations/misc/__tests__/read-partial-content.spec.ts b/src/integrations/misc/__tests__/read-partial-content.spec.ts new file mode 100644 index 0000000000..eea72ce0f2 --- /dev/null +++ b/src/integrations/misc/__tests__/read-partial-content.spec.ts @@ -0,0 +1,429 @@ +import { describe, it, expect, beforeEach, afterEach, vi } from "vitest" +import { readPartialSingleLineContent, readPartialContent } from "../read-partial-content" +import * as fs from "fs" +import * as path from "path" +import * as os from "os" + +describe("read-partial-content", () => { + let tempDir: string + let testFiles: string[] = [] + + beforeEach(async () => { + // Create a temporary directory for test files + tempDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), "read-partial-test-")) + testFiles = [] + }) + + afterEach(async () => { + // Clean up test files + for (const file of testFiles) { + try { + await fs.promises.unlink(file) + } catch (error) { + // Ignore cleanup errors + } + } + try { + await fs.promises.rmdir(tempDir) + } catch (error) { + // Ignore cleanup errors + } + }) + + async function createTestFile(filename: string, content: string): Promise { + const filePath = path.join(tempDir, filename) + await fs.promises.writeFile(filePath, content, "utf8") + testFiles.push(filePath) + return filePath + } + + describe("readPartialContent", () => { + describe("Basic functionality", () => { + it("should read partial content with line tracking", async () => { + const content = "Line 1\nLine 2\nLine 3\nLine 4" + const 
filePath = await createTestFile("multiline.txt", content) + + const result = await readPartialContent(filePath, 15) + + expect(result.content).toBe("Line 1\nLine 2\nL") + expect(result.charactersRead).toBe(15) + expect(result.totalCharacters).toBe(content.length) + expect(result.linesRead).toBe(3) // Counting starts at 1, and we read into line 3 + expect(result.totalLines).toBe(4) + expect(result.lastLineRead).toBe(3) + }) + + it("should handle single-line files", async () => { + const content = "This is a single line file with no newlines" + const filePath = await createTestFile("single-line.txt", content) + + const result = await readPartialContent(filePath, 20) + + expect(result.content).toBe("This is a single lin") + expect(result.charactersRead).toBe(20) + expect(result.linesRead).toBe(1) + expect(result.totalLines).toBe(1) + expect(result.lastLineRead).toBe(1) + }) + + it("should read entire file when maxChars exceeds file size", async () => { + const content = "Small\nFile\nContent" + const filePath = await createTestFile("small.txt", content) + + const result = await readPartialContent(filePath, 1000) + + expect(result.content).toBe(content) + expect(result.charactersRead).toBe(content.length) + expect(result.totalCharacters).toBe(content.length) + expect(result.linesRead).toBe(3) + expect(result.totalLines).toBe(3) + expect(result.lastLineRead).toBe(3) + }) + + it("should handle empty files", async () => { + const filePath = await createTestFile("empty.txt", "") + + const result = await readPartialContent(filePath, 10) + + expect(result.content).toBe("") + expect(result.charactersRead).toBe(0) + expect(result.totalCharacters).toBe(0) + expect(result.linesRead).toBe(0) + expect(result.totalLines).toBe(0) + expect(result.lastLineRead).toBe(0) + }) + + it("should handle maxChars of 0", async () => { + const content = "This content should not be read" + const filePath = await createTestFile("zero-chars.txt", content) + + const result = await 
readPartialContent(filePath, 0) + + expect(result.content).toBe("") + expect(result.charactersRead).toBe(0) + expect(result.linesRead).toBe(0) + expect(result.lastLineRead).toBe(0) + }) + }) + + describe("Line counting accuracy", () => { + it("should count lines correctly when stopping mid-line", async () => { + const content = "Line 1\nLine 2 is longer\nLine 3" + const filePath = await createTestFile("mid-line.txt", content) + + const result = await readPartialContent(filePath, 10) + + expect(result.content).toBe("Line 1\nLin") + expect(result.linesRead).toBe(2) // We're in line 2 + expect(result.lastLineRead).toBe(2) + }) + + it("should count lines correctly when stopping at newline", async () => { + const content = "Line 1\nLine 2\nLine 3" + const filePath = await createTestFile("at-newline.txt", content) + + const result = await readPartialContent(filePath, 7) // Exactly at the first newline + + expect(result.content).toBe("Line 1\n") + expect(result.linesRead).toBe(2) // We've entered line 2 + expect(result.lastLineRead).toBe(2) + }) + + it("should handle files with empty lines", async () => { + const content = "Line 1\n\nLine 3\n\n\nLine 6" + const filePath = await createTestFile("empty-lines.txt", content) + + const result = await readPartialContent(filePath, 15) + + expect(result.content).toBe("Line 1\n\nLine 3\n") + expect(result.linesRead).toBe(4) // We've entered line 4 + expect(result.totalLines).toBe(6) + }) + + it("should handle files ending with newline", async () => { + const content = "Line 1\nLine 2\n" + const filePath = await createTestFile("ending-newline.txt", content) + + const result = await readPartialContent(filePath, 100) + + expect(result.content).toBe(content) + expect(result.linesRead).toBe(3) // The empty line after the last newline + expect(result.totalLines).toBe(2) // countFileLines counts actual lines, not the trailing empty line + }) + }) + + describe("Large file handling", () => { + it("should handle large files with many lines", 
async () => { + const lines = Array.from({ length: 1000 }, (_, i) => `Line ${i + 1}`).join("\n") + const filePath = await createTestFile("many-lines.txt", lines) + + const result = await readPartialContent(filePath, 100) + + expect(result.charactersRead).toBe(100) + expect(result.totalLines).toBe(1000) + expect(result.linesRead).toBeGreaterThan(1) + expect(result.linesRead).toBeLessThan(50) // Should not have read too many lines + }) + + it("should handle very long single lines", async () => { + const content = "x".repeat(100000) // 100KB single line + const filePath = await createTestFile("long-single-line.txt", content) + + const result = await readPartialContent(filePath, 1000) + + expect(result.content).toBe("x".repeat(1000)) + expect(result.linesRead).toBe(1) + expect(result.totalLines).toBe(1) + expect(result.lastLineRead).toBe(1) + }) + }) + + describe("Unicode and special characters", () => { + it("should handle Unicode characters with line tracking", async () => { + const content = "Hello 世界!\n🌍 Émojis\nñoñó chars" + const filePath = await createTestFile("unicode-lines.txt", content) + + const result = await readPartialContent(filePath, 20) + + expect(result.linesRead).toBeGreaterThanOrEqual(2) + expect(result.totalLines).toBe(3) + }) + }) + + describe("Error handling", () => { + it("should reject when file does not exist", async () => { + const nonExistentPath = path.join(tempDir, "does-not-exist.txt") + + await expect(readPartialContent(nonExistentPath, 10)).rejects.toThrow() + }) + + it("should handle negative maxChars gracefully", async () => { + const content = "Test content" + const filePath = await createTestFile("negative-max.txt", content) + + const result = await readPartialContent(filePath, -5) + + expect(result.content).toBe("") + expect(result.charactersRead).toBe(0) + expect(result.linesRead).toBe(0) + }) + }) + }) + + describe("readPartialSingleLineContent (legacy)", () => { + describe("Basic functionality", () => { + it("should read partial 
content from a small file", async () => { + const content = "Hello, world! This is a test file." + const filePath = await createTestFile("small.txt", content) + + const result = await readPartialSingleLineContent(filePath, 10) + + expect(result).toBe("Hello, wor") + }) + + it("should read entire content when maxChars exceeds file size", async () => { + const content = "Short file" + const filePath = await createTestFile("short.txt", content) + + const result = await readPartialSingleLineContent(filePath, 100) + + expect(result).toBe(content) + }) + + it("should handle empty files", async () => { + const filePath = await createTestFile("empty.txt", "") + + const result = await readPartialSingleLineContent(filePath, 10) + + expect(result).toBe("") + }) + + it("should handle maxChars of 0", async () => { + const content = "This content should not be read" + const filePath = await createTestFile("zero-chars.txt", content) + + const result = await readPartialSingleLineContent(filePath, 0) + + expect(result).toBe("") + }) + }) + + describe("Large file handling", () => { + it("should handle large files efficiently", async () => { + // Create a large file (1MB of repeated text) + const chunk = "This is a repeated chunk of text that will be used to create a large file. 
" + const largeContent = chunk.repeat(Math.ceil((1024 * 1024) / chunk.length)) + const filePath = await createTestFile("large.txt", largeContent) + + const result = await readPartialSingleLineContent(filePath, 100) + + expect(result).toBe(largeContent.substring(0, 100)) + expect(result.length).toBe(100) + }) + + it("should handle very large maxChars values", async () => { + const content = "Small content for large maxChars test" + const filePath = await createTestFile("small-for-large-max.txt", content) + + const result = await readPartialSingleLineContent(filePath, 1000000) + + expect(result).toBe(content) + }) + }) + + describe("Unicode and special characters", () => { + it("should handle Unicode characters correctly", async () => { + const content = "Hello 世界! 🌍 Émojis and ñoñó characters" + const filePath = await createTestFile("unicode.txt", content) + + const result = await readPartialSingleLineContent(filePath, 15) + + // Should handle Unicode characters properly + expect(result.length).toBeLessThanOrEqual(15) + expect(result).toBe(content.substring(0, result.length)) + }) + + it("should handle newlines in content", async () => { + const content = "Line 1\nLine 2\nLine 3" + const filePath = await createTestFile("multiline.txt", content) + + const result = await readPartialSingleLineContent(filePath, 10) + + expect(result).toBe("Line 1\nLin") + }) + + it("should handle special characters and symbols", async () => { + const content = "Special chars: !@#$%^&*()_+-=[]{}|;':\",./<>?" 
+ const filePath = await createTestFile("special.txt", content) + + const result = await readPartialSingleLineContent(filePath, 20) + + expect(result).toBe("Special chars: !@#$%") + }) + }) + + describe("Edge cases", () => { + it("should handle exact character limit", async () => { + const content = "Exactly twenty chars" + const filePath = await createTestFile("exact.txt", content) + + const result = await readPartialSingleLineContent(filePath, 20) + + expect(result).toBe(content) + expect(result.length).toBe(20) + }) + + it("should handle maxChars = 1", async () => { + const content = "Single character test" + const filePath = await createTestFile("single-char.txt", content) + + const result = await readPartialSingleLineContent(filePath, 1) + + expect(result).toBe("S") + }) + + it("should handle files with only whitespace", async () => { + const content = " \t\n " + const filePath = await createTestFile("whitespace.txt", content) + + const result = await readPartialSingleLineContent(filePath, 5) + + expect(result).toBe(" \t\n") + }) + }) + + describe("Error handling", () => { + it("should reject when file does not exist", async () => { + const nonExistentPath = path.join(tempDir, "does-not-exist.txt") + + await expect(readPartialSingleLineContent(nonExistentPath, 10)).rejects.toThrow() + }) + + it("should reject when file path is invalid", async () => { + const invalidPath = "\0invalid\0path" + + await expect(readPartialSingleLineContent(invalidPath, 10)).rejects.toThrow() + }) + + it("should handle negative maxChars gracefully", async () => { + const content = "Test content" + const filePath = await createTestFile("negative-max.txt", content) + + const result = await readPartialSingleLineContent(filePath, -5) + + expect(result).toBe("") + }) + }) + + describe("Performance and memory efficiency", () => { + it("should not load entire large file into memory", async () => { + // Create a file larger than typical memory chunks + const largeContent = "x".repeat(5 * 
1024 * 1024) // 5MB file + const filePath = await createTestFile("memory-test.txt", largeContent) + + // Read only a small portion + const result = await readPartialSingleLineContent(filePath, 1000) + + expect(result).toBe("x".repeat(1000)) + expect(result.length).toBe(1000) + }) + + it("should handle multiple consecutive reads efficiently", async () => { + const content = "Repeated read test content that is somewhat long" + const filePath = await createTestFile("repeated-read.txt", content) + + // Perform multiple reads + const results = await Promise.all([ + readPartialSingleLineContent(filePath, 10), + readPartialSingleLineContent(filePath, 20), + readPartialSingleLineContent(filePath, 30), + ]) + + expect(results[0]).toBe(content.substring(0, 10)) + expect(results[1]).toBe(content.substring(0, 20)) + expect(results[2]).toBe(content.substring(0, 30)) + }) + }) + + describe("Stream handling", () => { + it("should handle normal stream completion", async () => { + const content = "Stream test content" + const filePath = await createTestFile("stream-test.txt", content) + + const result = await readPartialSingleLineContent(filePath, 10) + + expect(result).toBe("Stream tes") + }) + + it("should handle file access errors", async () => { + // Test with a directory instead of a file to trigger an error + await expect(readPartialSingleLineContent(tempDir, 10)).rejects.toThrow() + }) + }) + + describe("Boundary conditions", () => { + it("should handle chunk boundaries correctly", async () => { + // Create content that will span multiple chunks + const chunkSize = 16 * 1024 // Default highWaterMark + const content = "a".repeat(chunkSize + 100) + const filePath = await createTestFile("chunk-boundary.txt", content) + + const result = await readPartialSingleLineContent(filePath, chunkSize + 50) + + expect(result).toBe("a".repeat(chunkSize + 50)) + expect(result.length).toBe(chunkSize + 50) + }) + + it("should handle maxChars at chunk boundary", async () => { + const chunkSize 
= 16 * 1024 + const content = "b".repeat(chunkSize * 2) + const filePath = await createTestFile("exact-chunk.txt", content) + + const result = await readPartialSingleLineContent(filePath, chunkSize) + + expect(result).toBe("b".repeat(chunkSize)) + expect(result.length).toBe(chunkSize) + }) + }) + }) +}) diff --git a/src/integrations/misc/read-partial-content.ts b/src/integrations/misc/read-partial-content.ts new file mode 100644 index 0000000000..f31624577c --- /dev/null +++ b/src/integrations/misc/read-partial-content.ts @@ -0,0 +1,164 @@ +import { createReadStream } from "fs" +import * as fs from "fs/promises" +import { countFileLines } from "./line-counter" + +/** + * Result of a partial file read operation + */ +export interface PartialReadResult { + content: string + charactersRead: number + totalCharacters: number // from file stats + linesRead: number + totalLines: number // from line counter + lastLineRead: number // which line we stopped at +} + +/** + * Reads partial content from a file up to a specified character limit. + * Works for both single-line and multi-line files, tracking line numbers. + * Uses streaming to avoid loading the entire file into memory for very large files. 
+ * + * @param filePath - Path to the file to read + * @param maxChars - Maximum number of characters to read + * @returns Promise resolving to the partial read result with metadata + */ +export async function readPartialContent(filePath: string, maxChars: number): Promise { + // Get file stats and line count + const [stats, totalLines] = await Promise.all([fs.stat(filePath), countFileLines(filePath)]) + + const totalCharacters = stats.size + + // Handle edge cases + if (maxChars <= 0 || totalCharacters === 0) { + return { + content: "", + charactersRead: 0, + totalCharacters, + linesRead: 0, + totalLines, + lastLineRead: 0, + } + } + + return new Promise((resolve, reject) => { + // Use smaller chunks and set end position to limit reading + const stream = createReadStream(filePath, { + encoding: "utf8", + highWaterMark: 16 * 1024, // Smaller 16KB chunks for better control + start: 0, + end: Math.max(0, Math.min(maxChars * 2, maxChars + 1024 * 1024)), // Heuristic: read at most the lesser of (2x maxChars) or (maxChars + 1MB), but never less than 0, to balance memory use and ensure enough data for multi-byte chars + }) + + let content = "" + let totalRead = 0 + let currentLine = 1 + let streamDestroyed = false + let hasContent = false + + stream.on("data", (chunk: string | Buffer) => { + // Early exit if stream was already destroyed + if (streamDestroyed) { + return + } + + try { + const chunkStr = typeof chunk === "string" ? chunk : chunk.toString("utf8") + const remainingChars = maxChars - totalRead + + if (remainingChars <= 0) { + streamDestroyed = true + stream.destroy() + resolve({ + content, + charactersRead: totalRead, + totalCharacters, + linesRead: hasContent ? currentLine : 0, + totalLines, + lastLineRead: hasContent ? 
currentLine : 0, + }) + return + } + + let chunkToAdd: string + if (chunkStr.length <= remainingChars) { + chunkToAdd = chunkStr + totalRead += chunkStr.length + } else { + chunkToAdd = chunkStr.substring(0, remainingChars) + totalRead += remainingChars + } + + // Mark that we have content + if (chunkToAdd.length > 0) { + hasContent = true + } + + // Count newlines in the chunk we're adding + for (let i = 0; i < chunkToAdd.length; i++) { + if (chunkToAdd[i] === "\n") { + currentLine++ + } + } + + content += chunkToAdd + + // Check if we've reached the character limit + if (totalRead >= maxChars) { + streamDestroyed = true + stream.destroy() + + // Ensure we don't exceed maxChars + if (content.length > maxChars) { + content = content.substring(0, maxChars) + // Recount lines in the final content + currentLine = 1 + hasContent = content.length > 0 + for (let i = 0; i < content.length; i++) { + if (content[i] === "\n") { + currentLine++ + } + } + } + + resolve({ + content, + charactersRead: Math.min(totalRead, maxChars), + totalCharacters, + linesRead: hasContent ? currentLine : 0, + totalLines, + lastLineRead: hasContent ? currentLine : 0, + }) + } + } catch (error) { + streamDestroyed = true + stream.destroy() + reject(error) + } + }) + + stream.on("end", () => { + resolve({ + content, + charactersRead: totalRead, + totalCharacters, + linesRead: hasContent ? currentLine : 0, + totalLines, + lastLineRead: hasContent ? currentLine : 0, + }) + }) + + stream.on("error", (error: Error) => { + reject(error) + }) + }) +} + +/** + * Legacy function for backward compatibility. 
+ * @deprecated Use readPartialContent instead + */ +export async function readPartialSingleLineContent(filePath: string, maxChars: number): Promise { + const result = await readPartialContent(filePath, maxChars) + return result.content +} diff --git a/src/shared/__tests__/providerFormat.spec.ts b/src/shared/__tests__/providerFormat.spec.ts new file mode 100644 index 0000000000..a23cf39e72 --- /dev/null +++ b/src/shared/__tests__/providerFormat.spec.ts @@ -0,0 +1,85 @@ +import { describe, it, expect } from "vitest" +import { getFormatForProvider, isVertexAnthropicModel } from "../api" +import { ProviderName } from "@roo-code/types" + +describe("providerFormat", () => { + describe("getFormatForProvider", () => { + it("should return 'anthropic' for Anthropic-based providers", () => { + const anthropicProviders: ProviderName[] = ["anthropic", "bedrock", "vertex", "claude-code", "requesty"] + + anthropicProviders.forEach((provider) => { + expect(getFormatForProvider(provider)).toBe("anthropic") + }) + }) + + it("should return 'openai' for OpenAI-based providers", () => { + const openaiProviders: ProviderName[] = [ + "openai", + "openai-native", + "deepseek", + "moonshot", + "xai", + "groq", + "chutes", + "mistral", + "ollama", + "lmstudio", + "litellm", + "huggingface", + "glama", + "unbound", + "vscode-lm", + "human-relay", + "fake-ai", + ] + + openaiProviders.forEach((provider) => { + expect(getFormatForProvider(provider)).toBe("openai") + }) + }) + + it("should return 'gemini' for Gemini-based providers", () => { + const geminiProviders: ProviderName[] = ["gemini", "gemini-cli"] + + geminiProviders.forEach((provider) => { + expect(getFormatForProvider(provider)).toBe("gemini") + }) + }) + + it("should return 'openrouter' for OpenRouter provider", () => { + expect(getFormatForProvider("openrouter")).toBe("openrouter") + }) + + it("should return undefined for undefined provider", () => { + expect(getFormatForProvider(undefined)).toBeUndefined() + }) + + it("should 
return undefined for unknown providers", () => { + // Test with a provider that doesn't exist in the switch statement + // by casting to bypass TypeScript type checking + expect(getFormatForProvider("unknown-provider" as ProviderName)).toBeUndefined() + }) + }) + + describe("isVertexAnthropicModel", () => { + it("should return true for Claude models", () => { + expect(isVertexAnthropicModel("claude-3-opus")).toBe(true) + expect(isVertexAnthropicModel("claude-3-sonnet")).toBe(true) + expect(isVertexAnthropicModel("claude-3-haiku")).toBe(true) + expect(isVertexAnthropicModel("CLAUDE-3-OPUS")).toBe(true) // Case insensitive + expect(isVertexAnthropicModel("anthropic.claude-v2")).toBe(true) + }) + + it("should return false for non-Claude models", () => { + expect(isVertexAnthropicModel("gemini-pro")).toBe(false) + expect(isVertexAnthropicModel("gemini-1.5-pro")).toBe(false) + expect(isVertexAnthropicModel("palm-2")).toBe(false) + expect(isVertexAnthropicModel("gpt-4")).toBe(false) + }) + + it("should return false for undefined or empty model ID", () => { + expect(isVertexAnthropicModel(undefined)).toBe(false) + expect(isVertexAnthropicModel("")).toBe(false) + }) + }) +}) diff --git a/src/shared/api.ts b/src/shared/api.ts index 8cbfc72133..05ccde74c6 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -1,10 +1,82 @@ import { type ModelInfo, type ProviderSettings, + type ProviderName, ANTHROPIC_DEFAULT_MAX_TOKENS, CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS, } from "@roo-code/types" +// Provider Format Mapping + +/** + * Maps API provider names to their corresponding format for model parameter handling. + * This centralizes the provider-to-format mapping logic used across the codebase. 
+ * + * @param apiProvider - The API provider name + * @returns The format string used by getModelParams and getModelMaxOutputTokens, or undefined if not mapped + */ +export function getFormatForProvider( + apiProvider: ProviderName | undefined, +): "anthropic" | "openai" | "gemini" | "openrouter" | undefined { + if (!apiProvider) { + return undefined + } + + switch (apiProvider) { + // Anthropic-based providers + case "anthropic": + case "bedrock": + case "vertex": // Note: vertex can use either anthropic or gemini format depending on the model + case "claude-code": + case "requesty": // Uses anthropic format based on code analysis + return "anthropic" + + // OpenAI-based providers + case "openai": + case "openai-native": + case "deepseek": + case "moonshot": + case "xai": + case "groq": + case "chutes": + case "mistral": + case "ollama": + case "lmstudio": + case "litellm": + case "huggingface": + case "glama": + case "unbound": + case "vscode-lm": + case "human-relay": + case "fake-ai": + return "openai" + + // Gemini-based providers + case "gemini": + case "gemini-cli": + return "gemini" + + // OpenRouter + case "openrouter": + return "openrouter" + + // Providers that don't have a specific format mapping + default: + return undefined + } +} + +/** + * Special case: Vertex provider can use either anthropic or gemini format depending on the model. + * This function checks if a vertex model should use anthropic format. + * + * @param modelId - The model ID to check + * @returns true if the model should use anthropic format + */ +export function isVertexAnthropicModel(modelId?: string): boolean { + return modelId?.toLowerCase().includes("claude") ?? 
false +} + // ApiHandlerOptions export type ApiHandlerOptions = Omit @@ -70,14 +142,17 @@ export const getModelMaxOutputTokens = ({ return settings.claudeCodeMaxOutputTokens || CLAUDE_CODE_DEFAULT_MAX_OUTPUT_TOKENS } + // If format is not provided, derive it from the provider settings + const effectiveFormat = format ?? getFormatForProvider(settings?.apiProvider) + if (shouldUseReasoningBudget({ model, settings })) { return settings?.modelMaxTokens || DEFAULT_HYBRID_REASONING_MODEL_MAX_TOKENS } const isAnthropicContext = modelId.includes("claude") || - format === "anthropic" || - (format === "openrouter" && modelId.startsWith("anthropic/")) + effectiveFormat === "anthropic" || + (effectiveFormat === "openrouter" && modelId.startsWith("anthropic/")) // For "Hybrid" reasoning models, discard the model's actual maxTokens for Anthropic contexts if (model.supportsReasoningBudget && isAnthropicContext) { @@ -95,7 +170,7 @@ export const getModelMaxOutputTokens = ({ } // For non-Anthropic formats without explicit maxTokens, return undefined - if (format) { + if (effectiveFormat) { return undefined }