diff --git a/extensions/cli/src/stream/streamChatResponse.ts b/extensions/cli/src/stream/streamChatResponse.ts
index 2c7ef956611..f55e6713f47 100644
--- a/extensions/cli/src/stream/streamChatResponse.ts
+++ b/extensions/cli/src/stream/streamChatResponse.ts
@@ -1,6 +1,7 @@
 import { ModelConfig } from "@continuedev/config-yaml";
 import { BaseLlmApi } from "@continuedev/openai-adapters";
 import type { ChatHistoryItem } from "core/index.js";
+import { compileChatMessages } from "core/llm/countTokens.js";
 import { convertFromUnifiedHistoryWithSystemMessage } from "core/util/messageConversion.js";
 import * as dotenv from "dotenv";
 import type {
@@ -18,7 +19,6 @@ import {
   withExponentialBackoff,
 } from "../util/exponentialBackoff.js";
 import { logger } from "../util/logger.js";
-import { validateContextLength } from "../util/tokenizer.js";
 import { getAllTools, handleToolCalls } from "./handleToolCalls.js";
 import { handleAutoCompaction } from "./streamChatResponse.autoCompaction.js";
 
@@ -148,20 +148,57 @@ export async function processStreamingResponse(
     tools,
   } = options;
 
-  // Validate context length before making the request
-  const validation = validateContextLength(chatHistory, model);
-  if (!validation.isValid) {
-    throw new Error(`Context length validation failed: ${validation.error}`);
-  }
-
-  // Get fresh system message and inject it
+  // Get fresh system message
   const systemMessage = await services.systemMessage.getSystemMessage(
     services.toolPermissions.getState().currentMode,
   );
-  const openaiChatHistory = convertFromUnifiedHistoryWithSystemMessage(
+
+  // Convert unified history to OpenAI chat format
+  const openaiMessages = convertFromUnifiedHistoryWithSystemMessage(
     chatHistory,
     systemMessage,
   ) as ChatCompletionMessageParam[];
+
+  // Convert to ChatMessage format and use compileChatMessages to handle pruning
+  const contextLength = model.contextLength || 4096;
+  const maxTokens = model.defaultCompletionOptions?.maxTokens || 1024;
+
+  const chatMessages = openaiMessages.map((msg) => ({
+    role: msg.role,
+    content: msg.content || "",
+    ...("tool_calls" in msg && msg.tool_calls
+      ? { toolCalls: msg.tool_calls }
+      : {}),
+    ...("tool_call_id" in msg && msg.tool_call_id
+      ? { toolCallId: msg.tool_call_id }
+      : {}),
+  }));
+
+  const result = compileChatMessages({
+    modelName: model.model,
+    msgs: chatMessages,
+    knownContextLength: contextLength,
+    maxTokens,
+    supportsImages: false,
+    tools,
+  });
+
+  if (result.didPrune) {
+    logger.info("Chat history pruned to fit context length", {
+      originalLength: chatHistory.length,
+      prunedLength: result.compiledChatMessages.length,
+      contextPercentage: `${(result.contextPercentage * 100).toFixed(1)}%`,
+    });
+  }
+
+  // Convert back to OpenAI format
+  const openaiChatHistory = result.compiledChatMessages.map((msg: any) => ({
+    role: msg.role,
+    content: msg.content,
+    ...(msg.toolCalls ? { tool_calls: msg.toolCalls } : {}),
+    ...(msg.toolCallId ? { tool_call_id: msg.toolCallId } : {}),
+  })) as ChatCompletionMessageParam[];
+
   const requestStartTime = Date.now();
 
   const streamFactory = async (retryAbortSignal: AbortSignal) => {
diff --git a/extensions/cli/src/tools/runTerminalCommand.ts b/extensions/cli/src/tools/runTerminalCommand.ts
index 00204d55bd1..7f9d2336881 100644
--- a/extensions/cli/src/tools/runTerminalCommand.ts
+++ b/extensions/cli/src/tools/runTerminalCommand.ts
@@ -13,6 +13,32 @@ import {
 
 import { Tool } from "./types.js";
 
+// Maximum number of lines and characters to return from command output
+const MAX_OUTPUT_LINES = 5000;
+const MAX_OUTPUT_CHARS = 200000;
+
+// Helper function to truncate command output by both lines and characters.
+// Keeps the LAST lines/chars to capture test/install outcomes.
+function truncateOutput(output: string): string {
+  const lines = output.split("\n");
+  let truncated = output;
+  let truncationMsg = "";
+
+  // First check character limit - keep last MAX_OUTPUT_CHARS characters
+  if (output.length > MAX_OUTPUT_CHARS) {
+    const startIndex = output.length - MAX_OUTPUT_CHARS;
+    truncated = output.substring(startIndex);
+    truncationMsg = `[Output truncated: showing last ${MAX_OUTPUT_CHARS} characters of ${output.length} total]\n\n`;
+  }
+  // Then check line limit (only if not already truncated by characters) - keep last MAX_OUTPUT_LINES lines
+  else if (lines.length > MAX_OUTPUT_LINES) {
+    truncated = lines.slice(-MAX_OUTPUT_LINES).join("\n");
+    truncationMsg = `[Output truncated: showing last ${MAX_OUTPUT_LINES} lines of ${lines.length} total]\n\n`;
+  }
+
+  return truncationMsg ? truncationMsg + truncated : truncated;
+}
+
 // Helper function to use login shell on Unix/macOS and PowerShell on Windows
 function getShellCommand(command: string): { shell: string; args: string[] } {
   if (process.platform === "win32") {
@@ -99,18 +125,9 @@ Commands are automatically executed from the current working directory (${proces
         let output = stdout + (stderr ? `\nStderr: ${stderr}` : "");
         output += `\n\n[Command timed out after ${TIMEOUT_MS / 1000} seconds of no output]`;
 
-        // Truncate output if it has too many lines
-        const lines = output.split("\n");
-        if (lines.length > 5000) {
-          const truncatedOutput = lines.slice(0, 5000).join("\n");
-          resolve(
-            truncatedOutput +
-              `\n\n[Output truncated to first 5000 lines of ${lines.length} total]`,
-          );
-          return;
-        }
-
-        resolve(output);
+        // Truncate output by both lines and characters
+        const truncatedOutput = truncateOutput(output);
+        resolve(truncatedOutput);
       }, TIMEOUT_MS);
     };
 
@@ -155,18 +172,9 @@ Commands are automatically executed from the current working directory (${proces
         output = stdout + `\nStderr: ${stderr}`;
       }
 
-      // Truncate output if it has too many lines
-      const lines = output.split("\n");
-      if (lines.length > 5000) {
-        const truncatedOutput = lines.slice(0, 5000).join("\n");
-        resolve(
-          truncatedOutput +
-            `\n\n[Output truncated to first 5000 lines of ${lines.length} total]`,
-        );
-        return;
-      }
-
-      resolve(output);
+      // Truncate output by both lines and characters
+      const truncatedOutput = truncateOutput(output);
+      resolve(truncatedOutput);
     });
 
     child.on("error", (error) => {
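Reviewer note: the subtle part of the streamChatResponse.ts change is the field-name round trip between the OpenAI wire format (`tool_calls`/`tool_call_id`) and the internal shape (`toolCalls`/`toolCallId`) that `compileChatMessages` consumes. Below is a minimal, self-contained sketch of that mapping; the `WireMessage`/`InternalMessage` types are illustrative stand-ins, not Continue's real `ChatCompletionMessageParam` or `ChatMessage` definitions.

```ts
// Illustrative stand-in types for the two message shapes in this diff.
type WireMessage = {
  role: string;
  content: string | null;
  tool_calls?: unknown[];
  tool_call_id?: string;
};

type InternalMessage = {
  role: string;
  content: string;
  toolCalls?: unknown[];
  toolCallId?: string;
};

// Wire -> internal, mirroring the map done before compileChatMessages.
function toInternal(msg: WireMessage): InternalMessage {
  return {
    role: msg.role,
    content: msg.content || "",
    // Conditional spread keeps the keys absent (rather than set to
    // undefined) when a message carries no tool data.
    ...(msg.tool_calls ? { toolCalls: msg.tool_calls } : {}),
    ...(msg.tool_call_id ? { toolCallId: msg.tool_call_id } : {}),
  };
}

// Internal -> wire, mirroring the map done after pruning.
function toWire(msg: InternalMessage): WireMessage {
  return {
    role: msg.role,
    content: msg.content,
    ...(msg.toolCalls ? { tool_calls: msg.toolCalls } : {}),
    ...(msg.toolCallId ? { tool_call_id: msg.toolCallId } : {}),
  };
}
```

One point worth checking in review: the forward map copies only role, content, and the two tool fields, so any other properties on the wire message (e.g. `name` on function messages) would be dropped by the round trip.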
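The runTerminalCommand.ts change flips truncation from head-keeping (first 5000 lines) to tail-keeping, so the end of a long test or install log survives. A quick standalone sketch of the same strategy, with scaled-down limits purely for demonstration (the real helper uses MAX_OUTPUT_LINES = 5000 and MAX_OUTPUT_CHARS = 200000):

```ts
// Demo-sized limits; the production constants are much larger.
const DEMO_MAX_LINES = 5;
const DEMO_MAX_CHARS = 80;

// Same strategy as truncateOutput: check characters first, then lines,
// always keeping the tail of the output.
function truncateTail(output: string): string {
  const lines = output.split("\n");
  if (output.length > DEMO_MAX_CHARS) {
    const tail = output.substring(output.length - DEMO_MAX_CHARS);
    return `[Output truncated: showing last ${DEMO_MAX_CHARS} characters of ${output.length} total]\n\n${tail}`;
  }
  if (lines.length > DEMO_MAX_LINES) {
    const tail = lines.slice(-DEMO_MAX_LINES).join("\n");
    return `[Output truncated: showing last ${DEMO_MAX_LINES} lines of ${lines.length} total]\n\n${tail}`;
  }
  return output;
}

// Ten lines in, five lines out: the pass/fail summary at the end of a
// long log is what the model actually needs to see.
const fakeLog = Array.from({ length: 10 }, (_, i) => `line ${i + 1}`).join("\n");
console.log(truncateTail(fakeLog));
// -> "[Output truncated: showing last 5 lines of 10 total]" followed by lines 6-10
```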