From 79ad9c353d34c9cd7bef572953f7162b9fcf3412 Mon Sep 17 00:00:00 2001
From: ThomsenDrake
Date: Thu, 29 May 2025 13:45:54 -0400
Subject: [PATCH] Add XML conversion helpers so that the VS Code LM API
 properly emits and executes tool calls.

---
 .changeset/eager-buckets-feel.md              |   5 -
 .changeset/spotty-steaks-brake.md             |   5 -
 CHANGELOG.md                                  |  10 +
 src/api/providers/__tests__/vscode-lm.test.ts | 125 ++++-
 src/api/providers/vscode-lm.ts                | 107 +++-
 .../__tests__/parseAssistantMessage.test.ts   |  32 +-
 .../parseAssistantMessage.ts                  |   5 +
 .../parseAssistantMessageV2.ts                |   5 +
 src/package.json                              |   2 +-
 webview-ui/src/components/chat/TaskHeader.tsx |  19 +-
 10 files changed, 275 insertions(+), 40 deletions(-)
 delete mode 100644 .changeset/eager-buckets-feel.md
 delete mode 100644 .changeset/spotty-steaks-brake.md

diff --git a/.changeset/eager-buckets-feel.md b/.changeset/eager-buckets-feel.md
deleted file mode 100644
index 801da70a10..0000000000
--- a/.changeset/eager-buckets-feel.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-"roo-cline": major
----
-
-Default enabled autoCondenseContext and moved settings out of Experimental
diff --git a/.changeset/spotty-steaks-brake.md b/.changeset/spotty-steaks-brake.md
deleted file mode 100644
index 47fa9afc09..0000000000
--- a/.changeset/spotty-steaks-brake.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-"roo-cline": patch
----
-
-Skips condense operations if the context size grows & shows an error
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4a690d9622..28ac818038 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,15 @@
 # Roo Code Changelog
 
+## 4.0.0
+
+### Major Changes
+
+- Default enabled autoCondenseContext and moved settings out of Experimental
+
+### Patch Changes
+
+- Skips condense operations if the context size grows & shows an error
+
 ## [3.18.5] - 2025-05-27
 
 - Add thinking controls for Requesty (thanks @dtrugman!)
diff --git a/src/api/providers/__tests__/vscode-lm.test.ts b/src/api/providers/__tests__/vscode-lm.test.ts
index 59d49f764e..b5790ec7a4 100644
--- a/src/api/providers/__tests__/vscode-lm.test.ts
+++ b/src/api/providers/__tests__/vscode-lm.test.ts
@@ -193,6 +193,8 @@ describe("VsCodeLmHandler", () => {
                 callId: "call-1",
             }
 
+            const toolTag = `<${toolCallData.name}><operation>add</operation><numbers>[2,2]</numbers></${toolCallData.name}>`
+
             mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                 stream: (async function* () {
                     yield new vscode.LanguageModelToolCallPart(
@@ -203,7 +205,7 @@ describe("VsCodeLmHandler", () => {
                     return
                 })(),
                 text: (async function* () {
-                    yield JSON.stringify({ type: "tool_call", ...toolCallData })
+                    yield toolTag
                     return
                 })(),
             })
@@ -217,8 +219,127 @@ describe("VsCodeLmHandler", () => {
             expect(chunks).toHaveLength(2) // Tool call chunk + usage chunk
             expect(chunks[0]).toEqual({
                 type: "text",
-                text: JSON.stringify({ type: "tool_call", ...toolCallData }),
+                text: toolTag,
+            })
+        })
+
+        it("should escape '<' characters in tool call input", async () => {
+            const systemPrompt = "You are a helpful assistant"
+            const messages: Anthropic.Messages.MessageParam[] = [
+                {
+                    role: "user" as const,
+                    content: "Test < symbol",
+                },
+            ]
+
+            const toolCallData = {
+                name: "tester",
+                arguments: { query: "1 < 2" },
+                callId: "call-less",
+            }
+
+            const escaped = `<${toolCallData.name}><query>1 &lt; 2</query></${toolCallData.name}>`
+
+            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
+                stream: (async function* () {
+                    yield new vscode.LanguageModelToolCallPart(
+                        toolCallData.callId,
+                        toolCallData.name,
+                        toolCallData.arguments,
+                    )
+                    return
+                })(),
+                text: (async function* () {
+                    yield escaped
+                    return
+                })(),
+            })
+
+            const stream = handler.createMessage(systemPrompt, messages)
+            const chunks: any[] = []
+            for await (const chunk of stream) {
+                chunks.push(chunk)
+            }
+
+            expect(chunks[0]).toEqual({ type: "text", text: escaped })
+        })
+
+        it("should escape '&' characters in tool call input", async () => {
+            const systemPrompt = "You are a helpful assistant"
+            const messages: Anthropic.Messages.MessageParam[] = [
+                {
+                    role: "user" as const,
+                    content: "Test & symbol",
+                },
+            ]
+
+            const toolCallData = {
+                name: "tester",
+                arguments: { query: "A & B" },
+                callId: "call-amp",
+            }
+
+            const escaped = `<${toolCallData.name}><query>A &amp; B</query></${toolCallData.name}>`
+
+            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
+                stream: (async function* () {
+                    yield new vscode.LanguageModelToolCallPart(
+                        toolCallData.callId,
+                        toolCallData.name,
+                        toolCallData.arguments,
+                    )
+                    return
+                })(),
+                text: (async function* () {
+                    yield escaped
+                    return
+                })(),
+            })
+
+            const stream = handler.createMessage(systemPrompt, messages)
+            const chunks: any[] = []
+            for await (const chunk of stream) {
+                chunks.push(chunk)
+            }
+
+            expect(chunks[0]).toEqual({ type: "text", text: escaped })
+        })
+
+        it("should convert JSON tool call text to XML", async () => {
+            const systemPrompt = "You are a helpful assistant"
+            const messages: Anthropic.Messages.MessageParam[] = [
+                {
+                    role: "user" as const,
+                    content: "Do something",
+                },
+            ]
+
+            const json = JSON.stringify({
+                name: "calculator",
+                input: { op: "add", nums: [1, 2] },
+                callId: "call-json",
             })
+
+            const expected = `<calculator><op>add</op><nums>[1,2]</nums></calculator>`
+
+            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
+                stream: (async function* () {
+                    yield new vscode.LanguageModelTextPart(json)
+                    return
+                })(),
+                text: (async function* () {
+                    yield json
+                    return
+                })(),
+            })
+
+            const stream = handler.createMessage(systemPrompt, messages)
+            const chunks: any[] = []
+            for await (const chunk of stream) {
+                chunks.push(chunk)
+            }
+
+            expect(chunks[0]).toEqual({ type: "text", text: expected })
         })
 
         it("should handle errors", async () => {
diff --git a/src/api/providers/vscode-lm.ts b/src/api/providers/vscode-lm.ts
index 5990193ecb..4676cdcf62 100644
--- a/src/api/providers/vscode-lm.ts
+++ b/src/api/providers/vscode-lm.ts
@@ -12,6 +12,45 @@ import { convertToVsCodeLmMessages } from "../transform/vscode-lm-format"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 
+// Escape &, < and > characters so tool call values can be safely
+// embedded in XML-like tags.
+export function escapeXml(value: string): string {
+    return value.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;")
+}
+
+// Attempt to convert a JSON string describing a tool call to the XML tag format
+// used for tool call text. The expected JSON shape is:
+// `{ name: string, input?: Record<string, unknown>, arguments?: Record<string, unknown>, callId?: string }`
+// Returns the XML string on success or `null` if parsing fails.
+export function convertJsonToolCallToXml(json: string): string | null {
+    try {
+        const parsed = JSON.parse(json)
+
+        if (!parsed || typeof parsed !== "object") {
+            return null
+        }
+
+        const name: unknown = (parsed as any).name
+        const input: unknown = (parsed as any).input ?? (parsed as any).arguments
+
+        if (typeof name !== "string" || !input || typeof input !== "object") {
+            return null
+        }
+
+        let tag = `<${name}>`
+        for (const [key, value] of Object.entries(input as Record<string, unknown>)) {
+            const rawVal = typeof value === "object" ? JSON.stringify(value) : String(value)
+            const val = escapeXml(rawVal)
+            tag += `<${key}>${val}</${key}>`
+        }
+        tag += `</${name}>`
+
+        return tag
+    } catch {
+        return null
+    }
+}
+
 /**
  * Handles interaction with VS Code's Language Model API for chat-based operations.
  * This handler extends BaseProvider to provide VS Code LM specific functionality.
@@ -388,10 +427,13 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHandler {
                 continue
             }
 
-                accumulatedText += chunk.value
+                const converted = convertJsonToolCallToXml(chunk.value)
+                const textValue = converted || chunk.value
+
+                accumulatedText += textValue
                 yield {
                     type: "text",
-                    text: chunk.value,
+                    text: textValue,
                 }
             } else if (chunk instanceof vscode.LanguageModelToolCallPart) {
                 try {
@@ -412,16 +454,21 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHandler {
                         continue
                     }
 
-                    // Convert tool calls to text format with proper error handling
-                    const toolCall = {
-                        type: "tool_call",
-                        name: chunk.name,
-                        arguments: chunk.input,
-                        callId: chunk.callId,
+                    // Convert tool calls to XML style tag format
+                    const buildToolTag = (name: string, input: Record<string, unknown>): string => {
+                        let tag = `<${name}>`
+                        for (const [key, value] of Object.entries(input)) {
+                            const rawVal = typeof value === "object" ? JSON.stringify(value) : String(value)
+                            const val = escapeXml(rawVal)
+                            tag += `<${key}>${val}</${key}>`
+                        }
+                        tag += `</${name}>`
+                        return tag
                     }
 
-                    const toolCallText = JSON.stringify(toolCall)
-                    accumulatedText += toolCallText
+                    const toolCallText = buildToolTag(chunk.name, chunk.input as Record<string, unknown>)
+                    const normalizedToolCall = normalizeVsCodeActionTags(toolCallText)
+                    accumulatedText += normalizedToolCall
 
                     // Log tool call for debugging
                     console.debug("Roo Code <Language Model API>: Processing tool call:", {
@@ -432,7 +479,7 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHandler {
 
                     yield {
                         type: "text",
-                        text: toolCallText,
+                        text: normalizedToolCall,
                     }
                 } catch (error) {
                     console.error("Roo Code <Language Model API>: Failed to process tool call:", error)
@@ -578,3 +625,41 @@ export async function getVsCodeLmModels() {
         return []
     }
 }
+/**
+ * Normalizes VS Code action/tool call tags by removing redundant whitespace,
+ * ensuring proper XML-like formatting, and preventing malformed tags.
+ * This is useful for tool call serialization to ensure consistency.
+ *
+ * @param toolCallText - The tool call text in XML-like format
+ * @returns The normalized tool call text
+ */
+function normalizeVsCodeActionTags(toolCallText: string): string {
+    // Remove leading/trailing whitespace and collapse whitespace between tags
+    const normalized = toolCallText.trim().replace(/>\s+</g, "><")
+
+    // Optionally, ensure all tags are properly closed (basic check)
+    // (This does not fully validate XML, just a simple sanity check)
+    const tagStack: string[] = []
+    const tagRegex = /<\/?([a-zA-Z0-9_\-]+)[^>]*>/g
+    let match: RegExpExecArray | null
+    while ((match = tagRegex.exec(normalized)) !== null) {
+        const [tag, tagName] = match
+        if (tag.startsWith("</")) {
+            // Closing tag
+            tagStack.pop()
+        } else if (!tag.endsWith("/>")) {
+            // Opening tag (not self-closing)
+            tagStack.push(tagName)
+        }
+    }
+    // If the stack is not empty, tags are unbalanced; return the input as-is
+    if (tagStack.length > 0) {
+        return toolCallText
+    }
+
+    return normalized
+}
diff --git a/src/core/assistant-message/__tests__/parseAssistantMessage.test.ts b/src/core/assistant-message/__tests__/parseAssistantMessage.test.ts
index 19f88a91d7..093093ba10 100644
--- a/src/core/assistant-message/__tests__/parseAssistantMessage.test.ts
+++ b/src/core/assistant-message/__tests__/parseAssistantMessage.test.ts
@@ -273,10 +273,10 @@ const isEmptyTextContent = (block: AssistantMessageContent) =>
 
     it("should handle multi-line parameters", () => {
         const message = `<write_to_file><path>file.ts</path><content>
-line 1
-line 2
-line 3
-</content><line_count>3</line_count></write_to_file>`
+line 1
+line 2
+line 3
+</content><line_count>3</line_count></write_to_file>`
         const result = parser(message).filter((block) => !isEmptyTextContent(block))
 
         expect(result).toHaveLength(1)
@@ -291,6 +291,30 @@ const isEmptyTextContent = (block: AssistantMessageContent) =>
         expect(toolUse.partial).toBe(false)
     })
 
+    it("should allow whitespace in tool and parameter tags", () => {
+        const message = "< read_file >< path >src/file.ts</ path ></ read_file >"
+        const result = parser(message).filter((block) => !isEmptyTextContent(block))
+
+        expect(result).toHaveLength(1)
+        const toolUse = result[0] as ToolUse
+        expect(toolUse.type).toBe("tool_use")
+        expect(toolUse.name).toBe("read_file")
+        expect(toolUse.params.path).toBe("src/file.ts")
+        expect(toolUse.partial).toBe(false)
+    })
+
+    it("should trim parameter values surrounded by whitespace and newlines", () => {
+        const message = `<read_file><path>\n src/file.ts \n</path></read_file>`
+        const result = parser(message).filter((block) => !isEmptyTextContent(block))
+
+        expect(result).toHaveLength(1)
+        const toolUse = result[0] as ToolUse
+        expect(toolUse.type).toBe("tool_use")
+        expect(toolUse.name).toBe("read_file")
+        expect(toolUse.params.path).toBe("src/file.ts")
+        expect(toolUse.partial).toBe(false)
+    })
+
     it("should handle a complex message with multiple content types", () => {
         const message = `I'll help you with that task.
 
diff --git a/src/core/assistant-message/parseAssistantMessage.ts b/src/core/assistant-message/parseAssistantMessage.ts
index 2fe747a6df..53937f3514 100644
--- a/src/core/assistant-message/parseAssistantMessage.ts
+++ b/src/core/assistant-message/parseAssistantMessage.ts
@@ -4,7 +4,12 @@ import { TextContent, ToolUse, ToolParamName, toolParamNames } from "../../shared/tools"
 
 export type AssistantMessageContent = TextContent | ToolUse
 
+function normalizeTags(input: string): string {
+    return input.replace(/<\s*(\/?)\s*([^>]+?)\s*>/g, "<$1$2>")
+}
+
 export function parseAssistantMessage(assistantMessage: string): AssistantMessageContent[] {
+    assistantMessage = normalizeTags(assistantMessage)
     let contentBlocks: AssistantMessageContent[] = []
     let currentTextContent: TextContent | undefined = undefined
     let currentTextContentStartIndex = 0
diff --git a/src/core/assistant-message/parseAssistantMessageV2.ts b/src/core/assistant-message/parseAssistantMessageV2.ts
index 6d3594cf60..7d8216fcbc 100644
--- a/src/core/assistant-message/parseAssistantMessageV2.ts
+++ b/src/core/assistant-message/parseAssistantMessageV2.ts
@@ -37,7 +37,12 @@ export type AssistantMessageContent = TextContent | ToolUse
  * `true`.
  */
 
+function normalizeTags(input: string): string {
+    return input.replace(/<\s*(\/?)\s*([^>]+?)\s*>/g, "<$1$2>")
+}
+
 export function parseAssistantMessageV2(assistantMessage: string): AssistantMessageContent[] {
+    assistantMessage = normalizeTags(assistantMessage)
     const contentBlocks: AssistantMessageContent[] = []
 
     let currentTextContentStart = 0 // Index where the current text block started.
diff --git a/src/package.json b/src/package.json
index 3a391be0af..b071425f62 100644
--- a/src/package.json
+++ b/src/package.json
@@ -3,7 +3,7 @@
     "displayName": "%extension.displayName%",
     "description": "%extension.description%",
     "publisher": "RooVeterinaryInc",
-    "version": "3.18.5",
+    "version": "4.0.0",
     "icon": "assets/icons/icon.png",
     "galleryBanner": {
         "color": "#617A91",
diff --git a/webview-ui/src/components/chat/TaskHeader.tsx b/webview-ui/src/components/chat/TaskHeader.tsx
index 71cbde10b7..abfc6fd41e 100644
--- a/webview-ui/src/components/chat/TaskHeader.tsx
+++ b/webview-ui/src/components/chat/TaskHeader.tsx
@@ -58,16 +58,6 @@ const TaskHeader = ({
 
     const { width: windowWidth } = useWindowSize()
 
-    const condenseButton = (
-
-    )
-
     return (
-                    {condenseButton}
+
                     {!!totalCost && <VSCodeBadge>${totalCost.toFixed(2)}</VSCodeBadge>}
                 )}
@@ -162,7 +158,6 @@ const TaskHeader = ({
                             : undefined
                     }
                 />
-                {condenseButton}
             )}
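
Usage sketch: how the helpers introduced above are expected to compose, assuming the
implementations shown in the hunks. The import paths are abbreviated and the result
comments are illustrative, derived from reading the code rather than from a test run.

    import { convertJsonToolCallToXml, escapeXml } from "./src/api/providers/vscode-lm"
    import { parseAssistantMessage } from "./src/core/assistant-message/parseAssistantMessage"

    // Values are escaped before being wrapped in XML-like tags.
    escapeXml("1 < 2 & 3")
    // -> "1 &lt; 2 &amp; 3"

    // A tool call that the VS Code LM API streams as plain JSON text...
    const json = JSON.stringify({ name: "read_file", input: { path: "src/app.ts" }, callId: "call-1" })

    // ...is rewritten into the XML tag format the assistant-message parser consumes.
    const xml = convertJsonToolCallToXml(json)
    // -> "<read_file><path>src/app.ts</path></read_file>"

    // normalizeTags() inside the parser also tolerates stray whitespace in tags, so
    // "< read_file >< path >src/app.ts</path></read_file>" parses the same way.
    const blocks = parseAssistantMessage(xml ?? "")
    // -> roughly [{ type: "tool_use", name: "read_file", params: { path: "src/app.ts" }, partial: false }]

The same buildToolTag shape is applied to LanguageModelToolCallPart chunks, so both the
JSON text path and the structured tool-call path end up emitting the tag format that the
parser turns into executable tool uses.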