Skip to content

Commit 4d9519e

Browse files
authored
🐛 fix: when asking the agent to write code, the code does not appear in the code box
2 parents d9c0d12 + c6f91b9 commit 4d9519e

File tree

9 files changed

+511
-718
lines changed

9 files changed

+511
-718
lines changed

frontend/app/[locale]/agents/components/DebugConfig.tsx

Lines changed: 8 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ import { ChatMessageType, TaskMessageType } from "@/types/chat";
1010
import { handleStreamResponse } from "@/app/chat/streaming/chatStreamHandler";
1111
import { ChatStreamFinalMessage } from "@/app/chat/streaming/chatStreamFinalMessage";
1212
import { TaskWindow } from "@/app/chat/streaming/taskWindow";
13+
import { transformMessagesToTaskMessages } from "@/app/chat/streaming/messageTransformer";
1314
import { ROLE_ASSISTANT } from "@/const/agentConfig";
1415
import log from "@/lib/logger";
1516

@@ -49,62 +50,17 @@ function AgentDebugging({
4950
}
5051
};
5152

52-
// Process the step content of the message
53+
// Process the step content of the message using unified transformer
5354
const processMessageSteps = (message: ChatMessageType): TaskMessageType[] => {
5455
if (!message.steps || message.steps.length === 0) return [];
5556

56-
const taskMsgs: TaskMessageType[] = [];
57-
message.steps.forEach((step) => {
58-
// Process step.contents
59-
if (step.contents && step.contents.length > 0) {
60-
step.contents.forEach((content) => {
61-
taskMsgs.push({
62-
id: content.id,
63-
role: ROLE_ASSISTANT,
64-
content: content.content,
65-
timestamp: new Date(),
66-
type: content.type,
67-
// Preserve subType so TaskWindow can style deep thinking text
68-
subType: content.subType as any,
69-
} as any);
70-
});
71-
}
72-
73-
// Process step.thinking
74-
if (step.thinking && step.thinking.content) {
75-
taskMsgs.push({
76-
id: `thinking-${step.id}`,
77-
role: ROLE_ASSISTANT,
78-
content: step.thinking.content,
79-
timestamp: new Date(),
80-
type: "model_output_thinking",
81-
});
82-
}
83-
84-
// Process step.code
85-
if (step.code && step.code.content) {
86-
taskMsgs.push({
87-
id: `code-${step.id}`,
88-
role: ROLE_ASSISTANT,
89-
content: step.code.content,
90-
timestamp: new Date(),
91-
type: "model_output_code",
92-
});
93-
}
94-
95-
// Process step.output
96-
if (step.output && step.output.content) {
97-
taskMsgs.push({
98-
id: `output-${step.id}`,
99-
role: ROLE_ASSISTANT,
100-
content: step.output.content,
101-
timestamp: new Date(),
102-
type: "tool",
103-
});
104-
}
105-
});
57+
// Use unified message transformer with includeCode: true for debug mode
58+
const { taskMessages } = transformMessagesToTaskMessages(
59+
[message],
60+
{ includeCode: true }
61+
);
10662

107-
return taskMsgs;
63+
return taskMessages;
10864
};
10965

11066
return (

frontend/app/[locale]/chat/streaming/chatStreamHandler.tsx

Lines changed: 51 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,7 @@ export const handleStreamResponse = async (
6767

6868
let lastContentType:
6969
| typeof chatConfig.contentTypes.MODEL_OUTPUT
70+
| typeof chatConfig.contentTypes.MODEL_OUTPUT_CODE
7071
| typeof chatConfig.contentTypes.PARSING
7172
| typeof chatConfig.contentTypes.EXECUTION
7273
| typeof chatConfig.contentTypes.AGENT_NEW_RUN
@@ -77,13 +78,24 @@ export const handleStreamResponse = async (
7778
| typeof chatConfig.contentTypes.PREPROCESS
7879
| null = null;
7980
let lastModelOutputIndex = -1; // Track the index of the last model output in currentStep.contents
81+
let lastCodeOutputIndex = -1; // Track the index of the last code output for proper streaming
8082
let searchResultsContent: any[] = [];
8183
let allSearchResults: any[] = [];
8284
let finalAnswer = "";
8385

8486
try {
8587
while (true) {
86-
const { done, value } = await reader.read();
88+
let readResult;
89+
try {
90+
readResult = await reader.read();
91+
} catch (readError: any) {
92+
// If read is aborted, break the loop gracefully
93+
if (readError?.name === "AbortError" || readError?.name === "AbortSignal") {
94+
break;
95+
}
96+
throw readError;
97+
}
98+
const { done, value } = readResult;
8799
if (done) break;
88100

89101
buffer += decoder.decode(value, { stream: true });
@@ -130,6 +142,7 @@ export const handleStreamResponse = async (
130142
// Reset status tracking variables
131143
lastContentType = null;
132144
lastModelOutputIndex = -1;
145+
lastCodeOutputIndex = -1;
133146

134147
break;
135148

@@ -298,70 +311,74 @@ export const handleStreamResponse = async (
298311
}
299312

300313
if (isDebug) {
301-
// In debug mode, use streaming output like model_output_thinking
302-
// Ensure contents exists
314+
// In debug mode, use MODEL_OUTPUT_CODE type for streaming output
303315
let processedContent = messageContent;
304316

305-
// Check if we should append to existing content or create new
306-
const shouldAppend =
307-
lastContentType === chatConfig.contentTypes.MODEL_OUTPUT &&
308-
lastModelOutputIndex >= 0 &&
309-
currentStep.contents[lastModelOutputIndex] &&
310-
currentStep.contents[lastModelOutputIndex].subType ===
311-
"code";
312-
313-
if (shouldAppend) {
314-
const modelOutput =
315-
currentStep.contents[lastModelOutputIndex];
317+
// Check if we should append to existing code content
318+
// Only append if the last content type was MODEL_OUTPUT_CODE and we have a valid index
319+
const shouldAppendCode =
320+
lastContentType === chatConfig.contentTypes.MODEL_OUTPUT_CODE &&
321+
lastCodeOutputIndex >= 0 &&
322+
currentStep.contents[lastCodeOutputIndex] &&
323+
currentStep.contents[lastCodeOutputIndex].type ===
324+
chatConfig.messageTypes.MODEL_OUTPUT_CODE;
325+
326+
if (shouldAppendCode) {
327+
const codeOutput =
328+
currentStep.contents[lastCodeOutputIndex];
316329
const codePrefix = t("chatStreamHandler.codePrefix");
317330

318331
// In append mode, also check for prefix in case it wasn't removed before
319332
if (
320-
modelOutput.content.includes(codePrefix) &&
333+
codeOutput.content.includes(codePrefix) &&
321334
processedContent.trim()
322335
) {
323336
// Clean existing content
324-
modelOutput.content = modelOutput.content.replace(
325-
new RegExp(codePrefix + `\\s*`),
337+
codeOutput.content = codeOutput.content.replace(
338+
new RegExp(`^(${codePrefix}|代码|Code)[::]\\s*`, "i"),
326339
""
327340
);
328341
}
329342

330-
// Directly append without prefix processing (prefix should have been removed when first created)
331-
let newContent = modelOutput.content + processedContent;
332-
// Remove "<end" suffix if present
343+
// Directly append the new content
344+
let newContent = codeOutput.content + processedContent;
345+
// Remove incomplete "<end" suffix if present (streaming artifact)
333346
if (newContent.endsWith("<end")) {
334347
newContent = newContent.slice(0, -4);
335348
}
336-
modelOutput.content = newContent;
349+
codeOutput.content = newContent;
337350
} else {
338-
// Otherwise, create new code content
339-
// Remove "代码:" prefix if present at the start of first content
351+
// Create new code content with MODEL_OUTPUT_CODE type
352+
// Remove "代码:" or "Code:" prefix if present at the start
340353
const codePrefix = t("chatStreamHandler.codePrefix");
341354
if (processedContent.startsWith(codePrefix)) {
342355
processedContent = processedContent.substring(
343356
codePrefix.length
344357
);
345358
}
346-
// Remove "<end" suffix if present
359+
// Also handle Chinese and English variants directly
360+
processedContent = processedContent.replace(/^(代码|Code)[:：]\s*/i, "");
361+
362+
// Remove incomplete "<end" suffix if present
347363
if (processedContent.endsWith("<end")) {
348364
processedContent = processedContent.slice(0, -4);
349365
}
366+
350367
currentStep.contents.push({
351368
id: `model-code-${Date.now()}-${Math.random()
352369
.toString(36)
353370
.substring(2, 7)}`,
354-
type: chatConfig.messageTypes.MODEL_OUTPUT,
355-
subType: "code",
371+
type: chatConfig.messageTypes.MODEL_OUTPUT_CODE,
356372
content: processedContent,
357373
expanded: true,
358374
timestamp: Date.now(),
359375
});
360-
lastModelOutputIndex = currentStep.contents.length - 1;
376+
// Track the new code content index
377+
lastCodeOutputIndex = currentStep.contents.length - 1;
361378
}
362379

363-
// Update the last processed content type
364-
lastContentType = chatConfig.contentTypes.MODEL_OUTPUT;
380+
// Update the last processed content type to MODEL_OUTPUT_CODE
381+
lastContentType = chatConfig.contentTypes.MODEL_OUTPUT_CODE;
365382
} else {
366383
// In non-debug mode, use the original logic - add a stable loading prompt
367384
// Check if there is a code generation prompt
@@ -915,7 +932,11 @@ export const handleStreamResponse = async (
915932
// Reset the conversation switch status
916933
setIsSwitchedConversation(false);
917934
} catch (error) {
918-
log.error(t("chatStreamHandler.streamResponseError"), error);
935+
// Don't log AbortError as it's expected when user stops the stream
936+
const err = error as Error;
937+
if (err.name !== "AbortError") {
938+
log.error(t("chatStreamHandler.streamResponseError"), error);
939+
}
919940
throw error; // Pass the error back to the original function for processing
920941
}
921942

0 commit comments

Comments
 (0)