Commit 9aa4007

🤖 Change interrupt sentinel from [INTERRUPTED] to [CONTINUE] (#170)
## Problem

When users interrupt a stream and then resume, we inject a synthetic `[INTERRUPTED]` message to give the model context. However, this wording can confuse models into thinking there was an error or that they should stop.

## Solution

Change the sentinel text from `[INTERRUPTED]` to `[CONTINUE]` to make it clearer that the model should continue its previous work. This is a quick UX improvement while we work on more comprehensive handling of interrupted messages with thinking (see the #investigate-thinking-error branch).

## Changes

- Updated sentinel text: `[INTERRUPTED]` → `[CONTINUE]`
- Updated all comments and tests to reflect the new wording
- All 297 unit tests pass ✅

## Testing

- ✅ Unit tests updated and passing
- Verified with `make test`

_Generated with `cmux`_
1 parent 747ae0f commit 9aa4007
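
For illustration, here is a minimal sketch (not part of the diff) of what the transform produces after this change. The `SketchMessage` shape is a simplification of `CmuxMessage`/`CmuxMetadata` from `src/types/message.ts`, and the history values are hypothetical; the sentinel object itself mirrors the tests below.

```ts
// Simplified message shape; see src/types/message.ts for the full types.
interface SketchMessage {
  id: string;
  role: "user" | "assistant";
  parts: Array<{ type: "text"; text: string }>;
  metadata?: { timestamp?: number; partial?: boolean; synthetic?: boolean };
}

// A stream interrupted mid-response leaves a partial assistant message:
const history: SketchMessage[] = [
  { id: "user-1", role: "user", parts: [{ type: "text", text: "Refactor the parser" }] },
  {
    id: "assistant-1",
    role: "assistant",
    parts: [{ type: "text", text: "Starting with the lexer..." }],
    metadata: { timestamp: 2000, partial: true }, // partial: stream was cut off
  },
];

// addInterruptedSentinel (diffed below) appends a synthetic user message after it.
// Before this commit the sentinel text was "[INTERRUPTED]"; now it is "[CONTINUE]":
const sentinel: SketchMessage = {
  id: "interrupted-assistant-1",
  role: "user",
  parts: [{ type: "text", text: "[CONTINUE]" }],
  metadata: { timestamp: 2000, synthetic: true }, // model context only; never shown in UI
};
```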

5 files changed: +11 -11 lines changed

src/services/aiService.ts

Lines changed: 1 addition & 1 deletion

```diff
@@ -440,7 +440,7 @@ export class AIService extends EventEmitter {
       log.debug("Keeping reasoning parts for OpenAI (fetch wrapper handles item_references)");
     }
 
-    // Add [INTERRUPTED] sentinel to partial messages (for model context)
+    // Add [CONTINUE] sentinel to partial messages (for model context)
     const messagesWithSentinel = addInterruptedSentinel(filteredMessages);
 
     // Convert CmuxMessage to ModelMessage format using Vercel AI SDK utility
```

src/services/ipcMain.ts

Lines changed: 1 addition & 1 deletion

```diff
@@ -1055,7 +1055,7 @@ export class IpcMain {
         // so that stream-end can properly clean up the streaming indicator
         this.aiService.replayStream(workspaceId);
       } else if (partial) {
-        // No active stream but there's a partial - send as regular message (shows INTERRUPTED)
+        // No active stream but there's a partial - send as regular message (shows CONTINUE)
         this.mainWindow?.webContents.send(chatChannel, partial);
       }
     }
```

src/types/message.ts

Lines changed: 1 addition & 1 deletion

```diff
@@ -13,7 +13,7 @@ export interface CmuxMetadata {
   providerMetadata?: Record<string, unknown>; // Raw AI SDK provider data
   systemMessageTokens?: number; // Token count for system message sent with this request (calculated by AIService)
   partial?: boolean; // Whether this message was interrupted and is incomplete
-  synthetic?: boolean; // Whether this message was synthetically generated (e.g., [INTERRUPTED] sentinel)
+  synthetic?: boolean; // Whether this message was synthetically generated (e.g., [CONTINUE] sentinel)
   error?: string; // Error message if stream failed
   errorType?: StreamErrorType; // Error type/category if stream failed
   compacted?: boolean; // Whether this message is a compacted summary of previous history
```

src/utils/messages/modelMessageTransform.test.ts

Lines changed: 4 additions & 4 deletions

```diff
@@ -420,13 +420,13 @@ describe("modelMessageTransform", () => {
 
       const result = addInterruptedSentinel(messages);
 
-      // Should have 3 messages: user, assistant, [INTERRUPTED] user
+      // Should have 3 messages: user, assistant, [CONTINUE] user
       expect(result).toHaveLength(3);
       expect(result[0].id).toBe("user-1");
       expect(result[1].id).toBe("assistant-1");
       expect(result[2].id).toBe("interrupted-assistant-1");
       expect(result[2].role).toBe("user");
-      expect(result[2].parts).toEqual([{ type: "text", text: "[INTERRUPTED]" }]);
+      expect(result[2].parts).toEqual([{ type: "text", text: "[CONTINUE]" }]);
       expect(result[2].metadata?.synthetic).toBe(true);
       expect(result[2].metadata?.timestamp).toBe(2000);
     });
@@ -472,10 +472,10 @@ describe("modelMessageTransform", () => {
 
       const result = addInterruptedSentinel(messages);
 
-      // Should have 3 messages: user, assistant (reasoning only), [INTERRUPTED] user
+      // Should have 3 messages: user, assistant (reasoning only), [CONTINUE] user
       expect(result).toHaveLength(3);
       expect(result[2].role).toBe("user");
-      expect(result[2].parts).toEqual([{ type: "text", text: "[INTERRUPTED]" }]);
+      expect(result[2].parts).toEqual([{ type: "text", text: "[CONTINUE]" }]);
     });
 
     it("should handle multiple partial messages", () => {
```

src/utils/messages/modelMessageTransform.ts

Lines changed: 4 additions & 4 deletions

```diff
@@ -62,8 +62,8 @@ export function stripReasoningForOpenAI(messages: CmuxMessage[]): CmuxMessage[]
 }
 
 /**
- * Add [INTERRUPTED] sentinel to partial messages by inserting a user message.
- * This helps the model understand that a message was interrupted and incomplete.
+ * Add [CONTINUE] sentinel to partial messages by inserting a user message.
+ * This helps the model understand that a message was interrupted and to continue.
  * The sentinel is ONLY for model context, not shown in UI.
  *
  * We insert a separate user message instead of modifying the assistant message
@@ -77,12 +77,12 @@ export function addInterruptedSentinel(messages: CmuxMessage[]): CmuxMessage[] {
   for (const msg of messages) {
     result.push(msg);
 
-    // If this is a partial assistant message, insert [INTERRUPTED] user message after it
+    // If this is a partial assistant message, insert [CONTINUE] user message after it
     if (msg.role === "assistant" && msg.metadata?.partial) {
       result.push({
         id: `interrupted-${msg.id}`,
         role: "user",
-        parts: [{ type: "text", text: "[INTERRUPTED]" }],
+        parts: [{ type: "text", text: "[CONTINUE]" }],
         metadata: {
           timestamp: msg.metadata.timestamp,
           // Mark as synthetic so it can be identified if needed
```
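
For context, a hedged sketch of how this transform is consumed. The first function mirrors the `aiService.ts` hunk above; the UI-side filter is an assumption built on the `synthetic` flag ("so it can be identified if needed"), and the function names and import paths here are hypothetical.

```ts
import type { CmuxMessage } from "../../types/message";
import { addInterruptedSentinel } from "./modelMessageTransform";

// Mirrors the aiService.ts call site: inject [CONTINUE] sentinels after partial
// assistant messages just before converting to the model-facing format.
function prepareForModel(filteredMessages: CmuxMessage[]): CmuxMessage[] {
  return addInterruptedSentinel(filteredMessages);
}

// Assumed UI-side counterpart: the synthetic flag lets rendering code drop
// sentinel messages, keeping them out of the visible transcript.
function visibleMessages(messages: CmuxMessage[]): CmuxMessage[] {
  return messages.filter((msg) => !msg.metadata?.synthetic);
}
```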
