diff --git a/packages/types/src/message.ts b/packages/types/src/message.ts
index 77c055c6e15..b02078fc0d8 100644
--- a/packages/types/src/message.ts
+++ b/packages/types/src/message.ts
@@ -221,8 +221,6 @@ export const clineMessageSchema = z.object({
 			gpt5: z
 				.object({
 					previous_response_id: z.string().optional(),
-					instructions: z.string().optional(),
-					reasoning_summary: z.string().optional(),
 				})
 				.optional(),
 		})
diff --git a/src/core/task-persistence/__tests__/taskMessages.spec.ts b/src/core/task-persistence/__tests__/taskMessages.spec.ts
new file mode 100644
index 00000000000..ecd6225692d
--- /dev/null
+++ b/src/core/task-persistence/__tests__/taskMessages.spec.ts
@@ -0,0 +1,71 @@
+import { describe, it, expect, vi, beforeEach } from "vitest"
+import * as os from "os"
+import * as path from "path"
+import * as fs from "fs/promises"
+
+// Mocks (use hoisted to avoid initialization ordering issues)
+const hoisted = vi.hoisted(() => ({
+	safeWriteJsonMock: vi.fn().mockResolvedValue(undefined),
+}))
+vi.mock("../../../utils/safeWriteJson", () => ({
+	safeWriteJson: hoisted.safeWriteJsonMock,
+}))
+
+// Import after mocks
+import { saveTaskMessages } from "../taskMessages"
+
+let tmpBaseDir: string
+
+beforeEach(async () => {
+	hoisted.safeWriteJsonMock.mockClear()
+	// Create a unique, writable temp directory to act as globalStoragePath
+	tmpBaseDir = await fs.mkdtemp(path.join(os.tmpdir(), "roo-test-"))
+})
+
+describe("taskMessages.saveTaskMessages", () => {
+	beforeEach(() => {
+		hoisted.safeWriteJsonMock.mockClear()
+	})
+
+	it("persists messages as-is", async () => {
+		const messages: any[] = [
+			{
+				role: "assistant",
+				content: "Hello",
+				metadata: {
+					gpt5: {
+						previous_response_id: "resp_123",
+					},
+					other: "keep",
+				},
+			},
+			{ role: "user", content: "Do thing" },
+		]
+
+		await saveTaskMessages({
+			messages,
+			taskId: "task-1",
+			globalStoragePath: tmpBaseDir,
+		})
+
+		expect(hoisted.safeWriteJsonMock).toHaveBeenCalledTimes(1)
+		const [, persisted] = hoisted.safeWriteJsonMock.mock.calls[0]
+		expect(persisted).toEqual(messages)
+	})
+
+	it("persists messages without modification when no metadata", async () => {
+		const messages: any[] = [
+			{ role: "assistant", content: "Hi" },
+			{ role: "user", content: "Yo" },
+		]
+
+		await saveTaskMessages({
+			messages,
+			taskId: "task-2",
+			globalStoragePath: tmpBaseDir,
+		})
+
+		const [, persisted] = hoisted.safeWriteJsonMock.mock.calls[0]
+		expect(persisted).toEqual(messages)
+	})
+})
diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts
index 58ccccc1f09..c9d94a2a02e 100644
--- a/src/core/task/Task.ts
+++ b/src/core/task/Task.ts
@@ -2267,7 +2267,7 @@ export class Task extends EventEmitter implements TaskLike {
 			}
 		}
 
-		await this.persistGpt5Metadata(reasoningMessage)
+		await this.persistGpt5Metadata()
 
 		await this.saveClineMessages()
 		await this.providerRef.deref()?.postStateToWebview()
@@ -2853,10 +2853,12 @@ export class Task extends EventEmitter implements TaskLike {
 	}
 
 	/**
-	 * Persist GPT-5 per-turn metadata (previous_response_id, instructions, reasoning_summary)
+	 * Persist GPT-5 per-turn metadata (previous_response_id only)
 	 * onto the last complete assistant say("text") message.
+	 *
+	 * Note: We do not persist system instructions or reasoning summaries.
 	 */
-	private async persistGpt5Metadata(reasoningMessage?: string): Promise<void> {
+	private async persistGpt5Metadata(): Promise<void> {
 		try {
 			const modelId = this.api.getModel().id
 			if (!modelId || !modelId.startsWith("gpt-5")) return
@@ -2875,9 +2877,7 @@ export class Task extends EventEmitter implements TaskLike {
 			}
 			const gpt5Metadata: Gpt5Metadata = {
 				...(msg.metadata.gpt5 ?? {}),
-				previous_response_id: lastResponseId,
-				instructions: this.lastUsedInstructions,
-				reasoning_summary: (reasoningMessage ?? "").trim() || undefined,
+				...(lastResponseId ? { previous_response_id: lastResponseId } : {}),
 			}
 			msg.metadata.gpt5 = gpt5Metadata
 		}
diff --git a/src/core/task/types.ts b/src/core/task/types.ts
index 607be51aab3..e3641590a64 100644
--- a/src/core/task/types.ts
+++ b/src/core/task/types.ts
@@ -12,18 +12,6 @@ export interface Gpt5Metadata {
 	 * Used to maintain conversation continuity in subsequent requests
 	 */
 	previous_response_id?: string
-
-	/**
-	 * The system instructions/prompt used for this response
-	 * Stored to track what instructions were active when the response was generated
-	 */
-	instructions?: string
-
-	/**
-	 * The reasoning summary from GPT-5's reasoning process
-	 * Contains the model's internal reasoning if reasoning mode was enabled
-	 */
-	reasoning_summary?: string
 }
 
 /**
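A minimal standalone sketch of how the trimmed metadata can be read back and threaded into the next request; only the metadata.gpt5.previous_response_id field comes from the patch above, while the ClineMessageLike shape and the helper names below are illustrative assumptions, not repository APIs:

// Sketch only: mirrors the trimmed Gpt5Metadata shape from src/core/task/types.ts.
// ClineMessageLike, getLastResponseId, and buildNextRequestOptions are illustrative
// names for this example, not exports of the repository.
interface Gpt5MetadataLike {
	previous_response_id?: string
}

interface ClineMessageLike {
	role: string
	content: string
	metadata?: { gpt5?: Gpt5MetadataLike }
}

// Walk backwards to the most recent message that carries a stored response id.
function getLastResponseId(messages: ClineMessageLike[]): string | undefined {
	for (let i = messages.length - 1; i >= 0; i--) {
		const id = messages[i].metadata?.gpt5?.previous_response_id
		if (id) return id
	}
	return undefined
}

// Conditionally spread the id into the next request options, the same pattern the
// patch uses when writing the metadata: the key is omitted entirely when absent.
function buildNextRequestOptions(messages: ClineMessageLike[]): { previous_response_id?: string } {
	const lastResponseId = getLastResponseId(messages)
	return {
		...(lastResponseId ? { previous_response_id: lastResponseId } : {}),
	}
}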