Skip to content

Commit e23047f

Browse files
committed
fix: extend previousResponseId skip logic to all OpenAI Responses API models
- Updated logic to check for both GPT-5 and Codex Mini models
- These are the models that use the OpenAI Responses API endpoint
- Added test coverage for Codex Mini model
- Updated comments and log messages to reflect broader scope

This ensures proper conversation continuity for all OpenAI models using the Responses API, not just GPT-5.
1 parent 2638d35 commit e23047f

File tree

2 files changed

+77
-15
lines changed

2 files changed

+77
-15
lines changed

src/core/task/Task.ts

Lines changed: 17 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1108,15 +1108,15 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
11081108
content: [{ type: "text", text: `[new_task completed] Result: ${lastMessage}` }],
11091109
})
11101110

1111-
// When using GPT-5 with the responses API, we need to skip the previous_response_id
1112-
// for the next API call after a subtask completes, similar to what happens after
1113-
// a condense operation. This ensures the conversation continuity is properly maintained.
1111+
// When using OpenAI models with the responses API (GPT-5, Codex Mini, etc.), we need to skip
1112+
// the previous_response_id for the next API call after a subtask completes, similar to what
1113+
// happens after a condense operation. This ensures the conversation continuity is properly maintained.
11141114
const modelId = this.api.getModel().id
1115-
if (modelId && modelId.startsWith("gpt-5")) {
1115+
if (modelId && (modelId.startsWith("gpt-5") || modelId === "codex-mini-latest")) {
11161116
this.skipPrevResponseIdOnce = true
11171117
this.providerRef
11181118
.deref()
1119-
?.log(`[GPT-5] Skipping previous_response_id for next API call after subtask completion`)
1119+
?.log(`[Responses API] Skipping previous_response_id for next API call after subtask completion`)
11201120
}
11211121
} catch (error) {
11221122
this.providerRef
@@ -2346,7 +2346,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
23462346
if (truncateResult.error) {
23472347
await this.say("condense_context_error", truncateResult.error)
23482348
} else if (truncateResult.summary) {
2349-
// A condense operation occurred; for the next GPT‑5 API call we should NOT
2349+
// A condense operation occurred; for the next Responses API call we should NOT
23502350
// send previous_response_id so the request reflects the fresh condensed context.
23512351
this.skipPrevResponseIdOnce = true
23522352

@@ -2382,12 +2382,17 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
23822382
throw new Error("Auto-approval limit reached and user did not approve continuation")
23832383
}
23842384

2385-
// Determine GPT‑5 previous_response_id from last persisted assistant turn (if available),
2385+
// Determine previous_response_id for Responses API models from last persisted assistant turn (if available),
23862386
// unless a condense just occurred (skip once after condense).
23872387
let previousResponseId: string | undefined = undefined
23882388
try {
23892389
const modelId = this.api.getModel().id
2390-
if (modelId && modelId.startsWith("gpt-5") && !this.skipPrevResponseIdOnce) {
2390+
// Check if this is a model that uses the Responses API (GPT-5, Codex Mini, etc.)
2391+
if (
2392+
modelId &&
2393+
(modelId.startsWith("gpt-5") || modelId === "codex-mini-latest") &&
2394+
!this.skipPrevResponseIdOnce
2395+
) {
23912396
// Find the last assistant message that has a previous_response_id stored
23922397
const idx = findLastIndex(
23932398
this.clineMessages,
@@ -2563,13 +2568,15 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
25632568
}
25642569

25652570
/**
2566-
* Persist GPT-5 per-turn metadata (previous_response_id, instructions, reasoning_summary)
2571+
* Persist Responses API per-turn metadata (previous_response_id, instructions, reasoning_summary)
25672572
* onto the last complete assistant say("text") message.
2573+
* This is used for models that use the OpenAI Responses API (GPT-5, Codex Mini, etc.)
25682574
*/
25692575
private async persistGpt5Metadata(reasoningMessage?: string): Promise<void> {
25702576
try {
25712577
const modelId = this.api.getModel().id
2572-
if (!modelId || !modelId.startsWith("gpt-5")) return
2578+
// Only persist metadata for models using the Responses API
2579+
if (!modelId || !(modelId.startsWith("gpt-5") || modelId === "codex-mini-latest")) return
25732580

25742581
const lastResponseId: string | undefined = (this.api as any)?.getLastResponseId?.()
25752582
const idx = findLastIndex(

src/core/task/__tests__/Task.spec.ts

Lines changed: 60 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ describe("Task", () => {
157157

158158
// Verify the log was called
159159
expect(mockProvider.log).toHaveBeenCalledWith(
160-
"[GPT-5] Skipping previous_response_id for next API call after subtask completion",
160+
"[Responses API] Skipping previous_response_id for next API call after subtask completion",
161161
)
162162

163163
// Verify other expected calls
@@ -169,8 +169,63 @@ describe("Task", () => {
169169
})
170170
})
171171

172-
it("should not skip previousResponseId for non-GPT-5 models", async () => {
173-
// Create task with non-GPT-5 model
172+
it("should skip previousResponseId for Codex Mini after subtask completion", async () => {
173+
// Create task with Codex Mini model
174+
const apiConfiguration = {
175+
apiProvider: "openai-native",
176+
apiModelId: "codex-mini-latest",
177+
}
178+
179+
task = new Task({
180+
provider: mockProvider as any,
181+
apiConfiguration: apiConfiguration as any,
182+
task: "Test task",
183+
startTask: false,
184+
})
185+
186+
// Mock the API to return Codex Mini model
187+
task.api = {
188+
getModel: vi.fn(() => ({
189+
id: "codex-mini-latest",
190+
info: {
191+
contextWindow: 50000,
192+
supportsComputerUse: false,
193+
},
194+
})),
195+
} as any
196+
197+
// Spy on required methods
198+
const saySpy = vi.spyOn(task, "say").mockResolvedValue(undefined)
199+
const addToApiConversationHistorySpy = vi
200+
.spyOn(task as any, "addToApiConversationHistory")
201+
.mockResolvedValue(undefined)
202+
const emitSpy = vi.spyOn(task, "emit")
203+
204+
// Initially, skipPrevResponseIdOnce should be false
205+
expect(task["skipPrevResponseIdOnce"]).toBe(false)
206+
207+
// Resume the task with a subtask result
208+
await task.resumePausedTask("Subtask completed successfully")
209+
210+
// Verify the flag was set for Codex Mini
211+
expect(task["skipPrevResponseIdOnce"]).toBe(true)
212+
213+
// Verify the log was called
214+
expect(mockProvider.log).toHaveBeenCalledWith(
215+
"[Responses API] Skipping previous_response_id for next API call after subtask completion",
216+
)
217+
218+
// Verify other expected calls
219+
expect(emitSpy).toHaveBeenCalledWith(RooCodeEventName.TaskUnpaused)
220+
expect(saySpy).toHaveBeenCalledWith("subtask_result", "Subtask completed successfully")
221+
expect(addToApiConversationHistorySpy).toHaveBeenCalledWith({
222+
role: "user",
223+
content: [{ type: "text", text: "[new_task completed] Result: Subtask completed successfully" }],
224+
})
225+
})
226+
227+
it("should not skip previousResponseId for non-Responses API models", async () => {
228+
// Create task with non-Responses API model (e.g., Claude)
174229
const apiConfiguration = {
175230
apiProvider: "anthropic",
176231
apiModelId: "claude-3-5-sonnet-20241022",
@@ -210,9 +265,9 @@ describe("Task", () => {
210265
// Verify the flag was NOT set for non-GPT-5 models
211266
expect(task["skipPrevResponseIdOnce"]).toBe(false)
212267

213-
// Verify the GPT-5 specific log was NOT called
268+
// Verify the Responses API specific log was NOT called
214269
expect(mockProvider.log).not.toHaveBeenCalledWith(
215-
"[GPT-5] Skipping previous_response_id for next API call after subtask completion",
270+
"[Responses API] Skipping previous_response_id for next API call after subtask completion",
216271
)
217272

218273
// Verify other expected calls still happened

0 commit comments

Comments
 (0)