Skip to content

Commit 9cbf98f

Browse files
committed
fix(openai-native): address Roomote inline feedback

- Delegate standard GPT-5 SSE event types to shared processor to reduce duplication
- Add JSDoc for response ID accessors
- Standardize key error messages for GPT-5 Responses API fallback
- Extract persistGpt5Metadata() in Task to simplify metadata writes
- Add malformed JSON SSE parsing test
1 parent f07bd16 commit 9cbf98f

File tree

3 files changed

+118
-27
lines changed

3 files changed

+118
-27
lines changed

src/api/providers/__tests__/openai-native.spec.ts

Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1464,4 +1464,54 @@ describe("GPT-5 streaming event coverage (additional)", () => {
14641464
// @ts-ignore
14651465
delete global.fetch
14661466
})
1467+
1468+
it("should ignore malformed JSON lines in SSE stream", async () => {
1469+
const mockFetch = vitest.fn().mockResolvedValue({
1470+
ok: true,
1471+
body: new ReadableStream({
1472+
start(controller) {
1473+
controller.enqueue(
1474+
new TextEncoder().encode(
1475+
'data: {"type":"response.output_item.added","item":{"type":"text","text":"Before"}}\n\n',
1476+
),
1477+
)
1478+
// Malformed JSON line
1479+
controller.enqueue(
1480+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":"Bad"\n\n'),
1481+
)
1482+
// Valid line after malformed
1483+
controller.enqueue(
1484+
new TextEncoder().encode(
1485+
'data: {"type":"response.output_item.added","item":{"type":"text","text":"After"}}\n\n',
1486+
),
1487+
)
1488+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
1489+
controller.close()
1490+
},
1491+
}),
1492+
})
1493+
// @ts-ignore
1494+
global.fetch = mockFetch
1495+
1496+
const handler = new OpenAiNativeHandler({
1497+
apiModelId: "gpt-5-2025-08-07",
1498+
openAiNativeApiKey: "test-api-key",
1499+
})
1500+
1501+
const systemPrompt = "You are a helpful assistant."
1502+
const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello!" }]
1503+
const stream = handler.createMessage(systemPrompt, messages)
1504+
1505+
const chunks: any[] = []
1506+
for await (const chunk of stream) {
1507+
chunks.push(chunk)
1508+
}
1509+
1510+
// It should not throw and still capture the valid texts around the malformed line
1511+
const textChunks = chunks.filter((c) => c.type === "text")
1512+
expect(textChunks.map((c: any) => c.text)).toEqual(["Before", "After"])
1513+
1514+
// @ts-ignore
1515+
delete global.fetch
1516+
})
14671517
})

src/api/providers/openai-native.ts

Lines changed: 38 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,20 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
3535
private responseIdPromise: Promise<string | undefined> | undefined
3636
private responseIdResolver: ((value: string | undefined) => void) | undefined
3737

38+
// Event types handled by the shared GPT-5 event processor to avoid duplication
39+
private readonly gpt5CoreHandledTypes = new Set<string>([
40+
"response.text.delta",
41+
"response.output_text.delta",
42+
"response.reasoning.delta",
43+
"response.reasoning_text.delta",
44+
"response.reasoning_summary.delta",
45+
"response.reasoning_summary_text.delta",
46+
"response.refusal.delta",
47+
"response.output_item.added",
48+
"response.done",
49+
"response.completed",
50+
])
51+
3852
constructor(options: ApiHandlerOptions) {
3953
super()
4054
this.options = options
@@ -462,7 +476,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
462476
}
463477

464478
if (!retryResponse.body) {
465-
throw new Error("No response body from Responses API retry")
479+
throw new Error("GPT-5 Responses API error: No response body from retry request")
466480
}
467481

468482
// Handle the successful retry response
@@ -506,7 +520,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
506520
}
507521

508522
if (!response.body) {
509-
throw new Error("No response body from Responses API")
523+
throw new Error("GPT-5 Responses API error: No response body")
510524
}
511525

512526
// Handle streaming response
@@ -603,6 +617,18 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
603617
this.resolveResponseId(parsed.response.id)
604618
}
605619

620+
// Delegate standard event types to the shared processor to avoid duplication
621+
if (parsed?.type && this.gpt5CoreHandledTypes.has(parsed.type)) {
622+
for await (const outChunk of this.processGpt5Event(parsed, model)) {
623+
// Track whether we've emitted any content so fallback handling can decide appropriately
624+
if (outChunk.type === "text" || outChunk.type === "reasoning") {
625+
hasContent = true
626+
}
627+
yield outChunk
628+
}
629+
continue
630+
}
631+
606632
// Check if this is a complete response (non-streaming format)
607633
if (parsed.response && parsed.response.output && Array.isArray(parsed.response.output)) {
608634
// Handle complete response in the initial event
@@ -1185,12 +1211,20 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
11851211
return { id: id.startsWith("o3-mini") ? "o3-mini" : id, info, ...params, verbosity: params.verbosity }
11861212
}
11871213

1188-
// Method to get the last response ID for external use
1214+
/**
1215+
* Gets the last GPT-5 response ID captured from the Responses API stream.
1216+
* Used for maintaining conversation continuity across requests.
1217+
* @returns The response ID, or undefined if not available yet
1218+
*/
11891219
getLastResponseId(): string | undefined {
11901220
return this.lastResponseId
11911221
}
11921222

1193-
// Method to set a response ID for conversation continuity
1223+
/**
1224+
* Sets the last GPT-5 response ID for conversation continuity.
1225+
* Typically only used in tests or special flows.
1226+
* @param responseId The GPT-5 response ID to store
1227+
*/
11941228
setResponseId(responseId: string): void {
11951229
this.lastResponseId = responseId
11961230
}

src/core/task/Task.ts

Lines changed: 30 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1751,29 +1751,7 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
17511751
presentAssistantMessage(this)
17521752
}
17531753

1754-
// Persist GPT‑5 per-turn metadata (response_id, instructions)
1755-
try {
1756-
const modelId = this.api.getModel().id
1757-
if (modelId && modelId.startsWith("gpt-5")) {
1758-
const lastResponseId: string | undefined = (this.api as any)?.getLastResponseId?.()
1759-
const idx = findLastIndex(
1760-
this.clineMessages,
1761-
(m) => m.type === "say" && (m as any).say === "text" && m.partial !== true,
1762-
)
1763-
if (idx !== -1) {
1764-
const msg = this.clineMessages[idx] as any
1765-
msg.metadata = msg.metadata ?? {}
1766-
msg.metadata.gpt5 = {
1767-
...(msg.metadata.gpt5 ?? {}),
1768-
response_id: lastResponseId, // This is the response ID generated by THIS turn
1769-
instructions: this.lastUsedInstructions,
1770-
reasoning_summary: (reasoningMessage ?? "").trim() || undefined,
1771-
}
1772-
}
1773-
}
1774-
} catch {
1775-
// Non-fatal
1776-
}
1754+
await this.persistGpt5Metadata(reasoningMessage)
17771755

17781756
updateApiReqMsg()
17791757
await this.saveClineMessages()
@@ -2244,6 +2222,35 @@ export class Task extends EventEmitter<TaskEvents> implements TaskLike {
22442222
}
22452223
}
22462224

2225+
/**
2226+
* Persist GPT-5 per-turn metadata (response_id, instructions, reasoning_summary)
2227+
* onto the last complete assistant say("text") message.
2228+
*/
2229+
private async persistGpt5Metadata(reasoningMessage?: string): Promise<void> {
2230+
try {
2231+
const modelId = this.api.getModel().id
2232+
if (!modelId || !modelId.startsWith("gpt-5")) return
2233+
2234+
const lastResponseId: string | undefined = (this.api as any)?.getLastResponseId?.()
2235+
const idx = findLastIndex(
2236+
this.clineMessages,
2237+
(m) => m.type === "say" && (m as any).say === "text" && m.partial !== true,
2238+
)
2239+
if (idx !== -1) {
2240+
const msg = this.clineMessages[idx] as any
2241+
msg.metadata = msg.metadata ?? {}
2242+
msg.metadata.gpt5 = {
2243+
...(msg.metadata.gpt5 ?? {}),
2244+
response_id: lastResponseId,
2245+
instructions: this.lastUsedInstructions,
2246+
reasoning_summary: (reasoningMessage ?? "").trim() || undefined,
2247+
}
2248+
}
2249+
} catch {
2250+
// Non-fatal error in metadata persistence
2251+
}
2252+
}
2253+
22472254
// Getters
22482255

22492256
public get cwd() {

0 commit comments

Comments (0)