Skip to content

Commit 4dbedc0

Browse files
committed
fix: Address PR review feedback - fix stale resume IDs, update model description, remove duplicate test, revert gitignore
1 parent 24010b4 commit 4dbedc0

File tree

4 files changed: +30 additions, −53 deletions

.gitignore

Lines changed: 0 additions & 4 deletions
Original file line number · Diff line number · Diff line change
@@ -49,7 +49,3 @@ logs
4949

5050
# Qdrant
5151
qdrant_storage/
52-
53-
# ignore temp background docs
54-
TEMP_OPENAI_BACKGROUND_TASK_DOCS.DM
55-
TEMP_DOCS/

packages/types/src/providers/openai.ts

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -47,7 +47,7 @@ export const openAiNativeModels = {
4747
inputPrice: 15.0,
4848
outputPrice: 120.0,
4949
description:
50-
"GPT-5 Pro: a slow, reasoning-focused model built to tackle tough problems. Requests can take several minutes to finish. Responses API only; no streaming, so it may appear stuck until the reply is ready.",
50+
"GPT-5 Pro: A slow, reasoning-focused model for complex problems. Uses background mode with resilient streaming - requests may take several minutes with automatic recovery if connection drops.",
5151
supportsVerbosity: true,
5252
supportsTemperature: false,
5353
backgroundMode: true,

src/api/providers/openai-native.ts

Lines changed: 29 additions & 13 deletions
Original file line number · Diff line number · Diff line change
@@ -54,6 +54,9 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
5454
// Track whether current request is in background mode for status chunk annotation
5555
private currentRequestIsBackground?: boolean
5656
private resumeCutoffSequence?: number
57+
// Per-request tracking to prevent stale resume attempts
58+
private currentRequestResponseId?: string
59+
private currentRequestSequenceNumber?: number
5760

5861
// Event types handled by the shared event processor to avoid duplication
5962
private readonly coreHandledEventTypes = new Set<string>([
@@ -325,12 +328,15 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
325328
): ApiStream {
326329
// Annotate if this request uses background mode (used for status chunks)
327330
this.currentRequestIsBackground = !!requestBody?.background
331+
// Reset per-request tracking to prevent stale values from previous requests
332+
this.currentRequestResponseId = undefined
333+
this.currentRequestSequenceNumber = undefined
328334

329335
const canAttemptResume = () =>
330336
this.currentRequestIsBackground &&
331337
(this.options.openAiNativeBackgroundAutoResume ?? true) &&
332-
!!this.lastResponseId &&
333-
typeof this.lastSequenceNumber === "number"
338+
!!this.currentRequestResponseId &&
339+
typeof this.currentRequestSequenceNumber === "number"
334340

335341
try {
336342
// Use the official SDK
@@ -356,8 +362,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
356362
// Stream dropped mid-flight; attempt resume for background requests
357363
if (canAttemptResume()) {
358364
for await (const chunk of this.attemptResumeOrPoll(
359-
this.lastResponseId!,
360-
this.lastSequenceNumber!,
365+
this.currentRequestResponseId!,
366+
this.currentRequestSequenceNumber!,
361367
model,
362368
)) {
363369
yield chunk
@@ -411,8 +417,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
411417
} catch (fallbackErr) {
412418
if (canAttemptResume()) {
413419
for await (const chunk of this.attemptResumeOrPoll(
414-
this.lastResponseId!,
415-
this.lastSequenceNumber!,
420+
this.currentRequestResponseId!,
421+
this.currentRequestSequenceNumber!,
416422
model,
417423
)) {
418424
yield chunk
@@ -436,8 +442,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
436442
}
437443
if (canAttemptResume()) {
438444
for await (const chunk of this.attemptResumeOrPoll(
439-
this.lastResponseId!,
440-
this.lastSequenceNumber!,
445+
this.currentRequestResponseId!,
446+
this.currentRequestSequenceNumber!,
441447
model,
442448
)) {
443449
yield chunk
@@ -466,8 +472,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
466472
}
467473
if (canAttemptResume()) {
468474
for await (const chunk of this.attemptResumeOrPoll(
469-
this.lastResponseId!,
470-
this.lastSequenceNumber!,
475+
this.currentRequestResponseId!,
476+
this.currentRequestSequenceNumber!,
471477
model,
472478
)) {
473479
yield chunk
@@ -489,8 +495,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
489495
}
490496
if (canAttemptResume()) {
491497
for await (const chunk of this.attemptResumeOrPoll(
492-
this.lastResponseId!,
493-
this.lastSequenceNumber!,
498+
this.currentRequestResponseId!,
499+
this.currentRequestSequenceNumber!,
494500
model,
495501
)) {
496502
yield chunk
@@ -500,8 +506,10 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
500506
throw fallbackErr
501507
}
502508
} finally {
503-
// Always clear background flag at end of request lifecycle
509+
// Always clear background flag and per-request tracking at end of request lifecycle
504510
this.currentRequestIsBackground = undefined
511+
this.currentRequestResponseId = undefined
512+
this.currentRequestSequenceNumber = undefined
505513
}
506514
}
507515

@@ -818,11 +826,15 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
818826
// Record sequence number for cursor tracking
819827
if (typeof parsed?.sequence_number === "number") {
820828
this.lastSequenceNumber = parsed.sequence_number
829+
// Also track for per-request resume capability
830+
this.currentRequestSequenceNumber = parsed.sequence_number
821831
}
822832

823833
// Store response ID for conversation continuity
824834
if (parsed.response?.id) {
825835
this.resolveResponseId(parsed.response.id)
836+
// Also track for per-request resume capability
837+
this.currentRequestResponseId = parsed.response.id
826838
}
827839
// Capture resolved service tier if present
828840
if (parsed.response?.service_tier) {
@@ -1493,6 +1505,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
14931505
// Persist response id for conversation continuity when available
14941506
if (event?.response?.id) {
14951507
this.resolveResponseId(event.response.id)
1508+
// Also track for per-request resume capability
1509+
this.currentRequestResponseId = event.response.id
14961510
}
14971511
// Capture resolved service tier when available
14981512
if (event?.response?.service_tier) {
@@ -1501,6 +1515,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
15011515
// Record sequence number for cursor tracking
15021516
if (typeof event?.sequence_number === "number") {
15031517
this.lastSequenceNumber = event.sequence_number
1518+
// Also track for per-request resume capability
1519+
this.currentRequestSequenceNumber = event.sequence_number
15041520
}
15051521

15061522
// Map lifecycle events to status chunks

webview-ui/src/utils/__tests__/backgroundStatus.test.ts

Lines changed: 0 additions & 35 deletions
This file was deleted.

0 commit comments

Comments (0)