Skip to content

Commit 6af1f2f

Browse files
committed
fix: enable prompt enhancement for GPT-5 and Codex Mini models
- Implement completePromptViaResponsesApi to collect streaming responses
- Add SSE fallback for robustness when SDK fails
- Support all GPT-5 variants (gpt-5, gpt-5-mini, gpt-5-nano) and Codex Mini
- Add comprehensive test coverage for new functionality
- Fixes #7334
1 parent 7137c19 commit 6af1f2f

File tree

2 files changed

+466
-6
lines changed

2 files changed

+466
-6
lines changed

src/api/providers/__tests__/openai-native.spec.ts

Lines changed: 213 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -470,6 +470,182 @@ describe("OpenAiNativeHandler", () => {
470470
})
471471
})
472472

473+
it("should complete prompt successfully with GPT-5 model via streaming collection", async () => {
474+
// Mock fetch for Responses API
475+
const mockFetch = vitest.fn().mockResolvedValue({
476+
ok: true,
477+
body: new ReadableStream({
478+
start(controller) {
479+
controller.enqueue(
480+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":"Enhanced "}\n\n'),
481+
)
482+
controller.enqueue(
483+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":"prompt "}\n\n'),
484+
)
485+
controller.enqueue(
486+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":"response"}\n\n'),
487+
)
488+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
489+
controller.close()
490+
},
491+
}),
492+
})
493+
global.fetch = mockFetch as any
494+
495+
handler = new OpenAiNativeHandler({
496+
apiModelId: "gpt-5-2025-08-07",
497+
openAiNativeApiKey: "test-api-key",
498+
})
499+
500+
const result = await handler.completePrompt("Test prompt")
501+
expect(result).toBe("Enhanced prompt response")
502+
503+
// Verify the request was made with correct parameters
504+
expect(mockFetch).toHaveBeenCalledWith(
505+
"https://api.openai.com/v1/responses",
506+
expect.objectContaining({
507+
method: "POST",
508+
headers: expect.objectContaining({
509+
"Content-Type": "application/json",
510+
Authorization: "Bearer test-api-key",
511+
Accept: "text/event-stream",
512+
}),
513+
body: expect.stringContaining('"input":"User: Test prompt"'),
514+
}),
515+
)
516+
517+
const requestBody = JSON.parse(mockFetch.mock.calls[0][1].body)
518+
expect(requestBody).toMatchObject({
519+
model: "gpt-5-2025-08-07",
520+
stream: true,
521+
temperature: 1,
522+
})
523+
524+
// Clean up
525+
delete (global as any).fetch
526+
})
527+
528+
it("should complete prompt successfully with GPT-5-mini model", async () => {
529+
// Mock fetch for Responses API
530+
const mockFetch = vitest.fn().mockResolvedValue({
531+
ok: true,
532+
body: new ReadableStream({
533+
start(controller) {
534+
controller.enqueue(
535+
new TextEncoder().encode(
536+
'data: {"type":"response.output_item.added","item":{"type":"text","text":"Mini response"}}\n\n',
537+
),
538+
)
539+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
540+
controller.close()
541+
},
542+
}),
543+
})
544+
global.fetch = mockFetch as any
545+
546+
handler = new OpenAiNativeHandler({
547+
apiModelId: "gpt-5-mini-2025-08-07",
548+
openAiNativeApiKey: "test-api-key",
549+
})
550+
551+
const result = await handler.completePrompt("Test prompt")
552+
expect(result).toBe("Mini response")
553+
554+
// Clean up
555+
delete (global as any).fetch
556+
})
557+
558+
it("should complete prompt successfully with GPT-5-nano model", async () => {
559+
// Mock fetch for Responses API
560+
const mockFetch = vitest.fn().mockResolvedValue({
561+
ok: true,
562+
body: new ReadableStream({
563+
start(controller) {
564+
controller.enqueue(
565+
new TextEncoder().encode('data: {"type":"response.output_text.delta","delta":"Nano "}\n\n'),
566+
)
567+
controller.enqueue(
568+
new TextEncoder().encode(
569+
'data: {"type":"response.output_text.delta","delta":"response"}\n\n',
570+
),
571+
)
572+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
573+
controller.close()
574+
},
575+
}),
576+
})
577+
global.fetch = mockFetch as any
578+
579+
handler = new OpenAiNativeHandler({
580+
apiModelId: "gpt-5-nano-2025-08-07",
581+
openAiNativeApiKey: "test-api-key",
582+
})
583+
584+
const result = await handler.completePrompt("Test prompt")
585+
expect(result).toBe("Nano response")
586+
587+
// Clean up
588+
delete (global as any).fetch
589+
})
590+
591+
it("should handle GPT-5 completePrompt with reasoning response", async () => {
592+
// Mock fetch for Responses API with reasoning
593+
const mockFetch = vitest.fn().mockResolvedValue({
594+
ok: true,
595+
body: new ReadableStream({
596+
start(controller) {
597+
// Include reasoning in the response
598+
controller.enqueue(
599+
new TextEncoder().encode(
600+
'data: {"type":"response.reasoning.delta","delta":"Let me think about this..."}\n\n',
601+
),
602+
)
603+
controller.enqueue(
604+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":"Final answer"}\n\n'),
605+
)
606+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
607+
controller.close()
608+
},
609+
}),
610+
})
611+
global.fetch = mockFetch as any
612+
613+
handler = new OpenAiNativeHandler({
614+
apiModelId: "gpt-5-2025-08-07",
615+
openAiNativeApiKey: "test-api-key",
616+
reasoningEffort: "high",
617+
})
618+
619+
const result = await handler.completePrompt("Complex prompt")
620+
// Should return the text content (reasoning is collected but text takes priority)
621+
expect(result).toBe("Final answer")
622+
623+
// Clean up
624+
delete (global as any).fetch
625+
})
626+
627+
it("should handle GPT-5 completePrompt API errors", async () => {
628+
// Mock fetch with error response
629+
const mockFetch = vitest.fn().mockResolvedValue({
630+
ok: false,
631+
status: 401,
632+
text: async () => JSON.stringify({ error: { message: "Invalid API key" } }),
633+
})
634+
global.fetch = mockFetch as any
635+
636+
handler = new OpenAiNativeHandler({
637+
apiModelId: "gpt-5-2025-08-07",
638+
openAiNativeApiKey: "invalid-key",
639+
})
640+
641+
await expect(handler.completePrompt("Test prompt")).rejects.toThrow(
642+
"Failed to complete prompt via GPT-5 API: GPT-5 API request failed (401): Invalid API key",
643+
)
644+
645+
// Clean up
646+
delete (global as any).fetch
647+
})
648+
473649
it("should complete prompt successfully with o1 model", async () => {
474650
handler = new OpenAiNativeHandler({
475651
apiModelId: "o1",
@@ -1679,16 +1855,49 @@ describe("GPT-5 streaming event coverage (additional)", () => {
16791855
delete (global as any).fetch
16801856
})
16811857

1682-
it("should handle codex-mini-latest non-streaming completion", async () => {
1858+
it("should handle codex-mini-latest non-streaming completion via streaming collection", async () => {
1859+
// Mock fetch for Responses API that will be used for non-streaming completion
1860+
const mockFetch = vitest.fn().mockResolvedValue({
1861+
ok: true,
1862+
body: new ReadableStream({
1863+
start(controller) {
1864+
controller.enqueue(
1865+
new TextEncoder().encode(
1866+
'data: {"type":"response.output_text.delta","delta":"def hello_world():"}\n\n',
1867+
),
1868+
)
1869+
controller.enqueue(
1870+
new TextEncoder().encode(
1871+
'data: {"type":"response.output_text.delta","delta":"\\n print(\\"Hello, World!\\")"}\n\n',
1872+
),
1873+
)
1874+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
1875+
controller.close()
1876+
},
1877+
}),
1878+
})
1879+
global.fetch = mockFetch as any
1880+
16831881
handler = new OpenAiNativeHandler({
16841882
...mockOptions,
16851883
apiModelId: "codex-mini-latest",
16861884
})
16871885

1688-
// Codex Mini now uses the same Responses API as GPT-5, which doesn't support non-streaming
1689-
await expect(handler.completePrompt("Write a hello world function in Python")).rejects.toThrow(
1690-
"completePrompt is not supported for codex-mini-latest. Use createMessage (Responses API) instead.",
1886+
// Codex Mini now collects the streaming response for non-streaming completion
1887+
const result = await handler.completePrompt("Write a hello world function in Python")
1888+
expect(result).toBe('def hello_world():\n print("Hello, World!")')
1889+
1890+
// Verify the request was made with correct parameters
1891+
expect(mockFetch).toHaveBeenCalledWith(
1892+
"https://api.openai.com/v1/responses",
1893+
expect.objectContaining({
1894+
method: "POST",
1895+
body: expect.stringContaining('"input":"User: Write a hello world function in Python"'),
1896+
}),
16911897
)
1898+
1899+
// Clean up
1900+
delete (global as any).fetch
16921901
})
16931902

16941903
it("should handle codex-mini-latest API errors", async () => {

0 commit comments

Comments (0)