Skip to content

Commit 65a2ade

Browse files
committed
feat: add support for OpenAI gpt-5-chat-latest model
- Added gpt-5-chat-latest model configuration to openAiNativeModels
- Updated OpenAiNativeHandler to recognize gpt-5-chat-latest as a Responses API model
- Added comprehensive tests for the new model
- Model is optimized for conversational AI and non-reasoning tasks

Fixes #7057
1 parent 962df86 commit 65a2ade

File tree

3 files changed

+161
-1
lines changed

3 files changed

+161
-1
lines changed

packages/types/src/providers/openai.ts

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,18 @@ export type OpenAiNativeModelId = keyof typeof openAiNativeModels
66
export const openAiNativeDefaultModelId: OpenAiNativeModelId = "gpt-5-2025-08-07"
77

88
export const openAiNativeModels = {
9+
"gpt-5-chat-latest": {
10+
maxTokens: 128000,
11+
contextWindow: 400000,
12+
supportsImages: true,
13+
supportsPromptCache: true,
14+
supportsReasoningEffort: false,
15+
inputPrice: 1.25,
16+
outputPrice: 10.0,
17+
cacheReadsPrice: 0.13,
18+
description: "GPT-5 Chat Latest: Optimized for conversational AI and non-reasoning tasks",
19+
supportsVerbosity: true,
20+
},
921
"gpt-5-2025-08-07": {
1022
maxTokens: 128000,
1123
contextWindow: 400000,
Lines changed: 147 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,147 @@
1+
import { describe, it, expect, vi, beforeEach } from "vitest"
2+
import { OpenAiNativeHandler } from "../openai-native"
3+
import { ApiHandlerOptions } from "../../../shared/api"
4+
import { Anthropic } from "@anthropic-ai/sdk"
5+
6+
// Mock OpenAI
7+
vi.mock("openai", () => {
8+
return {
9+
default: class MockOpenAI {
10+
responses = {
11+
create: vi.fn(),
12+
}
13+
chat = {
14+
completions: {
15+
create: vi.fn(),
16+
},
17+
}
18+
},
19+
}
20+
})
21+
22+
describe("OpenAiNativeHandler - GPT-5 Chat Latest", () => {
23+
let handler: OpenAiNativeHandler
24+
let mockOptions: ApiHandlerOptions
25+
26+
beforeEach(() => {
27+
vi.clearAllMocks()
28+
mockOptions = {
29+
apiModelId: "gpt-5-chat-latest",
30+
openAiNativeApiKey: "test-api-key",
31+
openAiNativeBaseUrl: "https://api.openai.com",
32+
}
33+
handler = new OpenAiNativeHandler(mockOptions)
34+
})
35+
36+
describe("Model Configuration", () => {
37+
it("should correctly configure gpt-5-chat-latest model", () => {
38+
const model = handler.getModel()
39+
40+
expect(model.id).toBe("gpt-5-chat-latest")
41+
expect(model.info.maxTokens).toBe(128000)
42+
expect(model.info.contextWindow).toBe(400000)
43+
expect(model.info.supportsImages).toBe(true)
44+
expect(model.info.supportsPromptCache).toBe(true)
45+
expect(model.info.supportsReasoningEffort).toBe(false) // Non-reasoning model
46+
expect(model.info.description).toBe(
47+
"GPT-5 Chat Latest: Optimized for conversational AI and non-reasoning tasks",
48+
)
49+
})
50+
51+
it("should not include reasoning effort for gpt-5-chat-latest", () => {
52+
const model = handler.getModel()
53+
54+
// Should not have reasoning parameters since it's a non-reasoning model
55+
expect(model.reasoning).toBeUndefined()
56+
})
57+
})
58+
59+
describe("API Endpoint Selection", () => {
60+
it("should use Responses API for gpt-5-chat-latest", async () => {
61+
// Mock fetch for Responses API
62+
const mockFetch = vi.fn().mockResolvedValue({
63+
ok: true,
64+
body: new ReadableStream({
65+
start(controller) {
66+
controller.enqueue(
67+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":"Hello"}\n\n'),
68+
)
69+
controller.enqueue(
70+
new TextEncoder().encode(
71+
'data: {"type":"response.done","response":{"id":"test-id","usage":{"input_tokens":10,"output_tokens":5}}}\n\n',
72+
),
73+
)
74+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
75+
controller.close()
76+
},
77+
}),
78+
})
79+
global.fetch = mockFetch
80+
81+
const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]
82+
83+
const stream = handler.createMessage("System prompt", messages)
84+
const chunks = []
85+
for await (const chunk of stream) {
86+
chunks.push(chunk)
87+
}
88+
89+
// Verify it called the Responses API endpoint
90+
expect(mockFetch).toHaveBeenCalledWith(
91+
"https://api.openai.com/v1/responses",
92+
expect.objectContaining({
93+
method: "POST",
94+
headers: expect.objectContaining({
95+
"Content-Type": "application/json",
96+
Authorization: "Bearer test-api-key",
97+
}),
98+
body: expect.stringContaining('"model":"gpt-5-chat-latest"'),
99+
}),
100+
)
101+
102+
// Verify the request body doesn't include reasoning parameters
103+
const requestBody = JSON.parse(mockFetch.mock.calls[0][1].body)
104+
expect(requestBody.reasoning).toBeUndefined()
105+
})
106+
})
107+
108+
describe("Conversation Features", () => {
109+
it("should support conversation continuity with previous_response_id", async () => {
110+
// Mock fetch for Responses API
111+
const mockFetch = vi.fn().mockResolvedValue({
112+
ok: true,
113+
body: new ReadableStream({
114+
start(controller) {
115+
controller.enqueue(
116+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":"Response"}\n\n'),
117+
)
118+
controller.enqueue(
119+
new TextEncoder().encode(
120+
'data: {"type":"response.done","response":{"id":"response-123","usage":{"input_tokens":10,"output_tokens":5}}}\n\n',
121+
),
122+
)
123+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
124+
controller.close()
125+
},
126+
}),
127+
})
128+
global.fetch = mockFetch
129+
130+
const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Follow-up question" }]
131+
132+
const stream = handler.createMessage("System prompt", messages, {
133+
taskId: "test-task",
134+
previousResponseId: "previous-response-456",
135+
})
136+
137+
const chunks = []
138+
for await (const chunk of stream) {
139+
chunks.push(chunk)
140+
}
141+
142+
// Verify the request includes previous_response_id
143+
const requestBody = JSON.parse(mockFetch.mock.calls[0][1].body)
144+
expect(requestBody.previous_response_id).toBe("previous-response-456")
145+
})
146+
})
147+
})

src/api/providers/openai-native.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1139,7 +1139,8 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
11391139

11401140
private isResponsesApiModel(modelId: string): boolean {
	// All GPT-5 family models — including gpt-5-chat-latest, which the
	// prefix check already matches — and Codex Mini use the v1/responses
	// endpoint. An extra `modelId === "gpt-5-chat-latest"` clause would be
	// dead code, so it is intentionally omitted.
	return modelId.startsWith("gpt-5") || modelId === "codex-mini-latest"
}
11441145

11451146
private async *handleStreamResponse(

0 commit comments

Comments
 (0)