Skip to content

Commit 6430042

Browse files
committed
test: add comprehensive tests for codex-cli-native provider
- Test factory integration with OpenAiNativeHandler
- Test token passing from secrets to API handler
- Test streaming and non-streaming responses
- Test custom base URL configuration
- Test all supported models
1 parent 3f4b427 commit 6430042

File tree

1 file changed

+308
-0
lines changed

1 file changed

+308
-0
lines changed
Lines changed: 308 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,308 @@
1+
// npx vitest run api/providers/__tests__/codex-cli-native.spec.ts
2+
3+
import { Anthropic } from "@anthropic-ai/sdk"
4+
import { OpenAiNativeHandler } from "../openai-native"
5+
import { buildApiHandler } from "../../index"
6+
import { ApiHandlerOptions } from "../../../shared/api"
7+
8+
// Mock OpenAI client
9+
const mockResponsesCreate = vitest.fn()
10+
11+
vitest.mock("openai", () => {
12+
return {
13+
__esModule: true,
14+
default: vitest.fn().mockImplementation(() => ({
15+
responses: {
16+
create: mockResponsesCreate,
17+
},
18+
})),
19+
}
20+
})
21+
22+
describe("Codex CLI Native Provider", () => {
23+
let handler: OpenAiNativeHandler
24+
let mockOptions: ApiHandlerOptions
25+
const systemPrompt = "You are a helpful assistant."
26+
const messages: Anthropic.Messages.MessageParam[] = [
27+
{
28+
role: "user",
29+
content: "Hello!",
30+
},
31+
]
32+
33+
beforeEach(() => {
34+
mockOptions = {
35+
apiProvider: "codex-cli-native",
36+
apiModelId: "gpt-4o",
37+
codexCliOpenAiNativeToken: "test-bearer-token",
38+
} as any
39+
mockResponsesCreate.mockClear()
40+
// Clear fetch mock if it exists
41+
if ((global as any).fetch) {
42+
delete (global as any).fetch
43+
}
44+
})
45+
46+
afterEach(() => {
47+
// Clean up fetch mock
48+
if ((global as any).fetch) {
49+
delete (global as any).fetch
50+
}
51+
})
52+
53+
describe("Factory Integration", () => {
54+
it("should create OpenAiNativeHandler when codex-cli-native is selected", () => {
55+
const handler = buildApiHandler(mockOptions as any)
56+
expect(handler).toBeInstanceOf(OpenAiNativeHandler)
57+
})
58+
59+
it("should pass the token from codexCliOpenAiNativeToken to openAiNativeApiKey", async () => {
60+
// Mock fetch for testing
61+
const mockFetch = vitest.fn().mockResolvedValue({
62+
ok: true,
63+
body: new ReadableStream({
64+
start(controller) {
65+
controller.enqueue(
66+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":"Test"}\n\n'),
67+
)
68+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
69+
controller.close()
70+
},
71+
}),
72+
})
73+
global.fetch = mockFetch as any
74+
75+
// Mock SDK to fail so it uses fetch
76+
mockResponsesCreate.mockRejectedValue(new Error("SDK not available"))
77+
78+
const handler = buildApiHandler(mockOptions as any)
79+
const stream = handler.createMessage(systemPrompt, messages)
80+
81+
// Consume the stream to trigger the fetch call
82+
for await (const chunk of stream) {
83+
// Just consume the stream
84+
}
85+
86+
// Verify the Authorization header uses the token from codexCliOpenAiNativeToken
87+
expect(mockFetch).toHaveBeenCalledWith(
88+
expect.any(String),
89+
expect.objectContaining({
90+
headers: expect.objectContaining({
91+
Authorization: "Bearer test-bearer-token",
92+
}),
93+
}),
94+
)
95+
})
96+
97+
it("should use default OpenAI base URL if not specified", async () => {
98+
// Mock fetch for testing
99+
const mockFetch = vitest.fn().mockResolvedValue({
100+
ok: true,
101+
body: new ReadableStream({
102+
start(controller) {
103+
controller.enqueue(
104+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":"Test"}\n\n'),
105+
)
106+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
107+
controller.close()
108+
},
109+
}),
110+
})
111+
global.fetch = mockFetch as any
112+
113+
// Mock SDK to fail so it uses fetch
114+
mockResponsesCreate.mockRejectedValue(new Error("SDK not available"))
115+
116+
const handler = buildApiHandler(mockOptions as any)
117+
const stream = handler.createMessage(systemPrompt, messages)
118+
119+
// Consume the stream to trigger the fetch call
120+
for await (const chunk of stream) {
121+
// Just consume the stream
122+
}
123+
124+
// Verify it uses the default OpenAI API endpoint (Responses API for gpt-4o)
125+
expect(mockFetch).toHaveBeenCalledWith("https://api.openai.com/v1/responses", expect.any(Object))
126+
})
127+
128+
it("should use custom base URL if provided", async () => {
129+
// Mock fetch for testing
130+
const mockFetch = vitest.fn().mockResolvedValue({
131+
ok: true,
132+
body: new ReadableStream({
133+
start(controller) {
134+
controller.enqueue(
135+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":"Test"}\n\n'),
136+
)
137+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
138+
controller.close()
139+
},
140+
}),
141+
})
142+
global.fetch = mockFetch as any
143+
144+
// Mock SDK to fail so it uses fetch
145+
mockResponsesCreate.mockRejectedValue(new Error("SDK not available"))
146+
147+
const optionsWithCustomUrl = {
148+
...mockOptions,
149+
openAiNativeBaseUrl: "https://custom.api.com",
150+
}
151+
152+
const handler = buildApiHandler(optionsWithCustomUrl as any)
153+
const stream = handler.createMessage(systemPrompt, messages)
154+
155+
// Consume the stream to trigger the fetch call
156+
for await (const chunk of stream) {
157+
// Just consume the stream
158+
}
159+
160+
// Verify it uses the custom base URL (Responses API for gpt-4o)
161+
expect(mockFetch).toHaveBeenCalledWith("https://custom.api.com/v1/responses", expect.any(Object))
162+
})
163+
})
164+
165+
describe("Streaming", () => {
166+
it("should handle streaming responses via Responses API", async () => {
167+
// Mock fetch for Responses API fallback
168+
const mockFetch = vitest.fn().mockResolvedValue({
169+
ok: true,
170+
body: new ReadableStream({
171+
start(controller) {
172+
controller.enqueue(
173+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":"Hello"}\n\n'),
174+
)
175+
controller.enqueue(
176+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":" from"}\n\n'),
177+
)
178+
controller.enqueue(
179+
new TextEncoder().encode('data: {"type":"response.text.delta","delta":" Codex"}\n\n'),
180+
)
181+
controller.enqueue(
182+
new TextEncoder().encode(
183+
'data: {"type":"response.done","response":{"usage":{"prompt_tokens":10,"completion_tokens":3}}}\n\n',
184+
),
185+
)
186+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
187+
controller.close()
188+
},
189+
}),
190+
})
191+
global.fetch = mockFetch as any
192+
193+
// Mock SDK to fail so it falls back to fetch
194+
mockResponsesCreate.mockRejectedValue(new Error("SDK not available"))
195+
196+
const handler = buildApiHandler(mockOptions as any)
197+
const stream = handler.createMessage(systemPrompt, messages)
198+
const chunks: any[] = []
199+
for await (const chunk of stream) {
200+
chunks.push(chunk)
201+
}
202+
203+
expect(chunks.length).toBeGreaterThan(0)
204+
const textChunks = chunks.filter((chunk) => chunk.type === "text")
205+
expect(textChunks).toHaveLength(3)
206+
expect(textChunks[0].text).toBe("Hello")
207+
expect(textChunks[1].text).toBe(" from")
208+
expect(textChunks[2].text).toBe(" Codex")
209+
})
210+
211+
it("should handle API errors", async () => {
212+
// Mock fetch to return error
213+
const mockFetch = vitest.fn().mockResolvedValue({
214+
ok: false,
215+
status: 401,
216+
text: async () => "Unauthorized",
217+
})
218+
global.fetch = mockFetch as any
219+
220+
// Mock SDK to fail
221+
mockResponsesCreate.mockRejectedValue(new Error("SDK not available"))
222+
223+
const handler = buildApiHandler(mockOptions as any)
224+
const stream = handler.createMessage(systemPrompt, messages)
225+
226+
await expect(async () => {
227+
for await (const _chunk of stream) {
228+
// Should not reach here
229+
}
230+
}).rejects.toThrow("Authentication failed")
231+
})
232+
})
233+
234+
describe("Non-streaming completion", () => {
235+
it("should handle non-streaming completion using Responses API", async () => {
236+
// Mock the responses.create method to return a non-streaming response
237+
mockResponsesCreate.mockResolvedValue({
238+
output: [
239+
{
240+
type: "message",
241+
content: [
242+
{
243+
type: "output_text",
244+
text: "This is the completion response from Codex CLI",
245+
},
246+
],
247+
},
248+
],
249+
})
250+
251+
const handler = buildApiHandler(mockOptions as any) as OpenAiNativeHandler
252+
const result = await handler.completePrompt("Test prompt")
253+
254+
expect(result).toBe("This is the completion response from Codex CLI")
255+
expect(mockResponsesCreate).toHaveBeenCalledWith(
256+
expect.objectContaining({
257+
model: "gpt-4o",
258+
stream: false,
259+
store: false,
260+
input: [
261+
{
262+
role: "user",
263+
content: [{ type: "input_text", text: "Test prompt" }],
264+
},
265+
],
266+
}),
267+
)
268+
})
269+
})
270+
271+
describe("Model selection", () => {
272+
it("should support all OpenAI native models", () => {
273+
const models = ["gpt-4o", "gpt-4o-mini", "gpt-5-2025-08-07", "gpt-5-mini-2025-08-07"]
274+
275+
models.forEach((modelId) => {
276+
const options = {
277+
...mockOptions,
278+
apiModelId: modelId,
279+
}
280+
const handler = buildApiHandler(options as any)
281+
const modelInfo = handler.getModel()
282+
expect(modelInfo.id).toBe(modelId)
283+
expect(modelInfo.info).toBeDefined()
284+
})
285+
})
286+
})
287+
288+
describe("Token validation", () => {
289+
it("should work with empty token (for initial setup)", () => {
290+
const optionsWithoutToken = {
291+
...mockOptions,
292+
codexCliOpenAiNativeToken: "",
293+
}
294+
const handler = buildApiHandler(optionsWithoutToken as any)
295+
expect(handler).toBeInstanceOf(OpenAiNativeHandler)
296+
})
297+
298+
it("should work with undefined token (before sign-in)", () => {
299+
const optionsWithoutToken = {
300+
apiProvider: "codex-cli-native" as const,
301+
apiModelId: "gpt-4o",
302+
// codexCliOpenAiNativeToken is not provided
303+
} as any
304+
const handler = buildApiHandler(optionsWithoutToken)
305+
expect(handler).toBeInstanceOf(OpenAiNativeHandler)
306+
})
307+
})
308+
})

0 commit comments

Comments
 (0)