
Commit 4149db7

Add Codex provider integration

Andrei Ungureanu authored and committed
1 parent 866c5b9 commit 4149db7

File tree: 20 files changed, +1176 −14 lines

packages/types/src/provider-settings.ts

Lines changed: 15 additions & 0 deletions
@@ -8,6 +8,7 @@ import {
 	cerebrasModels,
 	chutesModels,
 	claudeCodeModels,
+	codexModels,
 	deepSeekModels,
 	doubaoModels,
 	featherlessModels,
@@ -122,6 +123,7 @@ export const providerNames = [
 	"cerebras",
 	"chutes",
 	"claude-code",
+	"codex",
 	"doubao",
 	"deepseek",
 	"featherless",
@@ -254,6 +256,15 @@ const openAiSchema = baseProviderSettingsSchema.extend({
 	openAiHeaders: z.record(z.string(), z.string()).optional(),
 })
 
+// Codex shares OpenAI settings and adds optional CLI config & binary paths for seamless auth
+const codexSchema = openAiSchema.extend({
+	apiModelId: z.string().optional(),
+	codexCliConfigPath: z.string().optional(),
+	codexCliPath: z.string().optional(),
+	codexDebugEnabled: z.boolean().optional(),
+	codexDebugLogPath: z.string().optional(),
+})
+
 const ollamaSchema = baseProviderSettingsSchema.extend({
 	ollamaModelId: z.string().optional(),
 	ollamaBaseUrl: z.string().optional(),
@@ -418,6 +429,7 @@ const defaultSchema = z.object({
 export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProvider", [
 	anthropicSchema.merge(z.object({ apiProvider: z.literal("anthropic") })),
 	claudeCodeSchema.merge(z.object({ apiProvider: z.literal("claude-code") })),
+	codexSchema.merge(z.object({ apiProvider: z.literal("codex") })),
 	glamaSchema.merge(z.object({ apiProvider: z.literal("glama") })),
 	openRouterSchema.merge(z.object({ apiProvider: z.literal("openrouter") })),
 	bedrockSchema.merge(z.object({ apiProvider: z.literal("bedrock") })),
@@ -459,6 +471,7 @@ export const providerSettingsSchema = z.object({
 	apiProvider: providerNamesSchema.optional(),
 	...anthropicSchema.shape,
 	...claudeCodeSchema.shape,
+	...codexSchema.shape,
 	...glamaSchema.shape,
 	...openRouterSchema.shape,
 	...bedrockSchema.shape,
@@ -548,6 +561,7 @@ export const isTypicalProvider = (key: unknown): key is TypicalProvider =>
 export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
 	anthropic: "apiModelId",
 	"claude-code": "apiModelId",
+	codex: "apiModelId",
 	glama: "glamaModelId",
 	openrouter: "openRouterModelId",
 	bedrock: "apiModelId",
@@ -633,6 +647,7 @@ export const MODELS_BY_PROVIDER: Record<
 		models: Object.keys(chutesModels),
 	},
 	"claude-code": { id: "claude-code", label: "Claude Code", models: Object.keys(claudeCodeModels) },
+	codex: { id: "codex", label: "Codex", models: Object.keys(codexModels) },
 	deepseek: {
 		id: "deepseek",
 		label: "DeepSeek",
packages/types/src/providers/codex.ts

Lines changed: 148 additions & 0 deletions
@@ -0,0 +1,148 @@
+import type { ModelInfo, ReasoningEffortWithMinimal } from "../model.js"
+import { openAiNativeModels } from "./openai.js"
+
+export type CodexModelId =
+	| "gpt-5-codex"
+	| "gpt-5-codex-minimal"
+	| "gpt-5-codex-low"
+	| "gpt-5-codex-medium"
+	| "gpt-5-codex-high"
+	| "gpt-5"
+	| "gpt-5-minimal"
+	| "gpt-5-low"
+	| "gpt-5-medium"
+	| "gpt-5-high"
+	| "codex-mini-latest"
+
+export type CodexCliModelInfo = {
+	id: string
+	label?: string
+	description?: string
+	model?: string
+	effort?: ReasoningEffortWithMinimal
+}
+
+export interface CodexCliPreset {
+	id: CodexModelId
+	label: string
+	description: string
+	cliModel: string
+	effort?: ReasoningEffortWithMinimal
+}
+
+export const codexDefaultModelId: CodexModelId = "gpt-5-codex"
+
+const CLI_DEFAULT_MODEL = "gpt-5"
+
+const codexBaseModelInfo: ModelInfo = {
+	...openAiNativeModels["gpt-5-2025-08-07"],
+	supportsReasoningEffort: true,
+}
+
+export const codexCliPresets: CodexCliPreset[] = [
+	{
+		id: "gpt-5-codex",
+		label: "gpt-5-codex (auto)",
+		description: "— uses your Codex CLI default reasoning effort",
+		cliModel: CLI_DEFAULT_MODEL,
+	},
+	{
+		id: "gpt-5-codex-minimal",
+		label: "gpt-5-codex minimal",
+		description: "— fastest responses with limited reasoning; ideal for lightweight edits and quick fixes",
+		cliModel: CLI_DEFAULT_MODEL,
+		effort: "minimal",
+	},
+	{
+		id: "gpt-5-codex-low",
+		label: "gpt-5-codex low",
+		description: "— prioritises speed while keeping some reasoning depth for straightforward coding tasks",
+		cliModel: CLI_DEFAULT_MODEL,
+		effort: "low",
+	},
+	{
+		id: "gpt-5-codex-medium",
+		label: "gpt-5-codex medium",
+		description: "— balanced reasoning for everyday development work (Codex CLI default)",
+		cliModel: CLI_DEFAULT_MODEL,
+		effort: "medium",
+	},
+	{
+		id: "gpt-5-codex-high",
+		label: "gpt-5-codex high",
+		description: "— maximum reasoning depth for complex or ambiguous engineering problems",
+		cliModel: CLI_DEFAULT_MODEL,
+		effort: "high",
+	},
+]
+
+const legacyRedirectEntries: ReadonlyArray<[string, CodexModelId]> = [
+	["gpt-5", "gpt-5-codex"],
+	["gpt-5-minimal", "gpt-5-codex-minimal"],
+	["gpt-5-low", "gpt-5-codex-low"],
+	["gpt-5-medium", "gpt-5-codex-medium"],
+	["gpt-5-high", "gpt-5-codex-high"],
+	["gpt-5-codex", "gpt-5-codex"],
+	["gpt-5-codex-minimal", "gpt-5-codex-minimal"],
+	["gpt-5-codex-low", "gpt-5-codex-low"],
+	["gpt-5-codex-medium", "gpt-5-codex-medium"],
+	["gpt-5-codex-high", "gpt-5-codex-high"],
+	["codex-mini-latest", "codex-mini-latest"],
+]
+
+export const codexLegacyModelRedirects: Record<string, CodexModelId> = Object.fromEntries(legacyRedirectEntries)
+
+const presetMap = new Map<CodexModelId, CodexCliPreset>()
+
+for (const preset of codexCliPresets) {
+	presetMap.set(preset.id, preset)
+}
+
+for (const [legacyId, targetId] of legacyRedirectEntries) {
+	const target = presetMap.get(targetId as CodexModelId)
+	if (target && !presetMap.has(legacyId as CodexModelId)) {
+		presetMap.set(legacyId as CodexModelId, {
+			...target,
+			id: legacyId as CodexModelId,
+		})
+	}
+}
+
+const derivedModels: Record<string, ModelInfo> = {}
+
+for (const preset of presetMap.values()) {
+	derivedModels[preset.id] = {
+		...codexBaseModelInfo,
+		description: preset.description || codexBaseModelInfo.description,
+	}
+}
+
+if (openAiNativeModels["codex-mini-latest"]) {
+	derivedModels["codex-mini-latest"] = {
+		...openAiNativeModels["codex-mini-latest"],
+		supportsReasoningEffort: true,
+	}
+}
+
+export const codexModels = derivedModels as Record<CodexModelId, ModelInfo>
+
+export const fallbackCodexCliModels: CodexCliModelInfo[] = codexCliPresets.map((preset) => ({
+	id: preset.id,
+	label: preset.label,
+	description: preset.description,
+	model: preset.cliModel,
+	effort: preset.effort,
+}))
+
+export const normalizeCodexModelId = (id?: string | null): CodexModelId => {
+	const trimmed = (id ?? "").trim()
+	if (!trimmed) {
+		return codexDefaultModelId
+	}
+	return (codexLegacyModelRedirects[trimmed] ?? trimmed) as CodexModelId
+}
+
+export const getCodexPreset = (id: string | undefined) => {
+	const normalized = normalizeCodexModelId(id)
+	return presetMap.get(normalized)
+}
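
A quick sketch of how the helpers above behave, with inputs and results derived from the redirect table and presets in this file:

// Illustrative usage of normalizeCodexModelId and getCodexPreset.
normalizeCodexModelId("gpt-5-low")       // "gpt-5-codex-low" via the redirect table
normalizeCodexModelId(undefined)         // "gpt-5-codex" (codexDefaultModelId)
getCodexPreset("gpt-5-medium")?.effort   // "medium", resolved through the legacy alias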

packages/types/src/providers/index.ts

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@ export * from "./moonshot.js"
 export * from "./ollama.js"
 export * from "./openai.js"
 export * from "./openrouter.js"
+export * from "./codex.js"
 export * from "./qwen-code.js"
 export * from "./requesty.js"
 export * from "./roo.js"

packages/types/src/providers/openai.ts

Lines changed: 18 additions & 0 deletions
@@ -37,6 +37,24 @@ export const openAiNativeModels = {
 			{ name: "priority", contextWindow: 400000, inputPrice: 2.5, outputPrice: 20.0, cacheReadsPrice: 0.25 },
 		],
 	},
+	"gpt-5-codex": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: true,
+		reasoningEffort: "medium",
+		inputPrice: 1.25,
+		outputPrice: 10.0,
+		cacheReadsPrice: 0.13,
+		description: "GPT-5 Codex: Optimized for software engineering workflows with Codex tooling integration",
+		supportsVerbosity: true,
+		supportsTemperature: false,
+		tiers: [
+			{ name: "flex", contextWindow: 400000, inputPrice: 0.625, outputPrice: 5.0, cacheReadsPrice: 0.0625 },
+			{ name: "priority", contextWindow: 400000, inputPrice: 2.5, outputPrice: 20.0, cacheReadsPrice: 0.25 },
+		],
+	},
 	"gpt-5-mini-2025-08-07": {
 		maxTokens: 128000,
 		contextWindow: 400000,
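
The prices above are USD per million tokens at the standard tier; a back-of-the-envelope check (the helper is hypothetical, not part of the commit):

// Hypothetical helper: standard-tier request cost for gpt-5-codex.
const gpt5CodexCostUsd = (input: number, cachedInput: number, output: number) =>
	(input * 1.25 + cachedInput * 0.13 + output * 10.0) / 1_000_000

gpt5CodexCostUsd(100_000, 50_000, 20_000) // 0.125 + 0.0065 + 0.2 = 0.3315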

src/api/index.ts

Lines changed: 3 additions & 0 deletions
@@ -10,6 +10,7 @@ import {
 	AwsBedrockHandler,
 	CerebrasHandler,
 	OpenRouterHandler,
+	CodexHandler,
 	VertexHandler,
 	AnthropicVertexHandler,
 	OpenAiHandler,
@@ -121,6 +122,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
 			return new DoubaoHandler(options)
 		case "qwen-code":
 			return new QwenCodeHandler(options)
+		case "codex":
+			return new CodexHandler(options)
 		case "moonshot":
 			return new MoonshotHandler(options)
 		case "vscode-lm":

src/api/providers/__tests__/codex.spec.ts

Lines changed: 9 additions & 2 deletions
@@ -1,4 +1,5 @@
 import { describe, it, expect, beforeEach, afterEach, vi } from "vitest"
+import type { MockInstance } from "vitest"
 import type { Anthropic } from "@anthropic-ai/sdk"
 
 import type { ApiStreamChunk } from "../../transform/stream"
@@ -53,11 +54,17 @@ function asyncGeneratorFromChunks(chunks: ApiStreamChunk[]): AsyncGenerator<ApiS
 
 describe("CodexHandler", () => {
 	let handler: CodexHandler
-	let fallbackSpy: ReturnType<typeof vi.spyOn>
+	let fallbackSpy: MockInstance<typeof OpenAiNativeHandler.prototype.createMessage>
 
 	beforeEach(() => {
 		createSessionMock.mockReset()
-		fallbackSpy = vi.spyOn(OpenAiNativeHandler.prototype, "createMessage").mockImplementation(async function* () {
+		fallbackSpy = vi.spyOn(OpenAiNativeHandler.prototype, "createMessage")
+		fallbackSpy.mockImplementation(async function* (
+			this: OpenAiNativeHandler,
+			_systemPrompt: string,
+			_messages: Anthropic.Messages.MessageParam[],
+			_metadata?: ApiHandlerCreateMessageMetadata,
+		) {
 			yield { type: "text", text: "[fallback]" }
 		})
 		handler = new CodexHandler(defaultOptions)
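
Typing the spy as MockInstance<typeof OpenAiNativeHandler.prototype.createMessage> rather than ReturnType<typeof vi.spyOn> keeps the mock aligned with the real method signature, e.g. (illustrative only):

// Illustrative: the typed spy exposes call tuples typed against createMessage,
// so this destructuring type-checks against the real parameter list.
const [systemPrompt, messages, metadata] = fallbackSpy.mock.calls[0]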

src/api/providers/index.ts

Lines changed: 1 addition & 0 deletions
@@ -21,6 +21,7 @@ export { OllamaHandler } from "./ollama"
 export { OpenAiNativeHandler } from "./openai-native"
 export { OpenAiHandler } from "./openai"
 export { OpenRouterHandler } from "./openrouter"
+export { CodexHandler } from "./codex"
 export { QwenCodeHandler } from "./qwen-code"
 export { RequestyHandler } from "./requesty"
 export { SambaNovaHandler } from "./sambanova"
