Skip to content
Closed
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions packages/types/src/provider-settings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import {
cerebrasModels,
chutesModels,
claudeCodeModels,
codexModels,
deepSeekModels,
doubaoModels,
featherlessModels,
Expand Down Expand Up @@ -122,6 +123,7 @@ export const providerNames = [
"cerebras",
"chutes",
"claude-code",
"codex",
"doubao",
"deepseek",
"featherless",
Expand Down Expand Up @@ -254,6 +256,15 @@ const openAiSchema = baseProviderSettingsSchema.extend({
openAiHeaders: z.record(z.string(), z.string()).optional(),
})

// Codex shares OpenAI settings and adds optional CLI config & binary paths for seamless auth
const codexSchema = openAiSchema.extend({
	// Model/preset id selected for this provider.
	apiModelId: z.string().optional(),
	// Path to the Codex CLI configuration file (used to reuse the CLI's auth — see comment above).
	codexCliConfigPath: z.string().optional(),
	// Path to the Codex CLI binary, for when it is not discoverable on PATH.
	codexCliPath: z.string().optional(),
	// Toggles debug logging for Codex requests.
	codexDebugEnabled: z.boolean().optional(),
	// Destination file for the debug log when debugging is enabled.
	codexDebugLogPath: z.string().optional(),
})

const ollamaSchema = baseProviderSettingsSchema.extend({
ollamaModelId: z.string().optional(),
ollamaBaseUrl: z.string().optional(),
Expand Down Expand Up @@ -418,6 +429,7 @@ const defaultSchema = z.object({
export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProvider", [
anthropicSchema.merge(z.object({ apiProvider: z.literal("anthropic") })),
claudeCodeSchema.merge(z.object({ apiProvider: z.literal("claude-code") })),
codexSchema.merge(z.object({ apiProvider: z.literal("codex") })),
glamaSchema.merge(z.object({ apiProvider: z.literal("glama") })),
openRouterSchema.merge(z.object({ apiProvider: z.literal("openrouter") })),
bedrockSchema.merge(z.object({ apiProvider: z.literal("bedrock") })),
Expand Down Expand Up @@ -459,6 +471,7 @@ export const providerSettingsSchema = z.object({
apiProvider: providerNamesSchema.optional(),
...anthropicSchema.shape,
...claudeCodeSchema.shape,
...codexSchema.shape,
...glamaSchema.shape,
...openRouterSchema.shape,
...bedrockSchema.shape,
Expand Down Expand Up @@ -548,6 +561,7 @@ export const isTypicalProvider = (key: unknown): key is TypicalProvider =>
export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
anthropic: "apiModelId",
"claude-code": "apiModelId",
codex: "apiModelId",
glama: "glamaModelId",
openrouter: "openRouterModelId",
bedrock: "apiModelId",
Expand Down Expand Up @@ -633,6 +647,7 @@ export const MODELS_BY_PROVIDER: Record<
models: Object.keys(chutesModels),
},
"claude-code": { id: "claude-code", label: "Claude Code", models: Object.keys(claudeCodeModels) },
codex: { id: "codex", label: "Codex", models: Object.keys(codexModels) },
deepseek: {
id: "deepseek",
label: "DeepSeek",
Expand Down
148 changes: 148 additions & 0 deletions packages/types/src/providers/codex.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
import type { ModelInfo, ReasoningEffortWithMinimal } from "../model.js"
import { openAiNativeModels } from "./openai.js"

/** Every model / reasoning-effort preset id the Codex provider accepts (including legacy ids). */
export type CodexModelId =
	| "gpt-5-codex"
	| "gpt-5-codex-minimal"
	| "gpt-5-codex-low"
	| "gpt-5-codex-medium"
	| "gpt-5-codex-high"
	| "gpt-5"
	| "gpt-5-minimal"
	| "gpt-5-low"
	| "gpt-5-medium"
	| "gpt-5-high"
	| "codex-mini-latest"

/**
 * A model entry for the Codex CLI. Only `id` is required; the remaining
 * fields are optional because entries may omit them (see fallbackCodexCliModels,
 * which populates all of them from the built-in presets).
 */
export type CodexCliModelInfo = {
	id: string
	label?: string
	description?: string
	/** Underlying model name forwarded to the CLI, when known. */
	model?: string
	/** Reasoning effort pinned by this entry, if any. */
	effort?: ReasoningEffortWithMinimal
}

/** A built-in preset tying a provider-facing id to a CLI model plus an optional reasoning effort. */
export interface CodexCliPreset {
	id: CodexModelId
	label: string
	description: string
	/** Model name passed to the Codex CLI for this preset. */
	cliModel: string
	/** Reasoning effort for the preset; left unset for the "auto" preset so the CLI default applies. */
	effort?: ReasoningEffortWithMinimal
}

/** Preset selected when no model id is configured (see normalizeCodexModelId). */
export const codexDefaultModelId: CodexModelId = "gpt-5-codex"

// Model name every gpt-5-codex preset forwards to the Codex CLI; the presets
// differ only in the reasoning effort they request, not the CLI model.
const CLI_DEFAULT_MODEL = "gpt-5"

// Base ModelInfo shared by all derived presets: the GPT-5 native catalog entry
// with reasoning-effort support forced on (presets vary only by effort).
const codexBaseModelInfo: ModelInfo = {
	...openAiNativeModels["gpt-5-2025-08-07"],
	supportsReasoningEffort: true,
}

/**
 * Built-in reasoning-effort presets surfaced as selectable "models".
 * All presets forward the same CLI model (CLI_DEFAULT_MODEL); the first entry
 * leaves `effort` unset so the Codex CLI's own default reasoning effort applies.
 */
export const codexCliPresets: CodexCliPreset[] = [
	{
		id: "gpt-5-codex",
		label: "gpt-5-codex (auto)",
		description: "— uses your Codex CLI default reasoning effort",
		cliModel: CLI_DEFAULT_MODEL,
	},
	{
		id: "gpt-5-codex-minimal",
		label: "gpt-5-codex minimal",
		description: "— fastest responses with limited reasoning; ideal for lightweight edits and quick fixes",
		cliModel: CLI_DEFAULT_MODEL,
		effort: "minimal",
	},
	{
		id: "gpt-5-codex-low",
		label: "gpt-5-codex low",
		description: "— prioritises speed while keeping some reasoning depth for straightforward coding tasks",
		cliModel: CLI_DEFAULT_MODEL,
		effort: "low",
	},
	{
		id: "gpt-5-codex-medium",
		label: "gpt-5-codex medium",
		description: "— balanced reasoning for everyday development work (Codex CLI default)",
		cliModel: CLI_DEFAULT_MODEL,
		effort: "medium",
	},
	{
		id: "gpt-5-codex-high",
		label: "gpt-5-codex high",
		description: "— maximum reasoning depth for complex or ambiguous engineering problems",
		cliModel: CLI_DEFAULT_MODEL,
		effort: "high",
	},
]

// Alias table mapping legacy "gpt-5-*" ids onto the current "gpt-5-codex-*"
// preset ids. Identity entries are included deliberately so the exported
// redirect table contains a key for every currently-accepted id.
const legacyRedirectEntries: ReadonlyArray<[string, CodexModelId]> = [
	["gpt-5", "gpt-5-codex"],
	["gpt-5-minimal", "gpt-5-codex-minimal"],
	["gpt-5-low", "gpt-5-codex-low"],
	["gpt-5-medium", "gpt-5-codex-medium"],
	["gpt-5-high", "gpt-5-codex-high"],
	["gpt-5-codex", "gpt-5-codex"],
	["gpt-5-codex-minimal", "gpt-5-codex-minimal"],
	["gpt-5-codex-low", "gpt-5-codex-low"],
	["gpt-5-codex-medium", "gpt-5-codex-medium"],
	["gpt-5-codex-high", "gpt-5-codex-high"],
	["codex-mini-latest", "codex-mini-latest"],
]

/** Object form of the redirect table, consumed by normalizeCodexModelId and exported for callers. */
export const codexLegacyModelRedirects: Record<string, CodexModelId> = Object.fromEntries(legacyRedirectEntries)

// Lookup from every accepted preset id — current or legacy alias — to its preset.
// Seed it with the canonical presets, keyed by their own ids.
const presetMap = new Map<CodexModelId, CodexCliPreset>(codexCliPresets.map((preset) => [preset.id, preset]))

// Register legacy aliases: each alias maps to a copy of its target preset that
// carries the alias as its id. Canonical entries are never overwritten because
// aliases that already exist in the map are skipped.
legacyRedirectEntries.forEach(([legacyId, targetId]) => {
	const aliasId = legacyId as CodexModelId
	const target = presetMap.get(targetId)
	if (target !== undefined && !presetMap.has(aliasId)) {
		presetMap.set(aliasId, { ...target, id: aliasId })
	}
})

const derivedModels: Record<string, ModelInfo> = {}

for (const preset of presetMap.values()) {
derivedModels[preset.id] = {
...codexBaseModelInfo,
description: preset.description || codexBaseModelInfo.description,
}
}

if (openAiNativeModels["codex-mini-latest"]) {
derivedModels["codex-mini-latest"] = {
...openAiNativeModels["codex-mini-latest"],
supportsReasoningEffort: true,
}
}

// Catalog of Codex models keyed by id, built above from the presets plus
// codex-mini-latest. NOTE(review): the cast assumes every CodexModelId key is
// present; codex-mini-latest is only added when it exists in the native catalog.
export const codexModels = derivedModels as Record<CodexModelId, ModelInfo>

// Static model list derived from the built-in presets, exposed as CLI model
// entries (one per preset, with the preset's CLI model and effort).
export const fallbackCodexCliModels: CodexCliModelInfo[] = codexCliPresets.map(
	({ id, label, description, cliModel, effort }) => ({
		id,
		label,
		description,
		model: cliModel,
		effort,
	}),
)

/**
 * Canonicalize a raw model id: trims whitespace, falls back to the default
 * preset for empty/missing input, and maps legacy ids to their current
 * equivalents. Unrecognized non-empty ids pass through unchanged (the cast is
 * an assertion, not a validation).
 */
export const normalizeCodexModelId = (id?: string | null): CodexModelId => {
	const candidate = id?.trim() ?? ""
	if (candidate === "") {
		return codexDefaultModelId
	}
	const redirected = codexLegacyModelRedirects[candidate]
	return (redirected ?? candidate) as CodexModelId
}

export const getCodexPreset = (id: string | undefined) => {
const normalized = normalizeCodexModelId(id)
return presetMap.get(normalized)
}
1 change: 1 addition & 0 deletions packages/types/src/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ export * from "./moonshot.js"
export * from "./ollama.js"
export * from "./openai.js"
export * from "./openrouter.js"
export * from "./codex.js"
export * from "./qwen-code.js"
export * from "./requesty.js"
export * from "./roo.js"
Expand Down
18 changes: 18 additions & 0 deletions packages/types/src/providers/openai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,24 @@ export const openAiNativeModels = {
{ name: "priority", contextWindow: 400000, inputPrice: 2.5, outputPrice: 20.0, cacheReadsPrice: 0.25 },
],
},
"gpt-5-codex": {
maxTokens: 128000,
contextWindow: 400000,
supportsImages: true,
supportsPromptCache: true,
supportsReasoningEffort: true,
reasoningEffort: "medium",
inputPrice: 1.25,
outputPrice: 10.0,
cacheReadsPrice: 0.13,
description: "GPT-5 Codex: Optimized for software engineering workflows with Codex tooling integration",
supportsVerbosity: true,
supportsTemperature: false,
tiers: [
{ name: "flex", contextWindow: 400000, inputPrice: 0.625, outputPrice: 5.0, cacheReadsPrice: 0.0625 },
{ name: "priority", contextWindow: 400000, inputPrice: 2.5, outputPrice: 20.0, cacheReadsPrice: 0.25 },
],
},
"gpt-5-mini-2025-08-07": {
maxTokens: 128000,
contextWindow: 400000,
Expand Down
3 changes: 3 additions & 0 deletions src/api/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import {
AwsBedrockHandler,
CerebrasHandler,
OpenRouterHandler,
CodexHandler,
VertexHandler,
AnthropicVertexHandler,
OpenAiHandler,
Expand Down Expand Up @@ -121,6 +122,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
return new DoubaoHandler(options)
case "qwen-code":
return new QwenCodeHandler(options)
case "codex":
return new CodexHandler(options)
case "moonshot":
return new MoonshotHandler(options)
case "vscode-lm":
Expand Down
Loading
Loading