Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions packages/types/src/provider-settings.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import {
cerebrasModels,
chutesModels,
claudeCodeModels,
codexCliModels,
deepSeekModels,
doubaoModels,
featherlessModels,
Expand All @@ -34,6 +35,7 @@ import {
export const providerNames = [
"anthropic",
"claude-code",
"codex-cli",
"glama",
"openrouter",
"bedrock",
Expand Down Expand Up @@ -338,6 +340,11 @@ const rooSchema = apiModelIdProviderModelSchema.extend({
// No additional fields needed - uses cloud authentication
})

// Settings for the "codex-cli" provider: OpenAI models accessed through a
// locally installed Codex CLI instead of the HTTP API.
const codexCliSchema = apiModelIdProviderModelSchema.extend({
	codexCliPath: z.string().optional(), // Optional custom path to the Codex CLI executable
	codexCliSessionToken: z.string().optional(), // Session token for CLI auth — stored securely, not in plain settings
})

const vercelAiGatewaySchema = baseProviderSettingsSchema.extend({
vercelAiGatewayApiKey: z.string().optional(),
vercelAiGatewayModelId: z.string().optional(),
Expand All @@ -350,6 +357,7 @@ const defaultSchema = z.object({
export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProvider", [
anthropicSchema.merge(z.object({ apiProvider: z.literal("anthropic") })),
claudeCodeSchema.merge(z.object({ apiProvider: z.literal("claude-code") })),
codexCliSchema.merge(z.object({ apiProvider: z.literal("codex-cli") })),
glamaSchema.merge(z.object({ apiProvider: z.literal("glama") })),
openRouterSchema.merge(z.object({ apiProvider: z.literal("openrouter") })),
bedrockSchema.merge(z.object({ apiProvider: z.literal("bedrock") })),
Expand Down Expand Up @@ -391,6 +399,7 @@ export const providerSettingsSchema = z.object({
apiProvider: providerNamesSchema.optional(),
...anthropicSchema.shape,
...claudeCodeSchema.shape,
...codexCliSchema.shape,
...glamaSchema.shape,
...openRouterSchema.shape,
...bedrockSchema.shape,
Expand Down Expand Up @@ -507,6 +516,7 @@ export const MODELS_BY_PROVIDER: Record<
models: Object.keys(chutesModels),
},
"claude-code": { id: "claude-code", label: "Claude Code", models: Object.keys(claudeCodeModels) },
"codex-cli": { id: "codex-cli", label: "Codex CLI", models: Object.keys(codexCliModels) },
deepseek: {
id: "deepseek",
label: "DeepSeek",
Expand Down
167 changes: 167 additions & 0 deletions packages/types/src/providers/codex-cli.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,167 @@
import type { ModelInfo } from "../model.js"

// Codex CLI models — same catalog as the OpenAI provider, but requests are
// routed through the locally installed Codex CLI rather than the HTTP API.
// Union of valid model ids, derived from the keys of `codexCliModels` below.
export type CodexCliModelId = keyof typeof codexCliModels

// Model used when the user has not explicitly selected one.
export const codexCliDefaultModelId: CodexCliModelId = "gpt-5-2025-08-07"

// These models mirror OpenAI's offerings but are accessed through the Codex CLI
// These models mirror OpenAI's offerings but are accessed through the Codex CLI.
// Prices are USD per 1M tokens (input/output/cache-read). Numeric literals use
// underscore separators consistently; `as const satisfies` keeps the literal
// key/value types while validating every entry against `ModelInfo`.
export const codexCliModels = {
	// --- GPT-5 family ---
	"gpt-5-chat-latest": {
		maxTokens: 128_000,
		contextWindow: 400_000,
		supportsImages: true,
		supportsPromptCache: true,
		supportsReasoningEffort: false,
		inputPrice: 1.25,
		outputPrice: 10.0,
		cacheReadsPrice: 0.13,
		description: "GPT-5 Chat Latest: Optimized for conversational AI and non-reasoning tasks",
		supportsVerbosity: true,
	},
	"gpt-5-2025-08-07": {
		maxTokens: 128_000,
		contextWindow: 400_000,
		supportsImages: true,
		supportsPromptCache: true,
		supportsReasoningEffort: true,
		reasoningEffort: "medium",
		inputPrice: 1.25,
		outputPrice: 10.0,
		cacheReadsPrice: 0.13,
		description: "GPT-5: The best model for coding and agentic tasks across domains",
		supportsVerbosity: true,
		supportsTemperature: false,
	},
	"gpt-5-mini-2025-08-07": {
		maxTokens: 128_000,
		contextWindow: 400_000,
		supportsImages: true,
		supportsPromptCache: true,
		supportsReasoningEffort: true,
		reasoningEffort: "medium",
		inputPrice: 0.25,
		outputPrice: 2.0,
		cacheReadsPrice: 0.03,
		description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks",
		supportsVerbosity: true,
		supportsTemperature: false,
	},
	"gpt-5-nano-2025-08-07": {
		maxTokens: 128_000,
		contextWindow: 400_000,
		supportsImages: true,
		supportsPromptCache: true,
		supportsReasoningEffort: true,
		reasoningEffort: "medium",
		inputPrice: 0.05,
		outputPrice: 0.4,
		cacheReadsPrice: 0.01,
		description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5",
		supportsVerbosity: true,
		supportsTemperature: false,
	},
	// --- GPT-4.1 family ---
	"gpt-4.1": {
		maxTokens: 32_768,
		contextWindow: 1_047_576,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 2,
		outputPrice: 8,
		cacheReadsPrice: 0.5,
		supportsTemperature: true,
	},
	"gpt-4.1-mini": {
		maxTokens: 32_768,
		contextWindow: 1_047_576,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 0.4,
		outputPrice: 1.6,
		cacheReadsPrice: 0.1,
		supportsTemperature: true,
	},
	"gpt-4.1-nano": {
		maxTokens: 32_768,
		contextWindow: 1_047_576,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 0.1,
		outputPrice: 0.4,
		cacheReadsPrice: 0.025,
		supportsTemperature: true,
	},
	// --- GPT-4o family ---
	"gpt-4o": {
		maxTokens: 16_384,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 2.5,
		outputPrice: 10,
		cacheReadsPrice: 1.25,
		supportsTemperature: true,
	},
	"gpt-4o-mini": {
		maxTokens: 16_384,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 0.15,
		outputPrice: 0.6,
		cacheReadsPrice: 0.075,
		supportsTemperature: true,
	},
	// --- o-series reasoning models (temperature unsupported) ---
	o3: {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 2.0,
		outputPrice: 8.0,
		cacheReadsPrice: 0.5,
		supportsReasoningEffort: true,
		reasoningEffort: "medium",
		supportsTemperature: false,
	},
	"o3-mini": {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: false,
		supportsPromptCache: true,
		inputPrice: 1.1,
		outputPrice: 4.4,
		cacheReadsPrice: 0.55,
		supportsReasoningEffort: true,
		reasoningEffort: "medium",
		supportsTemperature: false,
	},
	o1: {
		maxTokens: 100_000,
		contextWindow: 200_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 15,
		outputPrice: 60,
		cacheReadsPrice: 7.5,
		supportsTemperature: false,
	},
	"o1-mini": {
		maxTokens: 65_536,
		contextWindow: 128_000,
		supportsImages: true,
		supportsPromptCache: true,
		inputPrice: 1.1,
		outputPrice: 4.4,
		cacheReadsPrice: 0.55,
		supportsTemperature: false,
	},
} as const satisfies Record<string, ModelInfo>

// Fallback model metadata used when the configured model id is not present in
// `codexCliModels` (e.g. a newer CLI model this table doesn't know about).
export const codexCliModelInfoSaneDefaults: ModelInfo = {
	maxTokens: -1, // -1 sentinel — presumably "no explicit cap / let the provider decide"; confirm against other providers' sane defaults
	contextWindow: 128_000,
	supportsImages: true,
	supportsPromptCache: false, // conservative: don't assume cache pricing for unknown models
	inputPrice: 0, // unknown model → no pricing; cost tracking reports zero
	outputPrice: 0,
}
1 change: 1 addition & 0 deletions packages/types/src/providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ export * from "./bedrock.js"
export * from "./cerebras.js"
export * from "./chutes.js"
export * from "./claude-code.js"
export * from "./codex-cli.js"
export * from "./deepseek.js"
export * from "./doubao.js"
export * from "./featherless.js"
Expand Down
3 changes: 3 additions & 0 deletions src/api/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ import {
ChutesHandler,
LiteLLMHandler,
ClaudeCodeHandler,
CodexCliHandler,
QwenCodeHandler,
SambaNovaHandler,
IOIntelligenceHandler,
Expand Down Expand Up @@ -95,6 +96,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
return new AnthropicHandler(options)
case "claude-code":
return new ClaudeCodeHandler(options)
case "codex-cli":
return new CodexCliHandler(options)
case "glama":
return new GlamaHandler(options)
case "openrouter":
Expand Down
Loading
Loading