diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts
index 6d628ddfdf..6c02d63e94 100644
--- a/packages/types/src/provider-settings.ts
+++ b/packages/types/src/provider-settings.ts
@@ -8,6 +8,7 @@ import {
 	cerebrasModels,
 	chutesModels,
 	claudeCodeModels,
+	codexCliModels,
 	deepSeekModels,
 	doubaoModels,
 	featherlessModels,
@@ -34,6 +35,7 @@ import {
 export const providerNames = [
 	"anthropic",
 	"claude-code",
+	"codex-cli",
 	"glama",
 	"openrouter",
 	"bedrock",
@@ -338,6 +340,11 @@ const rooSchema = apiModelIdProviderModelSchema.extend({
 	// No additional fields needed - uses cloud authentication
 })
 
+const codexCliSchema = apiModelIdProviderModelSchema.extend({
+	codexCliPath: z.string().optional(), // Optional custom path to CLI
+	codexCliSessionToken: z.string().optional(), // Session token stored securely
+})
+
 const vercelAiGatewaySchema = baseProviderSettingsSchema.extend({
 	vercelAiGatewayApiKey: z.string().optional(),
 	vercelAiGatewayModelId: z.string().optional(),
@@ -350,6 +357,7 @@ const defaultSchema = z.object({
 export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProvider", [
 	anthropicSchema.merge(z.object({ apiProvider: z.literal("anthropic") })),
 	claudeCodeSchema.merge(z.object({ apiProvider: z.literal("claude-code") })),
+	codexCliSchema.merge(z.object({ apiProvider: z.literal("codex-cli") })),
 	glamaSchema.merge(z.object({ apiProvider: z.literal("glama") })),
 	openRouterSchema.merge(z.object({ apiProvider: z.literal("openrouter") })),
 	bedrockSchema.merge(z.object({ apiProvider: z.literal("bedrock") })),
@@ -391,6 +399,7 @@ export const providerSettingsSchema = z.object({
 	apiProvider: providerNamesSchema.optional(),
 	...anthropicSchema.shape,
 	...claudeCodeSchema.shape,
+	...codexCliSchema.shape,
 	...glamaSchema.shape,
 	...openRouterSchema.shape,
 	...bedrockSchema.shape,
@@ -507,6 +516,7 @@ export const MODELS_BY_PROVIDER: Record<
 		models: Object.keys(chutesModels),
 	},
 	"claude-code": { id: "claude-code", label: "Claude Code", models: Object.keys(claudeCodeModels) },
+	"codex-cli": { id: "codex-cli", label: "Codex CLI", models: Object.keys(codexCliModels) },
 	deepseek: {
 		id: "deepseek",
 		label: "DeepSeek",
diff --git a/packages/types/src/providers/codex-cli.ts b/packages/types/src/providers/codex-cli.ts
new file mode 100644
index 0000000000..1f3f1869c2
--- /dev/null
+++ b/packages/types/src/providers/codex-cli.ts
@@ -0,0 +1,167 @@
+import type { ModelInfo } from "../model.js"
+
+// Codex CLI models - same as OpenAI models but accessed through local CLI
+export type CodexCliModelId = keyof typeof codexCliModels
+
+export const codexCliDefaultModelId: CodexCliModelId = "gpt-5-2025-08-07"
+
+// These models mirror OpenAI's offerings but are accessed through the Codex CLI
+export const codexCliModels = {
+	"gpt-5-chat-latest": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: false,
+		inputPrice: 1.25,
+		outputPrice: 10.0,
+		cacheReadsPrice: 0.13,
+		description: "GPT-5 Chat Latest: Optimized for conversational AI and non-reasoning tasks",
+		supportsVerbosity: true,
+	},
+	"gpt-5-2025-08-07": {
+		maxTokens: 128000,
+		contextWindow: 400000,
+		supportsImages: true,
+		supportsPromptCache: true,
+		supportsReasoningEffort: true,
+		reasoningEffort: "medium",
+		inputPrice: 1.25,
+		outputPrice: 10.0,
+		cacheReadsPrice: 0.13,
+		description: "GPT-5: The best model for coding and agentic tasks across domains",
+		supportsVerbosity: true,
+		supportsTemperature: false,
+	},
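+	// Pricing convention (assumed from the sibling OpenAI model table): all
+	// prices are USD per 1M tokens, and cacheReadsPrice is the discounted
+	// rate applied to prompt-cache reads.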
"gpt-5-mini-2025-08-07": { + maxTokens: 128000, + contextWindow: 400000, + supportsImages: true, + supportsPromptCache: true, + supportsReasoningEffort: true, + reasoningEffort: "medium", + inputPrice: 0.25, + outputPrice: 2.0, + cacheReadsPrice: 0.03, + description: "GPT-5 Mini: A faster, more cost-efficient version of GPT-5 for well-defined tasks", + supportsVerbosity: true, + supportsTemperature: false, + }, + "gpt-5-nano-2025-08-07": { + maxTokens: 128000, + contextWindow: 400000, + supportsImages: true, + supportsPromptCache: true, + supportsReasoningEffort: true, + reasoningEffort: "medium", + inputPrice: 0.05, + outputPrice: 0.4, + cacheReadsPrice: 0.01, + description: "GPT-5 Nano: Fastest, most cost-efficient version of GPT-5", + supportsVerbosity: true, + supportsTemperature: false, + }, + "gpt-4.1": { + maxTokens: 32_768, + contextWindow: 1_047_576, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 2, + outputPrice: 8, + cacheReadsPrice: 0.5, + supportsTemperature: true, + }, + "gpt-4.1-mini": { + maxTokens: 32_768, + contextWindow: 1_047_576, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 0.4, + outputPrice: 1.6, + cacheReadsPrice: 0.1, + supportsTemperature: true, + }, + "gpt-4.1-nano": { + maxTokens: 32_768, + contextWindow: 1_047_576, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 0.1, + outputPrice: 0.4, + cacheReadsPrice: 0.025, + supportsTemperature: true, + }, + "gpt-4o": { + maxTokens: 16_384, + contextWindow: 128_000, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 2.5, + outputPrice: 10, + cacheReadsPrice: 1.25, + supportsTemperature: true, + }, + "gpt-4o-mini": { + maxTokens: 16_384, + contextWindow: 128_000, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 0.15, + outputPrice: 0.6, + cacheReadsPrice: 0.075, + supportsTemperature: true, + }, + o3: { + maxTokens: 100_000, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 2.0, + outputPrice: 8.0, + cacheReadsPrice: 0.5, + supportsReasoningEffort: true, + reasoningEffort: "medium", + supportsTemperature: false, + }, + "o3-mini": { + maxTokens: 100_000, + contextWindow: 200_000, + supportsImages: false, + supportsPromptCache: true, + inputPrice: 1.1, + outputPrice: 4.4, + cacheReadsPrice: 0.55, + supportsReasoningEffort: true, + reasoningEffort: "medium", + supportsTemperature: false, + }, + o1: { + maxTokens: 100_000, + contextWindow: 200_000, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 15, + outputPrice: 60, + cacheReadsPrice: 7.5, + supportsTemperature: false, + }, + "o1-mini": { + maxTokens: 65_536, + contextWindow: 128_000, + supportsImages: true, + supportsPromptCache: true, + inputPrice: 1.1, + outputPrice: 4.4, + cacheReadsPrice: 0.55, + supportsTemperature: false, + }, +} as const satisfies Record + +export const codexCliModelInfoSaneDefaults: ModelInfo = { + maxTokens: -1, + contextWindow: 128_000, + supportsImages: true, + supportsPromptCache: false, + inputPrice: 0, + outputPrice: 0, +} diff --git a/packages/types/src/providers/index.ts b/packages/types/src/providers/index.ts index 21e43aaa99..3ffc7bdd37 100644 --- a/packages/types/src/providers/index.ts +++ b/packages/types/src/providers/index.ts @@ -3,6 +3,7 @@ export * from "./bedrock.js" export * from "./cerebras.js" export * from "./chutes.js" export * from "./claude-code.js" +export * from "./codex-cli.js" export * from "./deepseek.js" export * from "./doubao.js" export * from "./featherless.js" 
diff --git a/src/api/index.ts b/src/api/index.ts
index ac00967676..ab71450910 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -30,6 +30,7 @@ import {
 	ChutesHandler,
 	LiteLLMHandler,
 	ClaudeCodeHandler,
+	CodexCliHandler,
 	QwenCodeHandler,
 	SambaNovaHandler,
 	IOIntelligenceHandler,
@@ -95,6 +96,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
 			return new AnthropicHandler(options)
 		case "claude-code":
 			return new ClaudeCodeHandler(options)
+		case "codex-cli":
+			return new CodexCliHandler(options)
 		case "glama":
 			return new GlamaHandler(options)
 		case "openrouter":
diff --git a/src/api/providers/codex-cli.ts b/src/api/providers/codex-cli.ts
new file mode 100644
index 0000000000..90c21b1169
--- /dev/null
+++ b/src/api/providers/codex-cli.ts
@@ -0,0 +1,512 @@
+import { Anthropic } from "@anthropic-ai/sdk"
+import OpenAI from "openai"
+import { exec } from "child_process"
+import { promisify } from "util"
+import * as vscode from "vscode"
+import * as path from "path"
+import * as fs from "fs/promises"
+
+import { type ModelInfo, codexCliModels, codexCliDefaultModelId, codexCliModelInfoSaneDefaults } from "@roo-code/types"
+
+import type { ApiHandlerOptions } from "../../shared/api"
+
+import { XmlMatcher } from "../../utils/xml-matcher"
+
+import { convertToOpenAiMessages } from "../transform/openai-format"
+import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
+import { getModelParams } from "../transform/model-params"
+
+import { DEFAULT_HEADERS } from "./constants"
+import { BaseProvider } from "./base-provider"
+import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
+import { getApiRequestTimeout } from "./utils/timeout-config"
+import { handleOpenAIError } from "./utils/openai-error-handler"
+
+const execAsync = promisify(exec)
+
+/**
+ * Codex CLI Handler - uses the local CLI for authentication instead of API keys.
+ * Behaves like the OpenAI provider but with a local login flow.
+ */
+export class CodexCliHandler extends BaseProvider implements SingleCompletionHandler {
+	protected options: ApiHandlerOptions
+	private client: OpenAI | null = null
+	private readonly providerName = "Codex CLI"
+	private isInitialized = false
+
+	constructor(options: ApiHandlerOptions) {
+		super()
+		this.options = options
+	}
+
+	/**
+	 * Initialize the OpenAI client with a session token from the CLI
+	 */
+	private async initializeClient(): Promise<void> {
+		if (this.isInitialized && this.client) {
+			return
+		}
+
+		try {
+			// Get the session token from secure storage or the CLI
+			const sessionToken = await this.getSessionToken()
+
+			if (!sessionToken) {
+				throw new Error("Not signed in. Please sign in to Codex CLI first.")
+			}
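+
+			// Assumption: the CLI-issued session token can be used verbatim as an
+			// OpenAI bearer credential. If the real Codex CLI scopes tokens to a
+			// different endpoint, the baseURL below has to change with it.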
+
+			// Use the session token as the API key
+			// The actual endpoint will be the same as OpenAI
+			const baseURL = "https://api.openai.com/v1"
+
+			const headers = {
+				...DEFAULT_HEADERS,
+			}
+
+			const timeout = getApiRequestTimeout()
+
+			this.client = new OpenAI({
+				baseURL,
+				apiKey: sessionToken,
+				defaultHeaders: headers,
+				timeout,
+			})
+
+			this.isInitialized = true
+		} catch (error) {
+			this.client = null
+			this.isInitialized = false
+			throw error
+		}
+	}
+
+	/**
+	 * Get the session token from secure storage or the CLI
+	 */
+	private async getSessionToken(): Promise<string | undefined> {
+		// First check if we have a stored session token
+		const storedToken = this.options.codexCliSessionToken
+		if (storedToken) {
+			return storedToken
+		}
+
+		// Try to get the token from the CLI
+		const cliPath = this.options.codexCliPath || "codex"
+
+		try {
+			const { stdout } = await execAsync(`${cliPath} auth status --json`)
+			const authStatus = JSON.parse(stdout)
+
+			if (authStatus.authenticated && authStatus.token) {
+				// Store the token for future use
+				// Note: In the actual implementation, this should trigger a save to secure storage
+				return authStatus.token
+			}
+		} catch (error) {
+			// CLI not available or not authenticated
+			console.error("Failed to get auth status from Codex CLI:", error)
+		}
+
+		return undefined
+	}
+
+	/**
+	 * Sign in to Codex CLI
+	 */
+	public async signIn(): Promise<boolean> {
+		const cliPath = this.options.codexCliPath || "codex"
+
+		try {
+			// Execute the sign-in command
+			// This should open a browser for authentication
+			const { stdout, stderr } = await execAsync(`${cliPath} auth login`)
+
+			if (stderr) {
+				console.error("Codex CLI sign-in error:", stderr)
+				return false
+			}
+
+			// After a successful login, get the token
+			const token = await this.getSessionToken()
+			if (token) {
+				// Reset initialization to force client recreation
+				this.isInitialized = false
+				this.client = null
+				await this.initializeClient()
+				return true
+			}
+		} catch (error) {
+			console.error("Failed to sign in to Codex CLI:", error)
+		}
+
+		return false
+	}
+
+	/**
+	 * Sign out from Codex CLI
+	 */
+	public async signOut(): Promise<boolean> {
+		const cliPath = this.options.codexCliPath || "codex"
+
+		try {
+			const { stderr } = await execAsync(`${cliPath} auth logout`)
+
+			if (stderr) {
+				console.error("Codex CLI sign-out error:", stderr)
+				return false
+			}
+
+			// Clear the client
+			this.client = null
+			this.isInitialized = false
+
+			return true
+		} catch (error) {
+			console.error("Failed to sign out from Codex CLI:", error)
+		}
+
+		return false
+	}
+
+	/**
+	 * Check if the CLI is available
+	 */
+	public async isCliAvailable(): Promise<boolean> {
+		const cliPath = this.options.codexCliPath || "codex"
+
+		try {
+			await execAsync(`${cliPath} --version`)
+			return true
+		} catch {
+			return false
+		}
+	}
+
+	/**
+	 * Check authentication status
+	 */
+	public async isAuthenticated(): Promise<boolean> {
+		const token = await this.getSessionToken()
+		return !!token
+	}
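+
+	// The CLI surface assumed by the helpers above ("codex auth status --json",
+	// "codex auth login", "codex auth logout") is hypothetical; a successful
+	// status call is expected to print JSON along the lines of:
+	//   { "authenticated": true, "token": "..." }
+	// Adjust the subcommands and fields to match what the real CLI emits.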
+
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
+		// Ensure client is initialized
+		await this.initializeClient()
+
+		if (!this.client) {
+			throw new Error("Codex CLI client not initialized. Please sign in first.")
+		}
+
+		const { info: modelInfo } = this.getModel()
+		const modelId = this.options.apiModelId ?? codexCliDefaultModelId
+
+		// Handle O1/O3/O4 family models specially
+		if (modelId.includes("o1") || modelId.includes("o3") || modelId.includes("o4")) {
+			yield* this.handleO3FamilyMessage(modelId, systemPrompt, messages, modelInfo)
+			return
+		}
+
+		// Standard streaming for other models
+		if (this.options.openAiStreamingEnabled ?? true) {
+			let systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = {
+				role: "system",
+				content: systemPrompt,
+			}
+
+			let convertedMessages
+
+			if (modelInfo.supportsPromptCache) {
+				systemMessage = {
+					role: "system",
+					content: [
+						{
+							type: "text",
+							text: systemPrompt,
+							// @ts-ignore-next-line
+							cache_control: { type: "ephemeral" },
+						},
+					],
+				}
+			}
+
+			convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages)]
+
+			if (modelInfo.supportsPromptCache) {
+				// Add cache_control to the last two user messages
+				const lastTwoUserMessages = convertedMessages.filter((msg) => msg.role === "user").slice(-2)
+
+				lastTwoUserMessages.forEach((msg) => {
+					if (typeof msg.content === "string") {
+						msg.content = [{ type: "text", text: msg.content }]
+					}
+
+					if (Array.isArray(msg.content)) {
+						let lastTextPart = msg.content.filter((part) => part.type === "text").pop()
+
+						if (!lastTextPart) {
+							lastTextPart = { type: "text", text: "..." }
+							msg.content.push(lastTextPart)
+						}
+
+						// @ts-ignore-next-line
+						lastTextPart["cache_control"] = { type: "ephemeral" }
+					}
+				})
+			}
+
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
+				model: modelId,
+				temperature: this.options.modelTemperature ?? 0,
+				messages: convertedMessages,
+				stream: true as const,
+				stream_options: { include_usage: true },
+			}
+
+			// Add max_tokens if needed
+			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
+
+			let stream
+			try {
+				stream = await this.client.chat.completions.create(requestOptions)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
+
+			const matcher = new XmlMatcher(
+				"think",
+				(chunk) =>
+					({
+						type: chunk.matched ? "reasoning" : "text",
+						text: chunk.data,
+					}) as const,
+			)
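+
+			// XmlMatcher scans the streamed text for <think>…</think> blocks,
+			// re-tagging matched content as "reasoning" chunks and passing
+			// everything else through as plain "text" chunks.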
+
+			let lastUsage
+
+			for await (const chunk of stream) {
+				const delta = chunk.choices[0]?.delta ?? {}
+
+				if (delta.content) {
+					for (const chunk of matcher.update(delta.content)) {
+						yield chunk
+					}
+				}
+
+				if ("reasoning_content" in delta && delta.reasoning_content) {
+					yield {
+						type: "reasoning",
+						text: (delta.reasoning_content as string | undefined) || "",
+					}
+				}
+				if (chunk.usage) {
+					lastUsage = chunk.usage
+				}
+			}
+
+			for (const chunk of matcher.final()) {
+				yield chunk
+			}
+
+			if (lastUsage) {
+				yield this.processUsageMetrics(lastUsage, modelInfo)
+			}
+		} else {
+			// Non-streaming mode
+			const systemMessage: OpenAI.Chat.ChatCompletionUserMessageParam = {
+				role: "user",
+				content: systemPrompt,
+			}
+
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+				model: modelId,
+				messages: [systemMessage, ...convertToOpenAiMessages(messages)],
+			}
+
+			// Add max_tokens if needed
+			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
+
+			let response
+			try {
+				response = await this.client.chat.completions.create(requestOptions)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
+
+			yield {
+				type: "text",
+				text: response.choices[0]?.message.content || "",
+			}
+
+			yield this.processUsageMetrics(response.usage, modelInfo)
+		}
+	}
+
+	private async *handleO3FamilyMessage(
+		modelId: string,
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		modelInfo: ModelInfo,
+	): ApiStream {
+		if (!this.client) {
+			throw new Error("Codex CLI client not initialized")
+		}
+
+		if (this.options.openAiStreamingEnabled ?? true) {
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
+				model: modelId,
+				messages: [
+					{
+						role: "developer",
+						content: `Formatting re-enabled\n${systemPrompt}`,
+					},
+					...convertToOpenAiMessages(messages),
+				],
+				stream: true,
+				stream_options: { include_usage: true },
+				reasoning_effort: modelInfo.reasoningEffort as "low" | "medium" | "high" | undefined,
+				temperature: undefined,
+			}
+
+			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
+
+			let stream
+			try {
+				stream = await this.client.chat.completions.create(requestOptions)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
+
+			yield* this.handleStreamResponse(stream)
+		} else {
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+				model: modelId,
+				messages: [
+					{
+						role: "developer",
+						content: `Formatting re-enabled\n${systemPrompt}`,
+					},
+					...convertToOpenAiMessages(messages),
+				],
+				reasoning_effort: modelInfo.reasoningEffort as "low" | "medium" | "high" | undefined,
+				temperature: undefined,
+			}
+
+			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
+
+			let response
+			try {
+				response = await this.client.chat.completions.create(requestOptions)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
+
+			yield {
+				type: "text",
+				text: response.choices[0]?.message.content || "",
+			}
+			yield this.processUsageMetrics(response.usage)
+		}
+	}
+
+	private async *handleStreamResponse(
+		stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>,
+	): ApiStream {
+		for await (const chunk of stream) {
+			const delta = chunk.choices[0]?.delta
+			if (delta?.content) {
+				yield {
+					type: "text",
+					text: delta.content,
+				}
+			}
+
+			if (chunk.usage) {
+				yield {
+					type: "usage",
+					inputTokens: chunk.usage.prompt_tokens || 0,
+					outputTokens: chunk.usage.completion_tokens || 0,
+				}
+			}
+		}
+	}
+
+	protected processUsageMetrics(usage: any, _modelInfo?: ModelInfo): ApiStreamUsageChunk {
+		return {
+			type: "usage",
+			inputTokens: usage?.prompt_tokens || 0,
+			outputTokens: usage?.completion_tokens || 0,
+			cacheWriteTokens: usage?.cache_creation_input_tokens || undefined,
+			cacheReadTokens: usage?.cache_read_input_tokens || undefined,
+		}
+	}
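+
+	// Note: cache_creation_input_tokens / cache_read_input_tokens are not part
+	// of OpenAI's standard usage payload; they are read defensively here and
+	// stay undefined unless the backend actually reports prompt-cache usage.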
+
+	override getModel() {
+		const id = this.options.apiModelId ?? codexCliDefaultModelId
+		const info = codexCliModels[id as keyof typeof codexCliModels] ?? codexCliModelInfoSaneDefaults
+		const params = getModelParams({ format: "openai", modelId: id, model: info, settings: this.options })
+		return { id, info, ...params }
+	}
+
+	async completePrompt(prompt: string): Promise<string> {
+		await this.initializeClient()
+
+		if (!this.client) {
+			throw new Error("Codex CLI client not initialized. Please sign in first.")
+		}
+
+		try {
+			const model = this.getModel()
+			const modelInfo = model.info
+
+			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+				model: model.id,
+				messages: [{ role: "user", content: prompt }],
+			}
+
+			// Add max_tokens if needed
+			this.addMaxTokensIfNeeded(requestOptions, modelInfo)
+
+			let response
+			try {
+				response = await this.client.chat.completions.create(requestOptions)
+			} catch (error) {
+				throw handleOpenAIError(error, this.providerName)
+			}
+
+			return response.choices[0]?.message.content || ""
+		} catch (error) {
+			if (error instanceof Error) {
+				throw new Error(`${this.providerName} completion error: ${error.message}`)
+			}
+
+			throw error
+		}
+	}
+
+	/**
+	 * Adds max_completion_tokens to the request body if needed based on provider configuration
+	 */
+	protected addMaxTokensIfNeeded(
+		requestOptions:
+			| OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming
+			| OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming,
+		modelInfo: ModelInfo,
+	): void {
+		// Only add max_completion_tokens if includeMaxTokens is true
+		if (this.options.includeMaxTokens === true) {
+			// Use the user-configured modelMaxTokens if available, otherwise fall back to the model's default maxTokens
+			requestOptions.max_completion_tokens = this.options.modelMaxTokens || modelInfo.maxTokens
+		}
+	}
+}
+
+/**
+ * Get available models for Codex CLI.
+ * Since it uses the same models as OpenAI, we return the predefined list.
+ */
+export async function getCodexCliModels(): Promise<string[]> {
+	return Object.keys(codexCliModels)
+}
diff --git a/src/api/providers/index.ts b/src/api/providers/index.ts
index 85d877b6bc..902c9a14b3 100644
--- a/src/api/providers/index.ts
+++ b/src/api/providers/index.ts
@@ -4,6 +4,7 @@ export { AwsBedrockHandler } from "./bedrock"
 export { CerebrasHandler } from "./cerebras"
 export { ChutesHandler } from "./chutes"
 export { ClaudeCodeHandler } from "./claude-code"
+export { CodexCliHandler } from "./codex-cli"
 export { DeepSeekHandler } from "./deepseek"
 export { DoubaoHandler } from "./doubao"
 export { MoonshotHandler } from "./moonshot"
diff --git a/webview-ui/src/components/settings/providers/CodexCli.tsx b/webview-ui/src/components/settings/providers/CodexCli.tsx
new file mode 100644
index 0000000000..ebff88b6bc
--- /dev/null
+++ b/webview-ui/src/components/settings/providers/CodexCli.tsx
@@ -0,0 +1,187 @@
+import { useState, useCallback, useEffect } from "react"
+import { VSCodeButton, VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
+
+import type { ModelInfo, ProviderSettings } from "@roo-code/types"
+
+import { inputEventTransform } from "../transforms"
+
+type CodexCliProps = {
+	apiConfiguration: ProviderSettings
+	setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void
+	selectedModelInfo?: ModelInfo
+}
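+
+// NOTE: selectedModelInfo is accepted for parity with the other provider
+// panels but is not used yet, and the sign-in handler below only simulates
+// authentication in the webview; a real integration would message the
+// extension host to drive the CLI.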
+
+export const CodexCli = ({ apiConfiguration, setApiConfigurationField }: CodexCliProps) => {
+	const [isAuthenticated, setIsAuthenticated] = useState(false)
+	const [isCheckingAuth, setIsCheckingAuth] = useState(false)
+	const [isSigningIn, setIsSigningIn] = useState(false)
+	const [isSigningOut, setIsSigningOut] = useState(false)
+	const [cliAvailable, setCliAvailable] = useState<boolean | null>(null)
+	const [customPath, setCustomPath] = useState(apiConfiguration?.codexCliPath || "")
+	const [showCustomPath, setShowCustomPath] = useState(!!apiConfiguration?.codexCliPath)
+
+	// Check authentication status on mount and when the custom path changes
+	useEffect(() => {
+		const checkCliAvailability = async () => {
+			try {
+				// For now, assume the CLI is available
+				// In a real implementation, this would check whether the CLI is installed
+				setCliAvailable(true)
+			} catch (error) {
+				console.error("Failed to check CLI availability:", error)
+				setCliAvailable(false)
+			}
+		}
+
+		const checkAuthStatus = async () => {
+			setIsCheckingAuth(true)
+			try {
+				// Check if we have a stored session token
+				if (apiConfiguration?.codexCliSessionToken) {
+					setIsAuthenticated(true)
+				} else {
+					setIsAuthenticated(false)
+				}
+			} catch (error) {
+				console.error("Failed to check auth status:", error)
+				setIsAuthenticated(false)
+			} finally {
+				setIsCheckingAuth(false)
+			}
+		}
+
+		checkAuthStatus()
+		checkCliAvailability()
+	}, [customPath, apiConfiguration?.codexCliSessionToken])
+
+	const handleSignIn = async () => {
+		setIsSigningIn(true)
+		try {
+			// Simulate the sign-in process
+			// In a real implementation, this would open a browser for authentication
+			setTimeout(() => {
+				// Simulate a successful sign-in with a mock token
+				const mockToken = "mock-session-token-" + Date.now()
+				setApiConfigurationField("codexCliSessionToken", mockToken)
+				setIsAuthenticated(true)
+				setIsSigningIn(false)
+			}, 1000)
+		} catch (error) {
+			console.error("Failed to sign in:", error)
+			setIsSigningIn(false)
+		}
+	}
+
+	const handleSignOut = async () => {
+		setIsSigningOut(true)
+		try {
+			// Clear the session token
+			setApiConfigurationField("codexCliSessionToken", undefined)
+			setIsAuthenticated(false)
+		} catch (error) {
+			console.error("Failed to sign out:", error)
+		} finally {
+			setIsSigningOut(false)
+		}
+	}
+
+	const handleCustomPathChange = useCallback(
+		(event: any) => {
+			const value = inputEventTransform(event)
+			setCustomPath(value)
+			setApiConfigurationField("codexCliPath", value || undefined)
+		},
+		[setApiConfigurationField],
+	)
+
+	return (
+		<div className="flex flex-col gap-3">
+			{/* Authentication Status */}
+			<div className="flex flex-col gap-2">
+				<div className="flex items-center gap-2">
+					<span className="font-medium">Authentication Status:</span>
+					{isCheckingAuth ? (
+						<span>Checking...</span>
+					) : (
+						<span>{isAuthenticated ? "Signed In" : "Not Signed In"}</span>
+					)}
+				</div>
+
+				{/* Sign In/Out Buttons */}
+				<div>
+					{!isAuthenticated ? (
+						<VSCodeButton onClick={handleSignIn} disabled={isSigningIn}>
+							{isSigningIn ? "Signing In..." : "Sign In"}
+						</VSCodeButton>
+					) : (
+						<VSCodeButton appearance="secondary" onClick={handleSignOut} disabled={isSigningOut}>
+							{isSigningOut ? "Signing Out..." : "Sign Out"}
+						</VSCodeButton>
+					)}
+				</div>
+			</div>
+
+			{/* CLI Availability Status */}
+			{cliAvailable === false && (
+				<div className="text-sm text-vscode-errorForeground">
+					<div>Codex CLI not found in your system PATH.</div>
+					<div>Please install the Codex CLI or provide a custom path below.</div>
+				</div>
+			)}
+
+			{/* Custom CLI Path */}
+			<div className="flex flex-col gap-1">
+				<label className="flex items-center gap-2">
+					<input
+						type="checkbox"
+						checked={showCustomPath}
+						onChange={(e: any) => setShowCustomPath(e.target.checked)}
+					/>
+					Use a custom CLI path
+				</label>
+				{showCustomPath && (
+					<VSCodeTextField
+						value={customPath}
+						onInput={handleCustomPathChange}
+						placeholder="codex"
+						className="w-full">
+						Codex CLI Path
+					</VSCodeTextField>
+				)}
+			</div>
+
+			{/* Information */}
+			<div className="text-sm text-vscode-descriptionForeground">
+				<div className="font-medium">About Codex CLI</div>
+				<ul className="list-disc pl-4">
+					<li>Same models and capabilities as OpenAI</li>
+					<li>No API key management required</li>
+					<li>Secure local authentication</li>
+					<li>Automatic session management</li>
+				</ul>
+			</div>
+
+			{/* Note about no API key */}
+			<div className="text-sm text-vscode-descriptionForeground">
+				This provider uses local authentication instead of API keys. Sign in once and your session will be
+				managed automatically.
+			</div>
+		</div>
+ ) +} diff --git a/webview-ui/src/components/settings/providers/index.ts b/webview-ui/src/components/settings/providers/index.ts index fe0e6cecf9..ee38c0946f 100644 --- a/webview-ui/src/components/settings/providers/index.ts +++ b/webview-ui/src/components/settings/providers/index.ts @@ -3,6 +3,7 @@ export { Bedrock } from "./Bedrock" export { Cerebras } from "./Cerebras" export { Chutes } from "./Chutes" export { ClaudeCode } from "./ClaudeCode" +export { CodexCli } from "./CodexCli" export { DeepSeek } from "./DeepSeek" export { Doubao } from "./Doubao" export { Gemini } from "./Gemini" diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts index f8a005e86a..9e78f41025 100644 --- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts +++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts @@ -8,6 +8,8 @@ import { bedrockModels, cerebrasDefaultModelId, cerebrasModels, + codexCliDefaultModelId, + codexCliModels, deepSeekDefaultModelId, deepSeekModels, moonshotDefaultModelId, @@ -343,6 +345,11 @@ function getSelectedModel({ const info = qwenCodeModels[id as keyof typeof qwenCodeModels] return { id, info } } + case "codex-cli": { + const id = apiConfiguration.apiModelId ?? codexCliDefaultModelId + const info = codexCliModels[id as keyof typeof codexCliModels] + return { id, info } + } case "vercel-ai-gateway": { const id = apiConfiguration.vercelAiGatewayModelId ?? vercelAiGatewayDefaultModelId const info = routerModels["vercel-ai-gateway"]?.[id] @@ -352,7 +359,7 @@ function getSelectedModel({ // case "human-relay": // case "fake-ai": default: { - provider satisfies "anthropic" | "gemini-cli" | "qwen-code" | "human-relay" | "fake-ai" + provider satisfies "anthropic" | "gemini-cli" | "human-relay" | "fake-ai" const id = apiConfiguration.apiModelId ?? anthropicDefaultModelId const baseInfo = anthropicModels[id as keyof typeof anthropicModels]