From b5aa525dbb20a8072f08e544140c84245c13efe2 Mon Sep 17 00:00:00 2001
From: Aaron Diamond-Reivich
Date: Fri, 6 Feb 2026 12:52:36 -0500
Subject: [PATCH] mito-ai: gpt-5.3-codex support

---
 mito-ai/mito_ai/tests/test_model_utils.py   |  9 ++-
 mito-ai/mito_ai/utils/model_utils.py        |  2 +
 mito-ai/mito_ai/utils/open_ai_utils.py      |  2 +-
 mito-ai/src/components/ModelSelector.tsx    | 55 +++++++++++--------
 .../src/tests/AiChat/ModelSelector.test.tsx | 28 ++++++++++
 mito-ai/src/utils/models.ts                 |  4 ++
 6 files changed, 73 insertions(+), 27 deletions(-)

diff --git a/mito-ai/mito_ai/tests/test_model_utils.py b/mito-ai/mito_ai/tests/test_model_utils.py
index 604a5c79f..ccca998f3 100644
--- a/mito-ai/mito_ai/tests/test_model_utils.py
+++ b/mito-ai/mito_ai/tests/test_model_utils.py
@@ -111,6 +111,11 @@ def test_openai_gpt_5_2_returns_gpt_4_1(self):
         """Test that GPT 5.2 returns GPT 4.1 (fastest OpenAI model)."""
         result = get_fast_model_for_selected_model("gpt-5.2")
         assert result == "gpt-4.1"
+
+    def test_openai_gpt_5_3_codex_returns_gpt_4_1(self):
+        """Test that GPT 5.3 Codex returns GPT 4.1 (fastest OpenAI model)."""
+        result = get_fast_model_for_selected_model("gpt-5.3-codex")
+        assert result == "gpt-4.1"
 
     def test_gemini_pro_returns_flash(self):
         """Test that Gemini Pro returns Gemini Flash (fastest Gemini model)."""
@@ -169,7 +174,7 @@ def test_gemini_flash_returns_flash(self):
         (
             "litellm/openai/gpt-5.2",
             [
-                "litellm/openai/gpt-5.2",  # Index 1 in OPENAI_MODEL_ORDER
+                "litellm/openai/gpt-5.2",  # Index 2 in OPENAI_MODEL_ORDER
                 "litellm/anthropic/claude-haiku-4-5-20251001",  # Index 0 in ANTHROPIC_MODEL_ORDER
             ],
             "litellm/anthropic/claude-haiku-4-5-20251001",
@@ -331,7 +336,7 @@ def test_case_insensitive_provider_matching(self):
         (
             "Abacus/gpt-5.2",
             [
-                "Abacus/gpt-5.2",  # Index 1 in OPENAI_MODEL_ORDER
+                "Abacus/gpt-5.2",  # Index 2 in OPENAI_MODEL_ORDER
                 "Abacus/claude-haiku-4-5-20251001",  # Index 0 in ANTHROPIC_MODEL_ORDER
             ],
             "Abacus/claude-haiku-4-5-20251001",
diff --git a/mito-ai/mito_ai/utils/model_utils.py b/mito-ai/mito_ai/utils/model_utils.py
index 11c7325c9..504688e98 100644
--- a/mito-ai/mito_ai/utils/model_utils.py
+++ b/mito-ai/mito_ai/utils/model_utils.py
@@ -15,6 +15,7 @@
     "gpt-4.1",  # Fastest
     "gpt-5",
     "gpt-5.2",  # Slower
+    "gpt-5.3-codex",
 ]
 
 GEMINI_MODEL_ORDER = [
@@ -26,6 +27,7 @@
 STANDARD_MODELS = [
     "gpt-4.1",
     "gpt-5.2",
+    "gpt-5.3-codex",
     "claude-haiku-4-5-20251001",
     "gemini-3-flash-preview",
     "gemini-3-pro-preview",
diff --git a/mito-ai/mito_ai/utils/open_ai_utils.py b/mito-ai/mito_ai/utils/open_ai_utils.py
index 19679b4c5..2416137ba 100644
--- a/mito-ai/mito_ai/utils/open_ai_utils.py
+++ b/mito-ai/mito_ai/utils/open_ai_utils.py
@@ -152,7 +152,7 @@ def get_open_ai_completion_function_params(
         "messages": messages,
     }
 
-    if model == "gpt-5.2":
+    if model == "gpt-5.2" or model == "gpt-5.3-codex":
         completion_function_params["reasoning_effort"] = "low"
 
     # If a response format is provided, we need to convert it to a json schema.
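Reviewer note: a minimal, self-contained Python sketch of the backend behavior the hunks above extend. The helper bodies are assumptions inferred from the tests; only the model list, the fallback expectation, and the reasoning_effort condition come from the patch — the real implementations live in model_utils.py and open_ai_utils.py.

    # Sketch only: OPENAI_MODEL_ORDER is sorted fastest-first, so index 0 is
    # the fast fallback that get_fast_model_for_selected_model should return.
    OPENAI_MODEL_ORDER = [
        "gpt-4.1",  # Fastest
        "gpt-5",
        "gpt-5.2",  # Slower
        "gpt-5.3-codex",
    ]

    def get_fast_model_for_selected_model(selected_model: str) -> str:
        # Hypothetical reimplementation: any OpenAI model falls back to the
        # fastest entry in its provider's order.
        if selected_model in OPENAI_MODEL_ORDER:
            return OPENAI_MODEL_ORDER[0]
        return selected_model

    def build_completion_params(model: str, messages: list) -> dict:
        # Hypothetical trimmed version of get_open_ai_completion_function_params:
        # the reasoning models get a low reasoning_effort, per the hunk above.
        params = {"model": model, "messages": messages}
        if model == "gpt-5.2" or model == "gpt-5.3-codex":
            params["reasoning_effort"] = "low"
        return params

    assert get_fast_model_for_selected_model("gpt-5.3-codex") == "gpt-4.1"
    assert build_completion_params("gpt-5.3-codex", [])["reasoning_effort"] == "low"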
diff --git a/mito-ai/src/components/ModelSelector.tsx b/mito-ai/src/components/ModelSelector.tsx
index 2f6bf10f2..6d82b2364 100644
--- a/mito-ai/src/components/ModelSelector.tsx
+++ b/mito-ai/src/components/ModelSelector.tsx
@@ -16,6 +16,8 @@ import {
   GPT_4_1_MODEL_NAME,
   GPT_5_2_DISPLAY_NAME,
   GPT_5_2_MODEL_NAME,
+  GPT_5_3_CODEX_DISPLAY_NAME,
+  GPT_5_3_CODEX_MODEL_NAME,
   GEMINI_3_FLASH_MODEL_NAME,
   GEMINI_3_FLASH_DISPLAY_NAME,
   GEMINI_3_PRO_DISPLAY_NAME,
@@ -73,6 +75,16 @@ const MODEL_MAPPINGS: ModelMapping[] = [
     speed: 'Fast',
     complexityHandling: 'Medium'
   },
+  {
+    displayName: GPT_5_3_CODEX_DISPLAY_NAME,
+    fullName: GPT_5_3_CODEX_MODEL_NAME,
+    type: 'smart',
+    goodFor: [...GOOD_FOR_SMART],
+    provider: 'OpenAI',
+    tokenLimit: '400K',
+    speed: 'Slow',
+    complexityHandling: 'High'
+  },
   {
     displayName: CLAUDE_HAIKU_DISPLAY_NAME,
     fullName: CLAUDE_HAIKU_MODEL_NAME,
@@ -215,23 +227,22 @@ const ModelSelector: React.FC = ({ onConfigChange }) => {
     void fetchModels();
   }, [onConfigChange]);
 
-  const handleModelChange = (modelName: string): void => {
-    if (!modelName) {
+  const handleModelChange = (displayName: string, actualModelId?: string): void => {
+    if (!displayName) {
       return;
     }
 
-    setSelectedModel(modelName);
+    setSelectedModel(displayName);
     setIsOpen(false);
 
-    // For LiteLLM models (with provider prefix), modelName is already the full model name
-    // For standard models, we need to find the full name from MODEL_MAPPINGS
+    // Use actual model id from API when provided (e.g. router-prefixed); otherwise resolve from display name
     let fullModelName: string;
-    if (modelName.includes('/')) {
-      // LiteLLM model - use model name directly
-      fullModelName = modelName;
+    if (actualModelId !== undefined && actualModelId !== '') {
+      fullModelName = actualModelId;
+    } else if (displayName.includes('/')) {
+      fullModelName = displayName;
     } else {
-      // Standard model - find full name from MODEL_MAPPINGS
-      fullModelName = MODEL_MAPPINGS.find(m => m.displayName === modelName)?.fullName || modelName;
+      fullModelName = MODEL_MAPPINGS.find(m => m.displayName === displayName)?.fullName || displayName;
     }
 
     const newConfig = {
@@ -298,19 +309,15 @@ const ModelSelector: React.FC = ({ onConfigChange }) => {
             Loading models...
           ) : (
             availableModels.map(modelName => {
-              // Check if it's a LiteLLM model (has provider prefix)
-              const isLiteLLMModel = modelName.includes('/');
-              let displayName: string;
-              let modelMapping: ModelMapping | undefined;
-
-              if (isLiteLLMModel) {
-                // LiteLLM model - use model name directly as display name
-                displayName = modelName;
-              } else {
-                // Standard model - find display name from MODEL_MAPPINGS
-                modelMapping = MODEL_MAPPINGS.find(m => m.fullName === modelName);
-                displayName = modelMapping?.displayName || modelName;
-              }
+              // Check if it's a LiteLLM/Abacus model (has provider prefix)
+              const isRouterModel = modelName.includes('/');
+              const baseModelName = isRouterModel ? (modelName.split('/').pop() ?? modelName) : modelName;
+              // Resolve mapping by exact match, then by base name (for router-prefixed names), then case-insensitive
+              let modelMapping: ModelMapping | undefined = MODEL_MAPPINGS.find(m => m.fullName === modelName)
+                || MODEL_MAPPINGS.find(m => m.fullName === baseModelName)
+                || MODEL_MAPPINGS.find(m => m.fullName.toLowerCase() === modelName.toLowerCase())
+                || MODEL_MAPPINGS.find(m => m.fullName.toLowerCase() === baseModelName.toLowerCase());
+              const displayName = modelMapping?.displayName || (isRouterModel ? modelName : baseModelName);
               return (
                 <div
                   key={modelName}
@@ -322,7 +329,7 @@ const ModelSelector: React.FC = ({ onConfigChange }) => {
                   className={`model-option ${displayName === selectedModel ? 'selected' : ''}`}
                   onClick={(e) => {
                     e.stopPropagation();
-                    handleModelChange(displayName);
+                    handleModelChange(displayName, modelName);
                   }}
                   onMouseEnter={() => setHoveredModel(modelMapping || null)}
                   data-testid="model-option"
diff --git a/mito-ai/src/tests/AiChat/ModelSelector.test.tsx b/mito-ai/src/tests/AiChat/ModelSelector.test.tsx
index d4ddc4d38..bcf071eab 100644
--- a/mito-ai/src/tests/AiChat/ModelSelector.test.tsx
+++ b/mito-ai/src/tests/AiChat/ModelSelector.test.tsx
@@ -12,6 +12,8 @@ import {
   GPT_4_1_DISPLAY_NAME,
   GPT_4_1_MODEL_NAME,
   GPT_5_2_MODEL_NAME,
+  GPT_5_3_CODEX_DISPLAY_NAME,
+  GPT_5_3_CODEX_MODEL_NAME,
   CLAUDE_HAIKU_MODEL_NAME,
   GEMINI_3_FLASH_MODEL_NAME,
   GEMINI_3_PRO_MODEL_NAME,
@@ -42,6 +44,7 @@ describe('ModelSelector', () => {
       models: [
         GPT_4_1_MODEL_NAME,
         GPT_5_2_MODEL_NAME,
+        GPT_5_3_CODEX_MODEL_NAME,
         CLAUDE_HAIKU_MODEL_NAME,
         GEMINI_3_FLASH_MODEL_NAME,
         GEMINI_3_PRO_MODEL_NAME,
@@ -94,6 +97,30 @@
     });
   });
 
+  it('shows GPT 5.3 Codex in dropdown and calls onConfigChange when selected', async () => {
+    render(<ModelSelector onConfigChange={mockOnConfigChange} />);
+
+    await waitFor(() => {
+      expect(screen.queryByText('Loading models...')).not.toBeInTheDocument();
+    });
+
+    const dropdown = screen.getByText(DEFAULT_MODEL).closest('.model-selector-dropdown');
+    if (!dropdown) throw new Error('Dropdown element not found');
+    fireEvent.click(dropdown);
+
+    const modelOptionsContainer = await waitFor(() => {
+      return screen.getByTestId('model-selector').querySelector('.model-options');
+    });
+    if (!modelOptionsContainer) throw new Error('Model options container not found');
+
+    const modelOption = within(modelOptionsContainer as HTMLElement).getByText(GPT_5_3_CODEX_DISPLAY_NAME);
+    fireEvent.click(modelOption);
+
+    expect(mockOnConfigChange).toHaveBeenCalledWith({
+      model: GPT_5_3_CODEX_MODEL_NAME
+    });
+  });
+
   it('defaults to default model when no storedConfig exists and GPT 4.1 is first in available models', async () => {
     // Mock models with GPT 4.1 first (simulating the bug scenario)
     mockRequestAPI.mockResolvedValue({
@@ -101,6 +128,7 @@
       models: [
         GPT_4_1_MODEL_NAME,
         GPT_5_2_MODEL_NAME,
+        GPT_5_3_CODEX_MODEL_NAME,
         CLAUDE_HAIKU_MODEL_NAME,
         GEMINI_3_FLASH_MODEL_NAME,
         GEMINI_3_PRO_MODEL_NAME,
diff --git a/mito-ai/src/utils/models.ts b/mito-ai/src/utils/models.ts
index ffbf727c4..f7c21ef02 100644
--- a/mito-ai/src/utils/models.ts
+++ b/mito-ai/src/utils/models.ts
@@ -15,6 +15,9 @@ export const GPT_4_1_MODEL_NAME = 'gpt-4.1';
 
 export const GPT_5_2_DISPLAY_NAME = 'GPT 5.2';
 export const GPT_5_2_MODEL_NAME = 'gpt-5.2';
 
+export const GPT_5_3_CODEX_DISPLAY_NAME = 'GPT 5.3 Codex';
+export const GPT_5_3_CODEX_MODEL_NAME = 'gpt-5.3-codex';
+
 export const GEMINI_3_FLASH_DISPLAY_NAME = 'Gemini 3 Flash';
 export const GEMINI_3_FLASH_MODEL_NAME = 'gemini-3-flash-preview';
@@ -40,6 +43,7 @@ export async function getAvailableModels(): Promise<string[]> {
   return [
     GPT_4_1_MODEL_NAME,
     GPT_5_2_MODEL_NAME,
+    GPT_5_3_CODEX_MODEL_NAME,
     CLAUDE_HAIKU_MODEL_NAME,
     GEMINI_3_FLASH_MODEL_NAME,
     GEMINI_3_PRO_MODEL_NAME,
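Reviewer note: a standalone TypeScript sketch of the mapping-resolution order the new ModelSelector code introduces — exact full-name match, then base-name match for router-prefixed ids, then the same two checks case-insensitively. ModelMapping is reduced to the two fields used here, and resolveMapping is a hypothetical extraction for illustration, not a function in the patch.

    interface ModelMapping { fullName: string; displayName: string; }

    const MODEL_MAPPINGS: ModelMapping[] = [
      { fullName: 'gpt-5.3-codex', displayName: 'GPT 5.3 Codex' },
    ];

    function resolveMapping(modelName: string): ModelMapping | undefined {
      // Strip any "provider/" prefix (e.g. "litellm/openai/") to get the base name.
      const baseModelName = modelName.includes('/')
        ? (modelName.split('/').pop() ?? modelName)
        : modelName;
      // Same lookup ladder as the patch: exact, base name, then case-insensitive.
      return MODEL_MAPPINGS.find(m => m.fullName === modelName)
        || MODEL_MAPPINGS.find(m => m.fullName === baseModelName)
        || MODEL_MAPPINGS.find(m => m.fullName.toLowerCase() === modelName.toLowerCase())
        || MODEL_MAPPINGS.find(m => m.fullName.toLowerCase() === baseModelName.toLowerCase());
    }

    // A router-prefixed id resolves through its base name:
    console.log(resolveMapping('litellm/openai/gpt-5.3-codex')?.displayName); // "GPT 5.3 Codex"

This is why the dropdown can show the friendly "GPT 5.3 Codex" label for both the plain id and router-prefixed ids, while handleModelChange still receives the actual model id to store in the config.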