9 changes: 7 additions & 2 deletions mito-ai/mito_ai/tests/test_model_utils.py
@@ -111,6 +111,11 @@ def test_openai_gpt_5_2_returns_gpt_4_1(self):
         """Test that GPT 5.2 returns GPT 4.1 (fastest OpenAI model)."""
         result = get_fast_model_for_selected_model("gpt-5.2")
         assert result == "gpt-4.1"
+
+    def test_openai_gpt_5_3_codex_returns_gpt_4_1(self):
+        """Test that GPT 5.3 Codex returns GPT 4.1 (fastest OpenAI model)."""
+        result = get_fast_model_for_selected_model("gpt-5.3-codex")
+        assert result == "gpt-4.1"
 
     def test_gemini_pro_returns_flash(self):
         """Test that Gemini Pro returns Gemini Flash (fastest Gemini model)."""
@@ -169,7 +174,7 @@ def test_gemini_flash_returns_flash(self):
         (
             "litellm/openai/gpt-5.2",
             [
-                "litellm/openai/gpt-5.2",  # Index 1 in OPENAI_MODEL_ORDER
+                "litellm/openai/gpt-5.2",  # Index 2 in OPENAI_MODEL_ORDER
                 "litellm/anthropic/claude-haiku-4-5-20251001",  # Index 0 in ANTHROPIC_MODEL_ORDER
             ],
             "litellm/anthropic/claude-haiku-4-5-20251001",
@@ -331,7 +336,7 @@ def test_case_insensitive_provider_matching(self):
         (
             "Abacus/gpt-5.2",
             [
-                "Abacus/gpt-5.2",  # Index 1 in OPENAI_MODEL_ORDER
+                "Abacus/gpt-5.2",  # Index 2 in OPENAI_MODEL_ORDER
                 "Abacus/claude-haiku-4-5-20251001",  # Index 0 in ANTHROPIC_MODEL_ORDER
             ],
             "Abacus/claude-haiku-4-5-20251001",
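Note that the comment changes in the two hunks above (Index 1 to Index 2) are corrections rather than behavior changes: gpt-5.2 already sat at index 2 of OPENAI_MODEL_ORDER, after gpt-4.1 and gpt-5, before gpt-5.3-codex was appended at the end. A quick sanity check of the index comments (a sketch; the real lists live in mito_ai/utils/model_utils.py, and the Anthropic list's contents beyond the haiku entry are an assumption):

    OPENAI_MODEL_ORDER = ["gpt-4.1", "gpt-5", "gpt-5.2", "gpt-5.3-codex"]
    ANTHROPIC_MODEL_ORDER = ["claude-haiku-4-5-20251001"]  # assumed single entry

    assert OPENAI_MODEL_ORDER.index("gpt-5.2") == 2        # was mislabeled "Index 1"
    assert OPENAI_MODEL_ORDER.index("gpt-5.3-codex") == 3
    assert ANTHROPIC_MODEL_ORDER.index("claude-haiku-4-5-20251001") == 0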
2 changes: 2 additions & 0 deletions mito-ai/mito_ai/utils/model_utils.py
@@ -15,6 +15,7 @@
     "gpt-4.1",  # Fastest
     "gpt-5",
     "gpt-5.2",  # Slower
+    "gpt-5.3-codex",
 ]
 
 GEMINI_MODEL_ORDER = [
@@ -26,6 +27,7 @@
 STANDARD_MODELS = [
     "gpt-4.1",
     "gpt-5.2",
+    "gpt-5.3-codex",
     "claude-haiku-4-5-20251001",
     "gemini-3-flash-preview",
     "gemini-3-pro-preview",
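The order lists are fastest-first, which is why the tests above expect every OpenAI model, including the new gpt-5.3-codex, to map to gpt-4.1 as its fast model. A minimal sketch of that lookup, consistent with the tests (the helper's real implementation may differ, and the Gemini list contents are assumed from STANDARD_MODELS):

    OPENAI_MODEL_ORDER = ["gpt-4.1", "gpt-5", "gpt-5.2", "gpt-5.3-codex"]  # fastest first
    GEMINI_MODEL_ORDER = ["gemini-3-flash-preview", "gemini-3-pro-preview"]  # assumed contents

    def get_fast_model_for_selected_model(selected_model: str) -> str:
        """Return the fastest model in the same family as the selected model."""
        base = selected_model.split("/")[-1].lower()  # tolerate router prefixes like "Abacus/"
        for order in (OPENAI_MODEL_ORDER, GEMINI_MODEL_ORDER):
            if base in order:
                return order[0]  # index 0 is the fastest model in each list
        return selected_model  # unknown family: keep the selected model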
2 changes: 1 addition & 1 deletion mito-ai/mito_ai/utils/open_ai_utils.py
@@ -152,7 +152,7 @@ def get_open_ai_completion_function_params(
         "messages": messages,
     }
 
-    if model == "gpt-5.2":
+    if model == "gpt-5.2" or model == "gpt-5.3-codex":
Review comment (Medium Severity): Router-prefixed models miss reasoning_effort parameter setting

The condition model == "gpt-5.2" or model == "gpt-5.3-codex" uses exact string matching, which fails for router-prefixed models like "Abacus/gpt-5.3-codex". In openai_client.py, the router prefix is stripped in _adjust_model_for_provider only AFTER get_open_ai_completion_function_params is called, so Abacus-configured enterprise users on gpt-5.3-codex won't get the reasoning_effort parameter set, causing inconsistent behavior compared to standard deployments.
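One possible remedy (a sketch only, not the fix adopted in this PR) is to compare against the base model name after stripping any router prefix:

    # Sketch: prefix-tolerant check. Assumes router prefixes are separated from
    # the base model name by "/" (e.g. "Abacus/gpt-5.3-codex").
    LOW_REASONING_EFFORT_MODELS = {"gpt-5.2", "gpt-5.3-codex"}

    base_model = model.split("/")[-1]
    if base_model in LOW_REASONING_EFFORT_MODELS:
        completion_function_params["reasoning_effort"] = "low"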


         completion_function_params["reasoning_effort"] = "low"
 
     # If a response format is provided, we need to convert it to a json schema.
55 changes: 31 additions & 24 deletions mito-ai/src/components/ModelSelector.tsx
@@ -16,6 +16,8 @@
     GPT_4_1_MODEL_NAME,
     GPT_5_2_DISPLAY_NAME,
     GPT_5_2_MODEL_NAME,
+    GPT_5_3_CODEX_DISPLAY_NAME,
+    GPT_5_3_CODEX_MODEL_NAME,
     GEMINI_3_FLASH_MODEL_NAME,
     GEMINI_3_FLASH_DISPLAY_NAME,
     GEMINI_3_PRO_DISPLAY_NAME,
@@ -73,6 +75,16 @@
         speed: 'Fast',
         complexityHandling: 'Medium'
     },
+    {
+        displayName: GPT_5_3_CODEX_DISPLAY_NAME,
+        fullName: GPT_5_3_CODEX_MODEL_NAME,
+        type: 'smart',
+        goodFor: [...GOOD_FOR_SMART],
+        provider: 'OpenAI',
+        tokenLimit: '400K',
+        speed: 'Slow',
+        complexityHandling: 'High'
+    },
     {
         displayName: CLAUDE_HAIKU_DISPLAY_NAME,
         fullName: CLAUDE_HAIKU_MODEL_NAME,
@@ -215,23 +227,22 @@
         void fetchModels();
     }, [onConfigChange]);
 
-    const handleModelChange = (modelName: string): void => {
-        if (!modelName) {
+    const handleModelChange = (displayName: string, actualModelId?: string): void => {
+        if (!displayName) {
             return;
         }
 
-        setSelectedModel(modelName);
+        setSelectedModel(displayName);
         setIsOpen(false);
 
-        // For LiteLLM models (with provider prefix), modelName is already the full model name
-        // For standard models, we need to find the full name from MODEL_MAPPINGS
+        // Use actual model id from API when provided (e.g. router-prefixed); otherwise resolve from display name
        let fullModelName: string;
-        if (modelName.includes('/')) {
-            // LiteLLM model - use model name directly
-            fullModelName = modelName;
+        if (actualModelId !== undefined && actualModelId !== '') {
+            fullModelName = actualModelId;
+        } else if (displayName.includes('/')) {
+            fullModelName = displayName;
        } else {
-            // Standard model - find full name from MODEL_MAPPINGS
-            fullModelName = MODEL_MAPPINGS.find(m => m.displayName === modelName)?.fullName || modelName;
+            fullModelName = MODEL_MAPPINGS.find(m => m.displayName === displayName)?.fullName || displayName;
        }
 
        const newConfig = {
@@ -298,27 +309,23 @@
                 <div className="model-option">Loading models...</div>
             ) : (
                 availableModels.map(modelName => {
-                    // Check if it's a LiteLLM model (has provider prefix)
-                    const isLiteLLMModel = modelName.includes('/');
-                    let displayName: string;
-                    let modelMapping: ModelMapping | undefined;
-
-                    if (isLiteLLMModel) {
-                        // LiteLLM model - use model name directly as display name
-                        displayName = modelName;
-                    } else {
-                        // Standard model - find display name from MODEL_MAPPINGS
-                        modelMapping = MODEL_MAPPINGS.find(m => m.fullName === modelName);
-                        displayName = modelMapping?.displayName || modelName;
-                    }
+                    // Check if it's a LiteLLM/Abacus model (has provider prefix)
+                    const isRouterModel = modelName.includes('/');
+                    const baseModelName = isRouterModel ? (modelName.split('/').pop() ?? modelName) : modelName;
+                    // Resolve mapping by exact match, then by base name (for router-prefixed names), then case-insensitive
+                    let modelMapping: ModelMapping | undefined = MODEL_MAPPINGS.find(m => m.fullName === modelName)
+                        || MODEL_MAPPINGS.find(m => m.fullName === baseModelName)
+                        || MODEL_MAPPINGS.find(m => m.fullName.toLowerCase() === modelName.toLowerCase())
+                        || MODEL_MAPPINGS.find(m => m.fullName.toLowerCase() === baseModelName.toLowerCase());
+                    const displayName = modelMapping?.displayName || (isRouterModel ? modelName : baseModelName);
 
                     return (
                         <div
                             key={modelName}
                             className={`model-option ${displayName === selectedModel ? 'selected' : ''}`}
                             onClick={(e) => {
                                 e.stopPropagation();
-                                handleModelChange(displayName);
+                                handleModelChange(displayName, modelName);
                             }}
                             onMouseEnter={() => setHoveredModel(modelMapping || null)}
                             data-testid="model-option"

Check failure (GitHub Actions / Run linters) on line 316, the new let modelMapping declaration: 'modelMapping' is never reassigned. Use 'const' instead.
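For clarity, the resolution order the new TSX code implements (exact full-name match, then base name, then the same two checks case-insensitively, with the raw name as a last resort) can be sketched in Python, the language used for the other sketches in this review; the mapping contents and function name here are illustrative only:

    # Hypothetical mirror of the ModelSelector mapping resolution, for illustration.
    MODEL_MAPPINGS = {"gpt-5.3-codex": "GPT 5.3 Codex", "gpt-5.2": "GPT 5.2"}  # fullName -> displayName

    def resolve_display_name(model_name: str) -> str:
        base = model_name.split("/")[-1]
        checks = [
            lambda k: k == model_name,                       # exact match
            lambda k: k == base,                             # base name (router-prefixed)
            lambda k: k.lower() == model_name.lower(),       # case-insensitive exact
            lambda k: k.lower() == base.lower(),             # case-insensitive base
        ]
        for match in checks:
            for full_name, display in MODEL_MAPPINGS.items():
                if match(full_name):
                    return display
        # Router-prefixed names fall back to the full id; plain names to the base name.
        return model_name if "/" in model_name else base

Under these assumptions, resolve_display_name("Abacus/gpt-5.3-codex") returns "GPT 5.3 Codex", matching the behavior the diff adds for router-prefixed models.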
28 changes: 28 additions & 0 deletions mito-ai/src/tests/AiChat/ModelSelector.test.tsx
@@ -12,6 +12,8 @@ import {
     GPT_4_1_DISPLAY_NAME,
     GPT_4_1_MODEL_NAME,
     GPT_5_2_MODEL_NAME,
+    GPT_5_3_CODEX_DISPLAY_NAME,
+    GPT_5_3_CODEX_MODEL_NAME,
     CLAUDE_HAIKU_MODEL_NAME,
     GEMINI_3_FLASH_MODEL_NAME,
     GEMINI_3_PRO_MODEL_NAME,
@@ -42,6 +44,7 @@ describe('ModelSelector', () => {
             models: [
                 GPT_4_1_MODEL_NAME,
                 GPT_5_2_MODEL_NAME,
+                GPT_5_3_CODEX_MODEL_NAME,
                 CLAUDE_HAIKU_MODEL_NAME,
                 GEMINI_3_FLASH_MODEL_NAME,
                 GEMINI_3_PRO_MODEL_NAME,
@@ -94,13 +97,38 @@
         });
     });
 
+    it('shows GPT 5.3 Codex in dropdown and calls onConfigChange when selected', async () => {
+        render(<ModelSelector onConfigChange={mockOnConfigChange} />);
+
+        await waitFor(() => {
+            expect(screen.queryByText('Loading models...')).not.toBeInTheDocument();
+        });
+
+        const dropdown = screen.getByText(DEFAULT_MODEL).closest('.model-selector-dropdown');
+        if (!dropdown) throw new Error('Dropdown element not found');
+        fireEvent.click(dropdown);
+
+        const modelOptionsContainer = await waitFor(() => {
+            return screen.getByTestId('model-selector').querySelector('.model-options');
+        });
+        if (!modelOptionsContainer) throw new Error('Model options container not found');
+
+        const modelOption = within(modelOptionsContainer as HTMLElement).getByText(GPT_5_3_CODEX_DISPLAY_NAME);
+        fireEvent.click(modelOption);
+
+        expect(mockOnConfigChange).toHaveBeenCalledWith({
+            model: GPT_5_3_CODEX_MODEL_NAME
+        });
+    });
+
     it('defaults to default model when no storedConfig exists and GPT 4.1 is first in available models', async () => {
         // Mock models with GPT 4.1 first (simulating the bug scenario)
         mockRequestAPI.mockResolvedValue({
             data: {
                 models: [
                     GPT_4_1_MODEL_NAME,
                     GPT_5_2_MODEL_NAME,
+                    GPT_5_3_CODEX_MODEL_NAME,
                     CLAUDE_HAIKU_MODEL_NAME,
                     GEMINI_3_FLASH_MODEL_NAME,
                     GEMINI_3_PRO_MODEL_NAME,
4 changes: 4 additions & 0 deletions mito-ai/src/utils/models.ts
@@ -15,6 +15,9 @@ export const GPT_4_1_MODEL_NAME = 'gpt-4.1';
 export const GPT_5_2_DISPLAY_NAME = 'GPT 5.2';
 export const GPT_5_2_MODEL_NAME = 'gpt-5.2';
 
+export const GPT_5_3_CODEX_DISPLAY_NAME = 'GPT 5.3 Codex';
+export const GPT_5_3_CODEX_MODEL_NAME = 'gpt-5.3-codex';
+
 export const GEMINI_3_FLASH_DISPLAY_NAME = 'Gemini 3 Flash';
 export const GEMINI_3_FLASH_MODEL_NAME = 'gemini-3-flash-preview';
@@ -40,6 +43,7 @@ export async function getAvailableModels(): Promise<string[]> {
     return [
         GPT_4_1_MODEL_NAME,
         GPT_5_2_MODEL_NAME,
+        GPT_5_3_CODEX_MODEL_NAME,
         CLAUDE_HAIKU_MODEL_NAME,
         GEMINI_3_FLASH_MODEL_NAME,
         GEMINI_3_PRO_MODEL_NAME,