Skip to content

Commit 656b4c2

Browse files
hydropix and claude
committed
Fix: Load models dynamically for all OpenAI-compatible endpoints
Fixes #93 - Remove isLocal logic that only loaded models for localhost/127.0.0.1 - Always attempt to fetch models dynamically from any OpenAI-compatible endpoint - Fall back to static OpenAI model list only when fetch fails - Simplify code by removing conditional logic (~50 lines removed) Tested with: - LM Studio (localhost) - Remote Ollama server with OpenAI-compatible endpoint - Graceful fallback when endpoint unavailable Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
1 parent 9d651eb commit 656b4c2

File tree

2 files changed

+52
-120
lines changed

2 files changed

+52
-120
lines changed

src/api/blueprints/config_routes.py

Lines changed: 21 additions & 83 deletions
Original file line numberDiff line numberDiff line change
@@ -220,8 +220,8 @@ def _get_openrouter_models(provided_api_key=None):
220220
def _get_openai_models(provided_api_key=None, api_endpoint=None):
221221
"""Get available models from OpenAI-compatible API
222222
223-
For local servers (llama.cpp, LM Studio, vLLM, etc.), fetches models dynamically.
224-
For official OpenAI API, returns a static list of common models.
223+
Always tries to fetch models dynamically from any OpenAI-compatible endpoint.
224+
Falls back to static list if dynamic fetch fails.
225225
"""
226226
api_key = _resolve_api_key(provided_api_key, 'OPENAI_API_KEY', OPENAI_API_KEY)
227227

@@ -232,10 +232,7 @@ def _get_openai_models(provided_api_key=None, api_endpoint=None):
232232
else:
233233
base_url = 'https://api.openai.com/v1'
234234

235-
# Check if this is a local server (llama.cpp, LM Studio, vLLM, etc.)
236-
is_local = 'localhost' in base_url or '127.0.0.1' in base_url
237-
238-
# Static list of OpenAI models (fallback for official API)
235+
# Static list of OpenAI models (fallback)
239236
openai_static_models = [
240237
{'id': 'gpt-4o', 'name': 'GPT-4o (Latest)'},
241238
{'id': 'gpt-4o-mini', 'name': 'GPT-4o Mini'},
@@ -251,7 +248,7 @@ def _get_openai_models(provided_api_key=None, api_endpoint=None):
251248
headers['Authorization'] = f'Bearer {api_key}'
252249

253250
if DEBUG_MODE:
254-
logger.debug(f"📥 Fetching models from OpenAI-compatible server: {models_url}")
251+
logger.debug(f"📥 Fetching models from OpenAI-compatible endpoint: {models_url}")
255252

256253
response = requests.get(models_url, headers=headers, timeout=10)
257254

@@ -285,91 +282,32 @@ def _get_openai_models(provided_api_key=None, api_endpoint=None):
285282
"model_names": model_ids,
286283
"default": default_model,
287284
"status": "openai_connected",
288-
"count": len(models),
289-
"is_local": is_local
285+
"count": len(models)
290286
})
291287

292288
# If we get here, either request failed or no models returned
293-
# For local servers, return error; for OpenAI, return static list
294-
if is_local:
295-
error_msg = f"Could not connect to local server at {base_url}. Make sure your OpenAI-compatible server (llama.cpp, LM Studio, vLLM, etc.) is running."
296-
return jsonify({
297-
"models": [],
298-
"model_names": [],
299-
"default": "",
300-
"status": "openai_error",
301-
"count": 0,
302-
"error": error_msg,
303-
"is_local": True
304-
})
305-
else:
306-
# Return static OpenAI models
307-
model_ids = [m['id'] for m in openai_static_models]
308-
return jsonify({
309-
"models": openai_static_models,
310-
"model_names": model_ids,
311-
"default": "gpt-4o",
312-
"status": "openai_static",
313-
"count": len(openai_static_models),
314-
"is_local": False
315-
})
316-
317-
except requests.exceptions.ConnectionError:
318-
if is_local:
319-
error_msg = f"Connection refused to {base_url}. Is your OpenAI-compatible server running?"
320-
else:
321-
error_msg = f"Connection error to OpenAI API"
322-
289+
# Fall back to static list
323290
if DEBUG_MODE:
324-
logger.debug(f"❌ OpenAI-compatible server connection error: {error_msg}")
291+
logger.debug(f"⚠️ No models returned from {base_url}, using fallback list")
325292

326-
# For official OpenAI, return static list even on error
327-
if not is_local:
328-
model_ids = [m['id'] for m in openai_static_models]
329-
return jsonify({
330-
"models": openai_static_models,
331-
"model_names": model_ids,
332-
"default": "gpt-4o",
333-
"status": "openai_static",
334-
"count": len(openai_static_models),
335-
"is_local": False
336-
})
337-
338-
return jsonify({
339-
"models": [],
340-
"model_names": [],
341-
"default": "",
342-
"status": "openai_error",
343-
"count": 0,
344-
"error": error_msg,
345-
"is_local": True
346-
})
293+
except requests.exceptions.ConnectionError as e:
294+
error_msg = f"Connection error to {base_url}"
295+
if DEBUG_MODE:
296+
logger.debug(f"❌ OpenAI-compatible endpoint connection error: {error_msg}")
347297

348298
except Exception as e:
349299
if DEBUG_MODE:
350-
logger.debug(f"❌ OpenAI-compatible server error: {e}")
351-
352-
# For official OpenAI, return static list even on error
353-
if not is_local:
354-
model_ids = [m['id'] for m in openai_static_models]
355-
return jsonify({
356-
"models": openai_static_models,
357-
"model_names": model_ids,
358-
"default": "gpt-4o",
359-
"status": "openai_static",
360-
"count": len(openai_static_models),
361-
"is_local": False
362-
})
300+
logger.debug(f"❌ OpenAI-compatible endpoint error: {e}")
363301

364-
return jsonify({
365-
"models": [],
366-
"model_names": [],
367-
"default": "",
368-
"status": "openai_error",
369-
"count": 0,
370-
"error": str(e),
371-
"is_local": True
372-
})
302+
# Fallback: return static OpenAI models
303+
model_ids = [m['id'] for m in openai_static_models]
304+
return jsonify({
305+
"models": openai_static_models,
306+
"model_names": model_ids,
307+
"default": "gpt-4o",
308+
"status": "openai_static",
309+
"count": len(openai_static_models)
310+
})
373311

374312
def _get_gemini_models(provided_api_key=None):
375313
"""Get available models from Gemini API"""

src/web/static/js/providers/provider-manager.js

Lines changed: 31 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -427,59 +427,53 @@ export const ProviderManager = {
427427

428428
/**
429429
* Load OpenAI-compatible models dynamically
430-
* For local servers (llama.cpp, LM Studio, vLLM, etc.): fetches models from the local server
431-
* For OpenAI: uses static list (dynamic fetch requires valid API key)
430+
* Always tries to fetch models dynamically from any OpenAI-compatible endpoint.
431+
* Falls back to static list if dynamic fetch fails.
432432
*/
433433
async loadOpenAIModels() {
434434
const modelSelect = DomHelpers.getElement('model');
435435
if (!modelSelect) return;
436436

437-
// Get API endpoint to determine if it's a local server or OpenAI cloud
438437
const apiEndpoint = DomHelpers.getValue('openaiEndpoint') || 'https://api.openai.com/v1/chat/completions';
439-
const isLocal = apiEndpoint.includes('localhost') || apiEndpoint.includes('127.0.0.1');
440438

439+
modelSelect.innerHTML = '<option value="">Loading models...</option>';
441440
StatusManager.setChecking();
442441

443-
if (isLocal) {
444-
// Local server (llama.cpp, LM Studio, vLLM, etc.): try to fetch models dynamically
445-
modelSelect.innerHTML = '<option value="">Loading models from local server...</option>';
442+
try {
443+
const apiKey = ApiKeyUtils.getValue('openaiApiKey');
444+
const data = await ApiClient.getModels('openai', { apiKey, apiEndpoint });
446445

447-
try {
448-
const apiKey = ApiKeyUtils.getValue('openaiApiKey');
449-
const data = await ApiClient.getModels('openai', { apiKey, apiEndpoint });
446+
if (data.models && data.models.length > 0) {
447+
MessageLogger.showMessage('', '');
450448

451-
if (data.models && data.models.length > 0) {
452-
MessageLogger.showMessage('', '');
449+
// Format models for the dropdown
450+
const formattedModels = data.models.map(m => ({
451+
value: m.id,
452+
label: m.name || m.id
453+
}));
453454

454-
// Format models for the dropdown
455-
const formattedModels = data.models.map(m => ({
456-
value: m.id,
457-
label: m.name || m.id
458-
}));
455+
const envModelApplied = populateModelSelect(formattedModels, data.default, 'openai');
456+
MessageLogger.addLog(`✅ ${data.count} model(s) loaded from OpenAI-compatible endpoint`);
459457

460-
const envModelApplied = populateModelSelect(formattedModels, data.default, 'openai');
461-
MessageLogger.addLog(`✅ ${data.count} model(s) loaded from local server`);
458+
if (envModelApplied && data.default) {
459+
SettingsManager.markEnvModelApplied();
460+
}
462461

463-
if (envModelApplied && data.default) {
464-
SettingsManager.markEnvModelApplied();
465-
}
462+
SettingsManager.applyPendingModelSelection();
463+
ModelDetector.checkAndShowRecommendation();
466464

467-
SettingsManager.applyPendingModelSelection();
468-
ModelDetector.checkAndShowRecommendation();
469-
470-
StateManager.setState('models.availableModels', formattedModels.map(m => m.value));
471-
StatusManager.setConnected('openai', data.count);
472-
return;
473-
} else {
474-
// Local server not running or no models
475-
const errorMsg = data.error || 'Local server not accessible';
476-
MessageLogger.showMessage(`⚠️ ${errorMsg}`, 'warning');
477-
MessageLogger.addLog(`⚠️ ${errorMsg}. Using fallback OpenAI models.`);
478-
}
479-
} catch (error) {
480-
MessageLogger.showMessage(`⚠️ Could not connect to local server. Using fallback models.`, 'warning');
481-
MessageLogger.addLog(`⚠️ Local server connection error: ${error.message}`);
465+
StateManager.setState('models.availableModels', formattedModels.map(m => m.value));
466+
StatusManager.setConnected('openai', data.count);
467+
return;
468+
} else {
469+
// No models returned from endpoint
470+
const errorMsg = data.error || 'No models available from endpoint';
471+
MessageLogger.showMessage(`⚠️ ${errorMsg}. Using fallback OpenAI models.`, 'warning');
472+
MessageLogger.addLog(`⚠️ ${errorMsg}. Using fallback list.`);
482473
}
474+
} catch (error) {
475+
MessageLogger.showMessage(`⚠️ Could not connect to endpoint. Using fallback OpenAI models.`, 'warning');
476+
MessageLogger.addLog(`⚠️ Connection error: ${error.message}. Using fallback list.`);
483477
}
484478

485479
// Fallback: use static OpenAI models list

0 commit comments

Comments (0)