Skip to content

Commit d53a5a0

Browse files
committed
refactor: centralize AI model defaults via YAML catalog
Introduce pkg/ai/models catalog to load provider metadata from a single YAML source (with env/override support) and expose a models_catalog operation. Update manager, generator, and universal strategy to consume the catalog instead of hard-coded lists. Frontend now fetches the same catalog via aiService, removes local mocks, and adjusts dropdown fallbacks. Tests cover catalog loading and new client flows.
1 parent d3858ad commit d53a5a0

File tree

15 files changed

+574
-166
lines changed

15 files changed

+574
-166
lines changed

.gitignore

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,9 @@ temp/
7777

7878
# AI model files and caches
7979
models/
80+
!pkg/ai/models/
81+
!pkg/ai/models/*.yaml
82+
!pkg/ai/models/*.go
8083
*.model
8184
*.cache
8285
*.pkl

frontend/src/components/AISettingsPanel.vue

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -144,11 +144,7 @@
144144
</el-option>
145145
</template>
146146
<template v-else>
147-
<el-option value="gpt-5" label="GPT-5 ⭐ Recommended" />
148-
<el-option value="gpt-5-mini" label="GPT-5 Mini" />
149-
<el-option value="gpt-5-nano" label="GPT-5 Nano" />
150-
<el-option value="gpt-5-pro" label="GPT-5 Pro" />
151-
<el-option value="gpt-4.1" label="GPT-4.1" />
147+
<el-option disabled value="no-models" :label="t('ai.welcome.noModels')" />
152148
</template>
153149
</el-select>
154150
<el-button
@@ -218,8 +214,7 @@
218214
</el-option>
219215
</template>
220216
<template v-else>
221-
<el-option value="deepseek-reasoner" label="DeepSeek Reasoner ⭐" />
222-
<el-option value="deepseek-chat" label="DeepSeek Chat" />
217+
<el-option disabled value="no-models" :label="t('ai.welcome.noModels')" />
223218
</template>
224219
</el-select>
225220
<el-button

frontend/src/composables/useAIChat.ts

Lines changed: 18 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -125,8 +125,24 @@ export function useAIChat(_context: AppContext) {
125125
}
126126
} catch (error) {
127127
console.error('Failed to fetch models:', error)
128-
// Use mock models as fallback for this provider
129-
modelsByProvider.value[storeKey] = getMockModels(storeKey)
128+
const cachedFallback = catalogCache.value[storeKey]
129+
if (cachedFallback && cachedFallback.length) {
130+
modelsByProvider.value[storeKey] = cachedFallback
131+
return
132+
}
133+
134+
try {
135+
const catalog = await aiService.fetchModelCatalog(storeKey)
136+
const entry = catalog[storeKey]
137+
const fallbackModels = entry?.models ?? []
138+
modelsByProvider.value[storeKey] = fallbackModels
139+
if (fallbackModels.length) {
140+
catalogCache.value[storeKey] = fallbackModels
141+
}
142+
} catch (catalogError) {
143+
console.error('Failed to fetch catalog fallback:', catalogError)
144+
modelsByProvider.value[storeKey] = []
145+
}
130146
}
131147
}
132148

frontend/src/types/index.ts

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,9 @@ export interface AIConfig {
4242
export interface Model {
4343
id: string
4444
name: string
45-
size: string
45+
size?: string
46+
description?: string
47+
maxTokens?: number
4648
}
4749

4850
/**

frontend/src/utils/config.ts

Lines changed: 1 addition & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import type { AIConfig, Model, DatabaseDialect } from '@/types'
1+
import type { AIConfig, DatabaseDialect } from '@/types'
22

33
export type Provider = 'ollama' | 'openai' | 'deepseek' | 'local'
44

@@ -109,30 +109,6 @@ export function getDefaultConfig(provider: string): Partial<AIConfig> {
109109
}
110110
}
111111

112-
/**
113-
* Get mock models when API fails
114-
*/
115-
export function getMockModels(provider: string): Model[] {
116-
const mocks: Record<string, Model[]> = {
117-
ollama: [
118-
{ id: 'llama3.2:3b', name: 'Llama 3.2 3B', size: '2GB' },
119-
{ id: 'gemma2:9b', name: 'Gemma 2 9B', size: '5GB' }
120-
],
121-
openai: [
122-
{ id: 'gpt-5', name: 'GPT-5 ⭐', size: 'Cloud' },
123-
{ id: 'gpt-5-mini', name: 'GPT-5 Mini', size: 'Cloud' },
124-
{ id: 'gpt-5-nano', name: 'GPT-5 Nano', size: 'Cloud' },
125-
{ id: 'gpt-5-pro', name: 'GPT-5 Pro', size: 'Cloud' },
126-
{ id: 'gpt-4.1', name: 'GPT-4.1', size: 'Cloud' }
127-
],
128-
deepseek: [
129-
{ id: 'deepseek-chat', name: 'DeepSeek Chat', size: 'Cloud' },
130-
{ id: 'deepseek-reasoner', name: 'DeepSeek Reasoner', size: 'Cloud' }
131-
]
132-
}
133-
return mocks[provider] || []
134-
}
135-
136112
/**
137113
* Generate unique ID
138114
*/

frontend/tests/composables/useAIChat.test.ts

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import type { AppContext } from '@/types'
77
vi.mock('@/services/aiService', () => ({
88
aiService: {
99
fetchModels: vi.fn(),
10+
fetchModelCatalog: vi.fn(),
1011
testConnection: vi.fn(),
1112
generateSQL: vi.fn(),
1213
saveConfig: vi.fn()
@@ -22,6 +23,7 @@ describe('useAIChat', () => {
2223

2324
// Set default mock behavior for fetchModels (called during initialization)
2425
vi.mocked(aiService.fetchModels).mockResolvedValue([])
26+
vi.mocked(aiService.fetchModelCatalog).mockResolvedValue({})
2527

2628
mockContext = {
2729
i18n: {
@@ -185,8 +187,19 @@ describe('useAIChat', () => {
185187
expect(availableModels.value[0].id).toBe('model1')
186188
})
187189

188-
it('should use mock models when API fails', async () => {
190+
it('should use catalog models when API fails', async () => {
189191
vi.mocked(aiService.fetchModels).mockRejectedValue(new Error('Network error'))
192+
vi.mocked(aiService.fetchModelCatalog).mockResolvedValueOnce({
193+
ollama: {
194+
display_name: 'Ollama',
195+
category: 'local',
196+
endpoint: 'http://localhost:11434',
197+
requires_api_key: false,
198+
models: [
199+
{ id: 'llama3.2:3b', name: 'Llama 3.2 3B', size: '2GB' }
200+
]
201+
}
202+
})
190203

191204
const { availableModels, refreshModels } = useAIChat(mockContext)
192205

frontend/tests/services/aiService.spec.ts

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -84,4 +84,32 @@ describe('aiService', () => {
8484
expect(health.healthy).toBe(true)
8585
expect(health.provider).toBe('ollama')
8686
})
87+
88+
it('fetchModelCatalog should normalize provider keys', async () => {
89+
const catalogPayload = {
90+
OpenAI: {
91+
display_name: 'OpenAI',
92+
category: 'cloud',
93+
endpoint: 'https://api.openai.com',
94+
requires_api_key: true,
95+
models: [
96+
{ id: 'gpt-5', name: 'GPT-5', description: 'Flagship', max_tokens: 200000 }
97+
]
98+
}
99+
}
100+
101+
fetchMock.mockResolvedValueOnce(
102+
createFetchResponse({
103+
data: [
104+
{ key: 'catalog', value: JSON.stringify(catalogPayload) },
105+
{ key: 'success', value: true }
106+
]
107+
})
108+
)
109+
110+
const catalog = await aiService.fetchModelCatalog()
111+
expect(catalog.openai).toBeDefined()
112+
expect(catalog.openai.models).toHaveLength(1)
113+
expect(catalog.openai.models[0].id).toBe('gpt-5')
114+
})
87115
})

frontend/tests/utils/config.test.ts

Lines changed: 0 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@ import {
33
loadConfig,
44
saveConfig,
55
getDefaultConfig,
6-
getMockModels,
76
generateId
87
} from '@/utils/config'
98
import type { AIConfig } from '@/types'
@@ -180,34 +179,6 @@ describe('config utils', () => {
180179
})
181180
})
182181

183-
describe('getMockModels', () => {
184-
it('should return ollama mock models', () => {
185-
const models = getMockModels('ollama')
186-
187-
expect(models).toHaveLength(2)
188-
expect(models[0].id).toBe('llama3.2:3b')
189-
expect(models[0].name).toBe('Llama 3.2 3B')
190-
})
191-
192-
it('should return openai mock models', () => {
193-
const models = getMockModels('openai')
194-
195-
expect(models).toHaveLength(5)
196-
expect(models.map(model => model.id)).toEqual([
197-
'gpt-5',
198-
'gpt-5-mini',
199-
'gpt-5-nano',
200-
'gpt-5-pro',
201-
'gpt-4.1'
202-
])
203-
})
204-
205-
it('should return empty array for unknown provider', () => {
206-
const models = getMockModels('unknown')
207-
expect(models).toEqual([])
208-
})
209-
})
210-
211182
describe('generateId', () => {
212183
it('should generate unique IDs', () => {
213184
const id1 = generateId()

pkg/ai/generator.go

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ import (
2626
"sync"
2727
"time"
2828

29+
"github.com/linuxsuren/atest-ext-ai/pkg/ai/models"
2930
"github.com/linuxsuren/atest-ext-ai/pkg/ai/providers/universal"
3031
"github.com/linuxsuren/atest-ext-ai/pkg/config"
3132
"github.com/linuxsuren/atest-ext-ai/pkg/interfaces"
@@ -735,12 +736,9 @@ func createRuntimeClient(provider string, runtimeConfig map[string]any) (interfa
735736
}
736737

737738
if config.Endpoint == "" {
738-
switch normalizedProvider {
739-
case "openai":
740-
config.Endpoint = "https://api.openai.com"
741-
case "deepseek":
742-
config.Endpoint = "https://api.deepseek.com"
743-
case "custom":
739+
if endpoint := models.EndpointForProvider(normalizedProvider); endpoint != "" {
740+
config.Endpoint = endpoint
741+
} else if normalizedProvider == "custom" {
744742
return nil, fmt.Errorf("endpoint is required for custom provider")
745743
}
746744
}

pkg/ai/manager.go

Lines changed: 36 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@ import (
2929
"time"
3030

3131
"github.com/linuxsuren/atest-ext-ai/pkg/ai/discovery"
32+
"github.com/linuxsuren/atest-ext-ai/pkg/ai/models"
3233
"github.com/linuxsuren/atest-ext-ai/pkg/ai/providers/universal"
3334
"github.com/linuxsuren/atest-ext-ai/pkg/config"
3435
"github.com/linuxsuren/atest-ext-ai/pkg/interfaces"
@@ -543,12 +544,9 @@ func createOpenAICompatibleClient(provider string, cfg config.AIService) (interf
543544
}
544545

545546
if uniCfg.Endpoint == "" {
546-
switch normalized {
547-
case "openai":
548-
uniCfg.Endpoint = "https://api.openai.com"
549-
case "deepseek":
550-
uniCfg.Endpoint = "https://api.deepseek.com"
551-
case "custom":
547+
if endpoint := models.EndpointForProvider(normalized); endpoint != "" {
548+
uniCfg.Endpoint = endpoint
549+
} else if normalized == "custom" {
552550
return nil, fmt.Errorf("endpoint is required for custom provider")
553551
}
554552
}
@@ -585,41 +583,42 @@ func normalizeProviderName(provider string) string {
585583

586584
// getOnlineProviders returns predefined online providers
587585
func (m *Manager) getOnlineProviders() []*ProviderInfo {
588-
return []*ProviderInfo{
589-
{
590-
Name: "deepseek",
591-
Type: "online",
592-
Available: true,
593-
Endpoint: "https://api.deepseek.com",
594-
Models: []interfaces.ModelInfo{
595-
{ID: "deepseek-chat", Name: "DeepSeek Chat", Description: "DeepSeek's flagship conversational AI model", MaxTokens: 32768},
596-
{ID: "deepseek-reasoner", Name: "DeepSeek Reasoner", Description: "DeepSeek's reasoning model with thinking capabilities", MaxTokens: 32768},
597-
},
598-
LastChecked: time.Now(),
599-
Config: map[string]interface{}{
600-
"requires_api_key": true,
601-
"provider_type": "online",
602-
},
603-
},
604-
{
605-
Name: "openai",
606-
Type: "online",
607-
Available: true,
608-
Endpoint: "https://api.openai.com",
609-
Models: []interfaces.ModelInfo{
610-
{ID: "gpt-5", Name: "GPT-5", Description: "OpenAI's flagship GPT-5 model", MaxTokens: 200000},
611-
{ID: "gpt-5-mini", Name: "GPT-5 Mini", Description: "Optimized GPT-5 model for lower latency workloads", MaxTokens: 80000},
612-
{ID: "gpt-5-nano", Name: "GPT-5 Nano", Description: "Cost-efficient GPT-5 variant for lightweight tasks", MaxTokens: 40000},
613-
{ID: "gpt-5-pro", Name: "GPT-5 Pro", Description: "High performance GPT-5 model with extended reasoning", MaxTokens: 240000},
614-
{ID: "gpt-4.1", Name: "GPT-4.1", Description: "Balanced GPT-4 series model with strong multimodal support", MaxTokens: 128000},
615-
},
586+
catalog, err := models.GetCatalog()
587+
if err != nil {
588+
logging.Logger.Warn("Failed to load model catalog", "error", err)
589+
return nil
590+
}
591+
592+
var providers []*ProviderInfo
593+
for _, name := range catalog.ProviderNames() {
594+
entry, ok := catalog.Provider(name)
595+
if !ok {
596+
continue
597+
}
598+
599+
providerType := entry.Category
600+
if providerType == "" {
601+
providerType = "cloud"
602+
}
603+
if providerType != "cloud" && providerType != "online" {
604+
continue
605+
}
606+
607+
providers = append(providers, &ProviderInfo{
608+
Name: entry.Name,
609+
Type: providerType,
610+
Available: true,
611+
Endpoint: entry.Endpoint,
612+
Models: entry.Models,
616613
LastChecked: time.Now(),
617614
Config: map[string]interface{}{
618-
"requires_api_key": true,
619-
"provider_type": "online",
615+
"requires_api_key": entry.RequiresAPIKey,
616+
"provider_type": providerType,
620617
},
621-
},
618+
})
622619
}
620+
621+
return providers
623622
}
624623

625624
// ===== Retry Logic =====

0 commit comments

Comments (0)