Skip to content

Commit 6e81ee4

Browse files
jsonify and claude authored
Fix Summarize and Generate Tags features (#120)
* feat: add global AI model selection for all AI features

Add unified AI model preference that applies to all AI-powered features:
- Template generation
- Tag generation
- Note summarization
- Semantic search

Changes:
- Add noted.ai.preferredModel configuration setting
- Update aiModelService to check global config first, with fallback to templates.preferredModel for backwards compatibility
- Refactor TagGenerator, SummarizationService, and SemanticSearchEngine to use centralized selectAIModel()
- Update "Select AI Model" command to set global preference
- Add getAvailableModels() and formatModelForDisplay() helper functions
- Remove hardcoded model selections (vendor: 'copilot', family: 'gpt-4o')

Users can now choose their preferred LLM once for all AI features via:
- Command: "Noted: Select AI Model"
- Config: noted.ai.preferredModel
- Priority: Claude Sonnet → Opus → GPT-4 → Gemini → any available

Resolves user request for unified LLM model control across all AI features.

* refactor: improve AI model selection organization

Code review improvements:
- Move handleSelectAIModel from templateCommands.ts to new aiCommands.ts
  - Better organization since this command now affects all AI features, not just templates
- Simplify formatModelForDisplay return type
  - Remove redundant id property (detail already contains model.id)
  - Caller uses model.id directly from original object
- Update import in extension.ts to reflect new location

No functional changes, purely organizational improvements.

* docs: update Select AI Model tooltip to reflect global scope

Update tooltip in Templates & Recent > Manage section to clarify that Select AI Model now affects all AI features, not just templates.

Old: "Choose which AI model to use for template generation"
New: "Choose AI model for all features (templates, tagging, summarization, search)"

This aligns the UI description with the recent changes that unified AI model selection across all features.
--------- Co-authored-by: Claude <[email protected]>
1 parent 06d76a3 commit 6e81ee4

File tree

9 files changed

+134
-119
lines changed

9 files changed

+134
-119
lines changed

package.json

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1212,6 +1212,11 @@
12121212
"default": true,
12131213
"description": "Enable AI-powered summarization features"
12141214
},
1215+
"noted.ai.preferredModel": {
1216+
"type": "string",
1217+
"default": "",
1218+
"description": "Preferred AI model ID for all AI features (tagging, summarization, search, templates). Empty = automatic selection. Use 'Noted: Select AI Model' command to choose from available models."
1219+
},
12151220
"noted.ai.summaryLength": {
12161221
"type": "string",
12171222
"enum": [

src/commands/aiCommands.ts

Lines changed: 78 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,78 @@
1+
import * as vscode from 'vscode';
2+
import { getAvailableModels, formatModelForDisplay } from '../services/aiModelService';
3+
4+
/**
5+
* Command: Select preferred AI model for all AI features
6+
* Sets the global AI model preference used by templates, tagging, summarization, and search
7+
*/
8+
export async function handleSelectAIModel(): Promise<void> {
9+
try {
10+
// Get all available models
11+
const models = await getAvailableModels();
12+
13+
if (models.length === 0) {
14+
vscode.window.showErrorMessage(
15+
'No AI models available. Please install GitHub Copilot, Claude, or another LLM extension.'
16+
);
17+
return;
18+
}
19+
20+
// Get current preference (check both global and template-specific for backwards compatibility)
21+
const config = vscode.workspace.getConfiguration('noted');
22+
const currentModelId = config.get<string>('ai.preferredModel') || config.get<string>('templates.preferredModel') || '';
23+
24+
// Build quick pick items
25+
interface ModelQuickPickItem extends vscode.QuickPickItem {
26+
modelId: string;
27+
}
28+
29+
const items: ModelQuickPickItem[] = models.map(model => {
30+
const formatted = formatModelForDisplay(model);
31+
const isSelected = model.id === currentModelId;
32+
const icon = isSelected ? '$(check)' : '$(circle-outline)';
33+
34+
return {
35+
modelId: model.id,
36+
label: `${icon} ${formatted.label}`,
37+
description: formatted.description,
38+
detail: formatted.detail,
39+
picked: isSelected
40+
};
41+
});
42+
43+
// Add "Automatic" option at the top
44+
items.unshift({
45+
modelId: '',
46+
label: `${currentModelId === '' ? '$(check)' : '$(circle-outline)'} Automatic (Recommended)`,
47+
description: 'Smart selection based on availability',
48+
detail: 'Priority: Claude Sonnet → Opus → GPT-4 → Gemini',
49+
picked: currentModelId === ''
50+
});
51+
52+
// Show picker
53+
const selected = await vscode.window.showQuickPick(items, {
54+
placeHolder: 'Select AI model for all AI features (templates, tagging, summarization, search)',
55+
title: 'AI Model Selection',
56+
matchOnDescription: true,
57+
matchOnDetail: true
58+
});
59+
60+
if (!selected) {
61+
return;
62+
}
63+
64+
// Save preference to global config
65+
await config.update('ai.preferredModel', selected.modelId, vscode.ConfigurationTarget.Global);
66+
67+
const modelName = selected.modelId === '' ? 'Automatic selection' : selected.label.replace(/\$\(.*?\)\s/, '');
68+
vscode.window.showInformationMessage(
69+
`AI model set to: ${modelName}\n\nThis will be used for templates, tagging, summarization, and search.`
70+
);
71+
} catch (error) {
72+
if (error instanceof Error) {
73+
vscode.window.showErrorMessage(`Failed to select AI model: ${error.message}`);
74+
} else {
75+
vscode.window.showErrorMessage('Failed to select AI model: Unknown error');
76+
}
77+
}
78+
}

src/commands/templateCommands.ts

Lines changed: 0 additions & 72 deletions
Original file line numberDiff line numberDiff line change
@@ -431,78 +431,6 @@ async function openTemplate(templateId: string): Promise<void> {
431431
}
432432
}
433433

434-
/**
435-
* Command: Select preferred AI model for template generation
436-
*/
437-
export async function handleSelectAIModel(): Promise<void> {
438-
try {
439-
// Get all available models
440-
const models = await templateGenerator.getAvailableModels();
441-
442-
if (models.length === 0) {
443-
vscode.window.showErrorMessage(
444-
'No AI models available. Please install GitHub Copilot, Claude, or another LLM extension.'
445-
);
446-
return;
447-
}
448-
449-
// Get current preference
450-
const config = vscode.workspace.getConfiguration('noted');
451-
const currentModelId = config.get<string>('templates.preferredModel');
452-
453-
// Build quick pick items
454-
interface ModelQuickPickItem extends vscode.QuickPickItem {
455-
modelId: string;
456-
}
457-
458-
const items: ModelQuickPickItem[] = models.map(model => {
459-
const isSelected = model.id === currentModelId;
460-
const icon = isSelected ? '$(check)' : '$(circle-outline)';
461-
462-
return {
463-
modelId: model.id,
464-
label: `${icon} ${model.name}`,
465-
description: `${model.vendor}`,
466-
detail: model.description,
467-
picked: isSelected
468-
};
469-
});
470-
471-
// Add "Automatic" option at the top
472-
items.unshift({
473-
modelId: '',
474-
label: `${currentModelId === '' ? '$(check)' : '$(circle-outline)'} Automatic (Recommended)`,
475-
description: 'Smart selection based on availability',
476-
detail: 'Uses Claude > GPT > Gemini > any available model',
477-
picked: currentModelId === ''
478-
});
479-
480-
// Show picker
481-
const selected = await vscode.window.showQuickPick(items, {
482-
placeHolder: 'Select AI model for template generation',
483-
title: 'AI Model Selection',
484-
matchOnDescription: true,
485-
matchOnDetail: true
486-
});
487-
488-
if (!selected) {
489-
return;
490-
}
491-
492-
// Save preference
493-
await config.update('templates.preferredModel', selected.modelId, vscode.ConfigurationTarget.Global);
494-
495-
const modelName = selected.modelId === '' ? 'Automatic selection' : selected.label.replace(/\$\(.*?\)\s/, '');
496-
vscode.window.showInformationMessage(`AI model set to: ${modelName}`);
497-
} catch (error) {
498-
if (error instanceof Error) {
499-
vscode.window.showErrorMessage(`Failed to select AI model: ${error.message}`);
500-
} else {
501-
vscode.window.showErrorMessage('Failed to select AI model: Unknown error');
502-
}
503-
}
504-
}
505-
506434
/**
507435
* Command: Migrate legacy templates to JSON format
508436
*/

src/extension.ts

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -77,10 +77,12 @@ import { SummarizationCommands } from './commands/summarizationCommands';
7777
import {
7878
handleSuggestTags
7979
} from './commands/autoTagCommands';
80+
import {
81+
handleSelectAIModel
82+
} from './commands/aiCommands';
8083
import {
8184
handleCreateTemplateWithAI,
8285
handleEnhanceTemplate,
83-
handleSelectAIModel,
8486
handleMigrateTemplates,
8587
handleCreateUserStoryWithAI
8688
} from './commands/templateCommands';

src/providers/templatesTreeProvider.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -187,7 +187,7 @@ export class TemplatesTreeProvider implements vscode.TreeDataProvider<TreeItem>
187187
'⚙️ Select AI Model',
188188
'noted.selectAIModel',
189189
'settings-gear',
190-
'Choose which AI model to use for template generation'
190+
'Choose AI model for all features (templates, tagging, summarization, search)'
191191
),
192192
new ActionButtonItem(
193193
'Template Variables',

src/search/SemanticSearchEngine.ts

Lines changed: 5 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import * as vscode from 'vscode';
77
import * as path from 'path';
88
import { SmartSearchResult, MatchInfo, SemanticSearchConfig } from './types';
99
import { readFile, getFileStats } from '../services/fileSystemService';
10+
import { selectAIModel } from '../services/aiModelService';
1011

1112
export class SemanticSearchEngine {
1213
private config: SemanticSearchConfig;
@@ -21,15 +22,12 @@ export class SemanticSearchEngine {
2122
}
2223

2324
/**
24-
* Check if Copilot/LLM is available
25+
* Check if LLM is available
2526
*/
2627
async isAvailable(): Promise<boolean> {
2728
try {
28-
const models = await vscode.lm.selectChatModels({
29-
vendor: 'copilot',
30-
family: 'gpt-4o',
31-
});
32-
return models.length > 0;
29+
await selectAIModel();
30+
return true;
3331
} catch (error) {
3432
return false;
3533
}
@@ -51,16 +49,7 @@ export class SemanticSearchEngine {
5149
}
5250

5351
// Select model once at the beginning
54-
const models = await vscode.lm.selectChatModels({
55-
vendor: 'copilot',
56-
family: 'gpt-4o',
57-
});
58-
59-
if (models.length === 0) {
60-
throw new Error('Copilot LLM is not available. Please sign in to GitHub Copilot.');
61-
}
62-
63-
const model = models[0];
52+
const model = await selectAIModel();
6453
const results: SmartSearchResult[] = [];
6554
const maxResults = options.maxResults || this.config.maxCandidates;
6655
const filesToProcess = noteFiles.slice(0, maxResults);

src/services/aiModelService.ts

Lines changed: 33 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,15 +6,19 @@ import * as vscode from 'vscode';
66
*
77
* This is a shared utility used by:
88
* - Template generation (TemplateGenerator)
9-
* - User story creation (templateCommands)
9+
* - Tag generation (TagGenerator)
10+
* - Summarization (SummarizationService)
11+
* - Semantic search (SemanticSearchEngine)
1012
* - Any other AI-powered features
1113
*
1214
* @returns Selected language model
1315
* @throws Error if no models are available
1416
*/
1517
export async function selectAIModel(): Promise<vscode.LanguageModelChat> {
1618
const config = vscode.workspace.getConfiguration('noted');
17-
const preferredModelId = config.get<string>('templates.preferredModel');
19+
20+
// Check global AI model preference first, then fall back to template-specific (backwards compatibility)
21+
const preferredModelId = config.get<string>('ai.preferredModel') || config.get<string>('templates.preferredModel');
1822

1923
// Get all available models
2024
const allModels = await vscode.lm.selectChatModels({});
@@ -96,3 +100,30 @@ export async function selectAIModel(): Promise<vscode.LanguageModelChat> {
96100
// Last resort: Return first available model
97101
return allModels[0];
98102
}
103+
104+
/**
105+
* Get all available AI models with metadata
106+
* Used by the model selection command
107+
*
108+
* @returns Array of available models with vendor, family, and ID info
109+
*/
110+
export async function getAvailableModels(): Promise<vscode.LanguageModelChat[]> {
111+
return await vscode.lm.selectChatModels({});
112+
}
113+
114+
/**
115+
* Format a model for display in quick pick
116+
* @param model - Language model to format
117+
* @returns Object with label, description, and detail for quick pick
118+
*/
119+
export function formatModelForDisplay(model: vscode.LanguageModelChat): {
120+
label: string;
121+
description: string;
122+
detail: string;
123+
} {
124+
return {
125+
label: model.name || model.id,
126+
description: `${model.vendor} - ${model.family}`,
127+
detail: model.id
128+
};
129+
}

src/services/summarizationService.ts

Lines changed: 6 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ import { readFile } from './fileSystemService';
33
import * as crypto from 'crypto';
44
import { PromptTemplate } from './promptTemplateService';
55
import { SummaryHistoryService } from './summaryHistoryService';
6+
import { selectAIModel } from './aiModelService';
67

78
/**
89
* Options for note summarization
@@ -62,12 +63,12 @@ export class SummarizationService {
6263
}
6364

6465
/**
65-
* Check if Language Model API is available (Copilot)
66+
* Check if Language Model API is available
6667
*/
6768
private async isLanguageModelAvailable(): Promise<boolean> {
6869
try {
69-
const models = await vscode.lm.selectChatModels({ vendor: 'copilot' });
70-
return models && models.length > 0;
70+
await selectAIModel();
71+
return true;
7172
} catch (error) {
7273
return false;
7374
}
@@ -333,12 +334,7 @@ Summary requirements:
333334

334335
// Call Language Model API
335336
try {
336-
const models = await vscode.lm.selectChatModels({ vendor: 'copilot' });
337-
if (!models || models.length === 0) {
338-
throw new Error('No Copilot models available');
339-
}
340-
341-
const model = models[0];
337+
const model = await selectAIModel();
342338
const messages = [vscode.LanguageModelChatMessage.User(prompt)];
343339

344340
const response = await model.sendRequest(messages, {}, new vscode.CancellationTokenSource().token);
@@ -454,12 +450,7 @@ ${note.content}
454450

455451
// Call Language Model API
456452
try {
457-
const models = await vscode.lm.selectChatModels({ vendor: 'copilot' });
458-
if (!models || models.length === 0) {
459-
throw new Error('No Copilot models available');
460-
}
461-
462-
const model = models[0];
453+
const model = await selectAIModel();
463454
const messages = [vscode.LanguageModelChatMessage.User(prompt)];
464455

465456
const response = await model.sendRequest(messages, {}, new vscode.CancellationTokenSource().token);

src/tagging/TagGenerator.ts

Lines changed: 3 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import * as vscode from 'vscode';
22
import { NoteTag } from './TagParser';
33
import * as crypto from 'crypto';
4+
import { selectAIModel } from '../services/aiModelService';
45

56
/**
67
* Cache entry for tag generation results
@@ -54,18 +55,8 @@ export class TagGenerator {
5455
return [];
5556
}
5657

57-
// Select a language model
58-
const models = await vscode.lm.selectChatModels({
59-
vendor: 'copilot',
60-
family: 'gpt-4o'
61-
});
62-
63-
if (models.length === 0) {
64-
// No model available
65-
throw new Error('GitHub Copilot is not available. Please sign in to use auto-tagging.');
66-
}
67-
68-
const model = models[0];
58+
// Select the appropriate language model using shared service
59+
const model = await selectAIModel();
6960

7061
// Build the prompt
7162
const prompt = this.buildTagPrompt(content, maxTags);

0 commit comments

Comments (0)