Commit 665b7f4

feat: ask to download model

1 parent 1d22c2a commit 665b7f4

File tree

3 files changed: +25 -2 lines changed

README.md

Lines changed: 5 additions & 0 deletions
@@ -42,10 +42,15 @@ Currently Llama Coder supports only Codellama. Model is quantized in different w
 * m - slow on MacOS
 * g - slow on older NVidia cards (pre 30xx)
 
+## Troubleshooting
+
+Most problems can be diagnosed from the plugin's log in the VS Code extension output panel.
+
 ## Changelog
 
 ## [0.0.10]
 - Adding ability to pick a custom model
+- Asking the user whether to download the model if it is not available
 
 ## [0.0.9]
 - Adding deepseek 1b model and making it default

src/extension.ts

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ export function activate(context: vscode.ExtensionContext) {
     context.subscriptions.push(statusBarItem);
 
     // Create provider
-    const provider = new PromptProvider(statusBarItem);
+    const provider = new PromptProvider(statusBarItem, context);
     let disposable = vscode.languages.registerInlineCompletionItemProvider({ pattern: '**', }, provider);
     context.subscriptions.push(disposable);
 }
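
For context, `PromptProvider` now receives the `vscode.ExtensionContext` so it can persist the user's choice in `globalState`, a `Memento` key/value store that survives editor restarts. A minimal sketch of that API, using a hypothetical key name rather than the extension's real one:

```ts
import * as vscode from 'vscode';

export function activate(context: vscode.ExtensionContext) {
    // Read a persisted flag; 'some-flag' is a hypothetical key,
    // and the second argument is the default when the key is unset.
    const seen = context.globalState.get<boolean>('some-flag', false);
    if (!seen) {
        // Persist a value; update() returns a Thenable and can be awaited.
        void context.globalState.update('some-flag', true);
    }
}
```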

src/prompts/provider.ts

Lines changed: 19 additions & 1 deletion
@@ -13,9 +13,11 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
 
     lock = new AsyncLock();
     statusbar: vscode.StatusBarItem;
+    context: vscode.ExtensionContext;
 
-    constructor(statusbar: vscode.StatusBarItem) {
+    constructor(statusbar: vscode.StatusBarItem, context: vscode.ExtensionContext) {
         this.statusbar = statusbar;
+        this.context = context;
     }
 
     async provideInlineCompletionItems(document: vscode.TextDocument, position: vscode.Position, context: vscode.InlineCompletionContext, token: vscode.CancellationToken): Promise<vscode.InlineCompletionItem[] | vscode.InlineCompletionList | undefined | null> {
@@ -87,6 +89,22 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
 
         // Download model if not exists
         if (!modelExists) {
+
+            // Check if the user previously declined the download
+            if (this.context.globalState.get('llama-coder-download-ignored')) {
+                info(`Ignoring since the user asked to skip the download.`);
+                return;
+            }
+
+            // Ask for download
+            let download = await vscode.window.showInformationMessage(`Model ${inferenceConfig.modelName} is not downloaded. Do you want to download it? Answering "No" will require you to download the model manually.`, 'Yes', 'No');
+            if (download === 'No') {
+                info(`Ignoring since the user asked to skip the download.`);
+                this.context.globalState.update('llama-coder-download-ignored', true);
+                return;
+            }
+
+            // Perform download
             this.statusbar.text = `$(sync~spin) Downloading`;
             await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName);
             this.statusbar.text = `$(sync~spin) Llama Coder`;
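
Taken together, the new logic is an ask-once pattern: prompt the user, and persist a "No" so they are never re-prompted. The same flow can be read as a standalone helper; this is a minimal sketch, not the extension's actual code, assuming `info` and `ollamaDownloadModel(endpoint, modelName)` behave like the extension's own helpers shown in the diff above:

```ts
import * as vscode from 'vscode';

// Assumed to match the extension's own helpers as used in the diff.
declare function info(message: string): void;
declare function ollamaDownloadModel(endpoint: string, modelName: string): Promise<void>;

const DOWNLOAD_IGNORED_KEY = 'llama-coder-download-ignored';

// Returns true when the model is available (already present or just downloaded).
async function ensureModel(
    context: vscode.ExtensionContext,
    statusbar: vscode.StatusBarItem,
    endpoint: string,
    modelName: string,
    modelExists: boolean,
): Promise<boolean> {
    if (modelExists) {
        return true;
    }

    // Respect a previously stored "No" answer.
    if (context.globalState.get<boolean>(DOWNLOAD_IGNORED_KEY)) {
        info('Ignoring since the user asked to skip the download.');
        return false;
    }

    // Ask once; remember a "No" so the user is not prompted again.
    const answer = await vscode.window.showInformationMessage(
        `Model ${modelName} is not downloaded. Do you want to download it?`,
        'Yes', 'No',
    );
    if (answer === 'No') {
        info('Ignoring since the user asked to skip the download.');
        await context.globalState.update(DOWNLOAD_IGNORED_KEY, true);
        return false;
    }
    // Note: dismissing the message (answer === undefined) falls through
    // to the download, mirroring the behavior in the diff above.

    // Download with status bar feedback, as in the diff.
    statusbar.text = `$(sync~spin) Downloading`;
    await ollamaDownloadModel(endpoint, modelName);
    statusbar.text = `$(sync~spin) Llama Coder`;
    return true;
}
```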
