
Commit 99b2bd9

Remove prompt cache
1 parent a9494f0 commit 99b2bd9

2 files changed: 34 additions and 84 deletions


src/prompts/promptCache.ts

Lines changed: 0 additions & 28 deletions
This file was deleted.
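
The deleted module itself is not rendered in this view. Judging from the call sites removed from src/prompts/provider.ts below — getFromPromptCache({ prefix, suffix }) returning string | null | undefined, and setPromptToCache({ prefix, suffix, value }) storing a string | null — it was most likely a small in-memory map keyed on the prefix/suffix pair. A sketch of what the 28 deleted lines plausibly contained (a reconstruction from the call sites, not the actual deleted source):

// Plausible shape of the deleted src/prompts/promptCache.ts,
// reconstructed from its call sites in provider.ts; details may differ.
const cache = new Map<string, string | null>();

function cacheKey(args: { prefix: string, suffix: string }): string {
    // Separator chosen to be unlikely to appear in source text.
    return args.prefix + '\u001f' + args.suffix;
}

// undefined = cache miss; null = inference ran before and produced nothing.
export function getFromPromptCache(args: { prefix: string, suffix: string }): string | null | undefined {
    return cache.get(cacheKey(args));
}

export function setPromptToCache(args: { prefix: string, suffix: string, value: string | null }) {
    cache.set(cacheKey(args), args.value);
}

The undefined-vs-null distinction is what the deleted provider code below relied on: undefined meant "never computed, run inference", while a stored null meant "inference already ran and produced nothing, don't retry".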

src/prompts/provider.ts

Lines changed: 34 additions & 56 deletions
@@ -3,7 +3,6 @@ import { info, warn } from '../modules/log';
 import { autocomplete } from './autocomplete';
 import { preparePrompt } from './preparePrompt';
 import { AsyncLock } from '../modules/lock';
-import { getFromPromptCache, setPromptToCache } from './promptCache';
 import { isNotNeeded, isSupported } from './filter';
 import { ollamaCheckModel } from '../modules/ollamaCheckModel';
 import { ollamaDownloadModel } from '../modules/ollamaDownloadModel';
@@ -105,15 +104,6 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
             // Result
             let res: string | null = null;
 
-            // Check if in cache
-            let cached = getFromPromptCache({
-                prefix: prepared.prefix,
-                suffix: prepared.suffix
-            });
-
-            // If not cached
-            if (cached === undefined) {
-
             // Config
             let inferenceConfig = config.inference;
 
@@ -131,59 +121,47 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
             // Download model if not exists
             if (!modelExists) {
 
-                // Check if user asked to ignore download
-                if (this.context.globalState.get('llama-coder-download-ignored') === inferenceConfig.modelName) {
-                    info(`Ingoring since user asked to ignore download.`);
-                    return;
-                }
-
-                // Ask for download
-                let download = await vscode.window.showInformationMessage(`Model ${inferenceConfig.modelName} is not downloaded. Do you want to download it? Answering "No" would require you to manually download model.`, 'Yes', 'No');
-                if (download === 'No') {
-                    info(`Ingoring since user asked to ignore download.`);
-                    this.context.globalState.update('llama-coder-download-ignored', inferenceConfig.modelName);
-                    return;
-                }
-
-                // Perform download
-                this.update('sync~spin', 'Downloading');
-                await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
-                this.update('sync~spin', 'Llama Coder');
+                // Check if user asked to ignore download
+                if (this.context.globalState.get('llama-coder-download-ignored') === inferenceConfig.modelName) {
+                    info(`Ingoring since user asked to ignore download.`);
+                    return;
                 }
-            if (token.isCancellationRequested) {
-                info(`Canceled after AI completion.`);
+
+                // Ask for download
+                let download = await vscode.window.showInformationMessage(`Model ${inferenceConfig.modelName} is not downloaded. Do you want to download it? Answering "No" would require you to manually download model.`, 'Yes', 'No');
+                if (download === 'No') {
+                    info(`Ingoring since user asked to ignore download.`);
+                    this.context.globalState.update('llama-coder-download-ignored', inferenceConfig.modelName);
                     return;
                 }
 
-                // Run AI completion
-                info(`Running AI completion...`);
-                res = await autocomplete({
-                    prefix: prepared.prefix,
-                    suffix: prepared.suffix,
-                    endpoint: inferenceConfig.endpoint,
-                    bearerToken: inferenceConfig.bearerToken,
-                    model: inferenceConfig.modelName,
-                    maxLines: inferenceConfig.maxLines,
-                    maxTokens: inferenceConfig.maxTokens,
-                    temperature: inferenceConfig.temperature,
-                    canceled: () => token.isCancellationRequested,
-                });
-                info(`AI completion completed: ${res}`);
-
-                // Put to cache
-                setPromptToCache({
-                    prefix: prepared.prefix,
-                    suffix: prepared.suffix,
-                    value: res
-                });
+                // Perform download
+                this.update('sync~spin', 'Downloading');
+                await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
+                this.update('sync~spin', 'Llama Coder');
+            }
+            if (token.isCancellationRequested) {
+                info(`Canceled after AI completion.`);
+                return;
+            }
+
+            // Run AI completion
+            info(`Running AI completion...`);
+            res = await autocomplete({
+                prefix: prepared.prefix,
+                suffix: prepared.suffix,
+                endpoint: inferenceConfig.endpoint,
+                bearerToken: inferenceConfig.bearerToken,
+                model: inferenceConfig.modelName,
+                maxLines: inferenceConfig.maxLines,
+                maxTokens: inferenceConfig.maxTokens,
+                temperature: inferenceConfig.temperature,
+                canceled: () => token.isCancellationRequested,
+            });
+            info(`AI completion completed: ${res}`);
         } finally {
             this.update('chip', 'Llama Coder');
         }
-            } else {
-                if (cached !== null) {
-                    res = cached;
-                }
-            }
         if (token.isCancellationRequested) {
             info(`Canceled after AI completion.`);
             return;
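
Because most of the hunk above is a one-level re-indentation, the net result can be hard to see in diff form. Condensed from the new side of the diff (download prompts and logging trimmed; earlier steps of the method omitted), the post-commit completion path reads roughly as:

// Condensed summary of the new-side flow; not the verbatim file.
let res: string | null = null;
let inferenceConfig = config.inference;

// Download model if not exists (the user is asked first; a remembered
// "No" stored under 'llama-coder-download-ignored' skips the prompt).
if (!modelExists) {
    this.update('sync~spin', 'Downloading');
    await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
    this.update('sync~spin', 'Llama Coder');
}
if (token.isCancellationRequested) {
    return;
}

// With the cache removed, every completion request reaches the model;
// identical prefix/suffix pairs are no longer memoized.
res = await autocomplete({
    prefix: prepared.prefix,
    suffix: prepared.suffix,
    endpoint: inferenceConfig.endpoint,
    bearerToken: inferenceConfig.bearerToken,
    model: inferenceConfig.modelName,
    maxLines: inferenceConfig.maxLines,
    maxTokens: inferenceConfig.maxTokens,
    temperature: inferenceConfig.temperature,
    canceled: () => token.isCancellationRequested,
});

The `} else { if (cached !== null) { res = cached; } }` branch deleted at the end of the hunk was the old cache-hit path; its removal is what guarantees res is always freshly computed.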
