
Commit 7139945

cont : cleanup (#16)
1 parent: f2fc583

File tree: 4 files changed (+33 −28 lines)


package.json

Lines changed: 17 additions & 18 deletions
```diff
@@ -114,26 +114,11 @@
           "default": true,
           "description": "If code completion should be trggered automatically (true) or only by pressing Ctrl+l."
         },
-        "llama-vscode.api_key": {
+        "llama-vscode.api_key": {
           "type": "string",
           "default": "",
           "description": "llama.cpp server api key or OpenAI endpoint API key (optional)"
         },
-        "llama-vscode.is_openai_compatible": {
-          "type": "boolean",
-          "default": false,
-          "description": "If the server exposes an OpenAI API compatible endpoint."
-        },
-        "llama-vscode.openai_client_model": {
-          "type": "string",
-          "default": "",
-          "description": "The FIM friendly model supported by your OpenAI compatible endpoint to be used (e.g., Qwen2.5-Coder-14B-4-bit)"
-        },
-        "llama-vscode.openai_prompt_template": {
-          "type": "string",
-          "default": "<|fim_prefix|>{inputPrefix}{prompt}<|fim_suffix|>{inputSuffix}<|fim_middle|>",
-          "description": "The prompt template to be used for the OpenAI compatible endpoint."
-        },
         "llama-vscode.n_prefix": {
           "type": "number",
           "default": 256,
@@ -204,7 +189,6 @@
           "default": true,
           "description": "Enable/disable completions"
         },
-
         "llama-vscode.languageSettings": {
           "type": "object",
           "default": {
@@ -214,8 +198,23 @@
             "type": "boolean"
           },
           "description": "Enable/disable suggestions for specific languages"
-        }
+        },
+        "llama-vscode.use_openai_endpoint": {
+          "type": "boolean",
+          "default": false,
+          "description": "[EXPERIMENTAL] Use OAI endpoint. Slow and poor quality - avoid using"
+        },
+        "llama-vscode.openai_client_model": {
+          "type": "string",
+          "default": "",
+          "description": "The FIM friendly model supported by your OpenAI compatible endpoint to be used (e.g., Qwen2.5-Coder-14B-4-bit)"
+        },
+        "llama-vscode.openai_prompt_template": {
+          "type": "string",
+          "default": "<|fim_prefix|>{inputPrefix}{prompt}<|fim_suffix|>{inputSuffix}<|fim_middle|>",
+          "description": "The prompt template to be used for the OpenAI compatible endpoint."
         }
+      }
     }
   },
   "scripts": {
```

src/architect.ts

Lines changed: 2 additions & 2 deletions
```diff
@@ -677,10 +677,10 @@ export class Architect {
 
     // cut part of the suggestion in some special cases
     updateSuggestion = (suggestionLines: string[], lineSuffix: string) => {
-        if (lineSuffix.trim() != ""){
+        if (lineSuffix.trim() != "") {
             if (suggestionLines[0].endsWith(lineSuffix)) return suggestionLines[0].slice(0, -lineSuffix.length);
             if (suggestionLines.length > 1) return suggestionLines[0];
-        }
+        }
 
         return suggestionLines.join("\n");
     }
```
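To make the trimming rules concrete, here is how `updateSuggestion` resolves a few representative inputs; the results are worked out by hand from the code above, not taken from the repository's tests:

```typescript
// Non-empty line suffix, and the first suggestion line ends with it: cut the suffix.
updateSuggestion(["foo(bar);", "next();"], ");"); // -> "foo(bar"

// Non-empty line suffix, no overlap, multi-line suggestion: keep only the first line.
updateSuggestion(["foo(bar", "next();"], ");");   // -> "foo(bar"

// Empty (whitespace-only) line suffix: join all suggestion lines unchanged.
updateSuggestion(["foo(bar);", "next();"], "  "); // -> "foo(bar);\nnext();"
```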

src/configuration.ts

Lines changed: 12 additions & 6 deletions
```diff
@@ -5,10 +5,6 @@ export class Configuration {
     // extension configs
     enabled = true;
     endpoint = "http://127.0.0.1:8012";
-    is_openai_compatible = false;
-    openai_client: OpenAI | null = null;
-    openai_client_model: string = "";
-    openai_prompt_template: string = "<|fim_prefix|>{inputPrefix}{prompt}<|fim_suffix|>{inputSuffix}<|fim_middle|>";
     auto = true;
     api_key = "";
     n_prefix = 256;
@@ -24,9 +20,19 @@
     ring_scope = 1024;
     ring_update_ms = 1000;
     language = "en";
+
+    // experimental - avoid using
+    use_openai_endpoint = false;
+    openai_client: OpenAI | null = null;
+    openai_client_model: string = "";
+    openai_prompt_template: string = "<|fim_prefix|>{inputPrefix}{prompt}<|fim_suffix|>{inputSuffix}<|fim_middle|>";
+
     // additional configs
+    // TODO: change to snake_case for consistency
     axiosRequestConfig = {};
     disabledLanguages: string[] = [];
+
+    // TODO: change to snake_case for consistency
     RING_UPDATE_MIN_TIME_LAST_COMPL = 3000;
     MIN_TIME_BETWEEN_COMPL = 600;
     MAX_LAST_PICK_LINE_DISTANCE = 32;
@@ -82,7 +88,7 @@
     private updateConfigs = (config: vscode.WorkspaceConfiguration) => {
         // TODO Handle the case of wrong types for the configuration values
         this.endpoint = this.trimTrailingSlash(String(config.get<string>("endpoint")));
-        this.is_openai_compatible = Boolean(config.get<boolean>("is_openai_compatible"));
+        this.use_openai_endpoint = Boolean(config.get<boolean>("use_openai_endpoint"));
         this.openai_client_model = String(config.get<string>("openai_client_model"));
         this.openai_prompt_template = String(config.get<string>("openai_prompt_template"));
         this.auto = Boolean(config.get<boolean>("auto"));
@@ -139,7 +145,7 @@
 
     setOpenAiClient = () => {
         this.openai_client = null;
-        if (this.is_openai_compatible) {
+        if (this.use_openai_endpoint) {
             const openai = new OpenAI({
                 apiKey: this.api_key || "empty",
                 baseURL: this.endpoint,
```
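The `{inputPrefix}`, `{prompt}`, and `{inputSuffix}` placeholders in `openai_prompt_template` presumably stand for the text before the cursor, the prompt, and the text after the cursor. A minimal sketch of how such a template could be rendered; `renderFimPrompt` is a hypothetical helper for illustration, not code from this commit:

```typescript
// Hypothetical helper: fill the FIM placeholders of the configured template.
const renderFimPrompt = (
    template: string,
    inputPrefix: string,
    prompt: string,
    inputSuffix: string
): string =>
    template
        .replace("{inputPrefix}", inputPrefix)
        .replace("{prompt}", prompt)
        .replace("{inputSuffix}", inputSuffix);

// With the default template this produces:
// "<|fim_prefix|>const x = <|fim_suffix|>;<|fim_middle|>"
renderFimPrompt(
    "<|fim_prefix|>{inputPrefix}{prompt}<|fim_suffix|>{inputSuffix}<|fim_middle|>",
    "const x = ", // text before the cursor
    "",           // prompt
    ";"           // text after the cursor
);
```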

src/llama-server.ts

Lines changed: 2 additions & 2 deletions
```diff
@@ -103,7 +103,7 @@ export class LlamaServer {
         nindent: number
     ): Promise<LlamaResponse | undefined> => {
         // If the server is OpenAI compatible, use the OpenAI API to get the completion
-        if (this.extConfig.is_openai_compatible) {
+        if (this.extConfig.use_openai_endpoint) {
             const response = await this.handleOpenAICompletion(chunks, inputPrefix, inputSuffix, prompt);
             return response || undefined;
         }
@@ -120,7 +120,7 @@
 
     prepareLlamaForNextCompletion = (chunks: any[]): void => {
         // If the server is OpenAI compatible, use the OpenAI API to prepare for the next FIM
-        if (this.extConfig.is_openai_compatible) {
+        if (this.extConfig.use_openai_endpoint) {
             // wtg 20250207 - per @igardev ... "This makes sense only if there is a server cache"
             // this.handleOpenAICompletion(chunks, "", "", "", true);
             return;
```
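`handleOpenAICompletion` itself is not part of this diff. A minimal sketch of the call it presumably wraps, assuming the OpenAI SDK's legacy completions API and the configuration fields shown above; names and values are assumptions, not code from this commit:

```typescript
import OpenAI from "openai";

// Hypothetical illustration of a FIM completion against an OpenAI-compatible endpoint.
async function completeViaOpenAI(
    client: OpenAI,   // e.g. extConfig.openai_client
    model: string,    // e.g. extConfig.openai_client_model
    fimPrompt: string // openai_prompt_template with its placeholders filled in
): Promise<string> {
    const response = await client.completions.create({
        model: model,
        prompt: fimPrompt,
        max_tokens: 256, // illustrative value, not from this commit
    });
    return response.choices[0]?.text ?? "";
}
```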
