
Commit e439630

Model training properties (#47)
Training properties are added: launch_training_completion and launch_training_chat hold the commands for starting training of the models. Properties lora_completion and lora_chat were also added for the locations of the .gguf LoRA adapter files. If they are not empty, --lora options are appended when the servers are started from the launch_completion and launch_chat properties. The chat with UI can't use a LoRA adapter yet; this will require a change in the webui.
1 parent 74da580 · commit e439630
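
For illustration, the new properties might be configured like this. This is a sketch only: the training command and adapter path are hypothetical placeholders, since all four properties default to empty strings.

    // Illustrative values for the new settings, written as a TypeScript
    // object literal. Both the training command and the adapter path are
    // hypothetical; the extension ships "" as the default for all four.
    const exampleSettings = {
        "llama-vscode.launch_training_completion":
            "cd c:/ai ; python finetune_lora.py --base Qwen2.5-Coder-1.5B", // hypothetical script
        "llama-vscode.launch_training_chat": "",
        "llama-vscode.lora_completion": "c:/ai/qwen2.5-coder-1.5b-lora.gguf", // hypothetical path
        "llama-vscode.lora_chat": ""
    };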

File tree: 6 files changed, +104 −8 lines

package.json

Lines changed: 22 additions & 2 deletions
@@ -134,14 +134,34 @@
     "properties": {
       "llama-vscode.launch_completion": {
         "type": "string",
-        "default": "",
+        "default": "cd c:/ai ; ./llama-server.exe -m Qwen2.5-Coder-1.5B.Q8_0.gguf --host 0.0.0.0 --port 8012 -c 2048 -ub 1024 -b 1024 -dt 0.1 --ctx-size 0 --cache-reuse 256",
         "description": "Shell command for starting fim llama.cpp server, executed from the menu"
       },
       "llama-vscode.launch_chat": {
         "type": "string",
-        "default": "",
+        "default": "cd c:/ai ; ./llama-server.exe -m qwen2.5-coder-3b-instruct-q6_k.gguf -ngl 99 --port 8011 --path C:/llama.cpp/llama.cpp/examples/server/webui/dist",
         "description": "Shell command for starting chat llama.cpp server, executed from the menu"
       },
+      "llama-vscode.launch_training_completion": {
+        "type": "string",
+        "default": "",
+        "description": "Shell command for starting training a completion (fim) model from the menu"
+      },
+      "llama-vscode.launch_training_chat": {
+        "type": "string",
+        "default": "",
+        "description": "Shell command for starting training a chat model from the menu"
+      },
+      "llama-vscode.lora_completion": {
+        "type": "string",
+        "default": "",
+        "description": "Path to the lora adapter file for the completion model. If not empty it will be used (appends --lora lora_completion) on starting the completion server with launch_completion"
+      },
+      "llama-vscode.lora_chat": {
+        "type": "string",
+        "default": "",
+        "description": "Path to the lora adapter file for the chat model. If not empty it will be used (appends --lora lora_chat) on starting the chat server with launch_chat"
+      },
       "llama-vscode.endpoint": {
         "type": "string",
         "default": "http://127.0.0.1:8012",

src/architect.ts

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 // TODO
-// Should not flicker when only a line or a word is selected (partially bring back the check for a match with the last request?)
+// If a LoRA is used for the chat server, it should be passed in the request from the webui
 // Ideas
 // - Use agents (?)
 // - Use LSP

src/configuration.ts

Lines changed: 8 additions & 0 deletions
@@ -9,6 +9,10 @@ export class Configuration {
     enabled = true;
     launch_completion = ""
     launch_chat = ""
+    launch_training_completion = ""
+    launch_training_chat = ""
+    lora_completion = ""
+    lora_chat = ""
     endpoint = "http://127.0.0.1:8012";
     endpoint_chat = "http://127.0.0.1:8011";
     auto = true;
@@ -84,6 +88,10 @@ export class Configuration {
         this.endpoint_chat = this.trimTrailingSlash(String(config.get<string>("endpoint_chat")));
         this.launch_completion = String(config.get<string>("launch_completion"));
         this.launch_chat = String(config.get<string>("launch_chat"));
+        this.launch_training_completion = String(config.get<string>("launch_training_completion"));
+        this.launch_training_chat = String(config.get<string>("launch_training_chat"));
+        this.lora_completion = String(config.get<string>("lora_completion"));
+        this.lora_chat = String(config.get<string>("lora_chat"));
         this.use_openai_endpoint = Boolean(config.get<boolean>("use_openai_endpoint"));
         this.openai_client_model = String(config.get<string>("openai_client_model"));
         this.openai_prompt_template = String(config.get<string>("openai_prompt_template"));
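
A minimal sketch of how these values flow at runtime, using the standard VS Code configuration API (the same config.get pattern as Configuration above; the composition of the --lora flag mirrors src/menu.ts below):

    import * as vscode from "vscode";

    // Read the new properties from the "llama-vscode" settings section,
    // exactly as Configuration does above.
    const config = vscode.workspace.getConfiguration("llama-vscode");
    const launch_completion = String(config.get<string>("launch_completion"));
    const lora_completion = String(config.get<string>("lora_completion"));

    // The menu then appends the adapter flag before launching the server.
    let command = launch_completion;
    if (lora_completion.trim() !== "") command += " --lora " + lora_completion;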

src/llama-server.ts

Lines changed: 27 additions & 2 deletions
@@ -23,6 +23,7 @@ export class LlamaServer {
     private app: Application
     private vsCodeFimTerminal: Terminal | undefined;
     private vsCodeChatTerminal: Terminal | undefined;
+    private vsCodeTrainTerminal: Terminal | undefined;
     private readonly defaultRequestParams = {
         top_k: 40,
         top_p: 0.99,
@@ -96,6 +97,7 @@
             cache_prompt: true,
             t_max_prompt_ms: this.app.extConfig.t_max_prompt_ms,
             t_max_predict_ms: 1,
+            ...(this.app.extConfig.lora_completion.trim() != "" && { lora: [{ id: 0, scale: 0.5 }] })
         };
     }
 
@@ -109,6 +111,7 @@
             ...(nindent && { n_indent: nindent }),
             t_max_prompt_ms: this.app.extConfig.t_max_prompt_ms,
             t_max_predict_ms: this.app.extConfig.t_max_predict_ms,
+            ...(this.app.extConfig.lora_completion.trim() != "" && { lora: [{ id: 0, scale: 0.5 }] })
         };
     }
 
@@ -163,7 +166,7 @@
             this.vsCodeFimTerminal.sendText(launchCmd);
         } catch(err){
             if (err instanceof Error) {
-                vscode.window.showInformationMessage(this.app.extConfig.getUiText("Error executind command") + " " + launchCmd +" : " + err.message);
+                vscode.window.showInformationMessage(this.app.extConfig.getUiText("Error executing command") + " " + launchCmd +" : " + err.message);
             }
         }
     }
@@ -181,7 +184,25 @@
             this.vsCodeChatTerminal.sendText(launchCmd);
         } catch(err){
             if (err instanceof Error) {
-                vscode.window.showInformationMessage(this.app.extConfig.getUiText("Error executind command") + " " + launchCmd +" : " + err.message);
+                vscode.window.showInformationMessage(this.app.extConfig.getUiText("Error executing command") + " " + launchCmd +" : " + err.message);
+            }
+        }
+    }
+
+    shellTrainCmd = (trainCmd: string): void => {
+        if (!trainCmd) {
+            vscode.window.showInformationMessage(this.app.extConfig.getUiText("There is no command to execute.")??"");
+            return;
+        }
+        try {
+            this.vsCodeTrainTerminal = vscode.window.createTerminal({
+                name: 'llama.cpp Train Terminal'
+            });
+            this.vsCodeTrainTerminal.show(true);
+            this.vsCodeTrainTerminal.sendText(trainCmd);
+        } catch(err){
+            if (err instanceof Error) {
+                vscode.window.showInformationMessage(this.app.extConfig.getUiText("Error executing command") + " " + trainCmd +" : " + err.message);
             }
         }
     }
@@ -193,4 +214,8 @@
     killChatCmd = (): void => {
         if (this.vsCodeChatTerminal) this.vsCodeChatTerminal.dispose();
    }
+
+    killTrainCmd = (): void => {
+        if (this.vsCodeTrainTerminal) this.vsCodeTrainTerminal.dispose();
+    }
 }
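
For context, a sketch of the completion request body this produces when a LoRA path is configured. The input fields and values below are assumptions for illustration (modeled on llama.cpp's infill request); only the lora entry's id 0 and scale 0.5 come directly from the diff:

    // Illustrative request payload only; field values are made up.
    const body = {
        input_prefix: "const add = (a: number, b: number) => ", // assumed infill field
        input_suffix: ";",                                      // assumed infill field
        cache_prompt: true,
        t_max_prompt_ms: 500,
        t_max_predict_ms: 1000,
        // id 0 selects the first adapter the server loaded via --lora;
        // scale 0.5 applies it at half strength (hardcoded in this commit).
        lora: [{ id: 0, scale: 0.5 }],
    };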

src/menu.ts

Lines changed: 40 additions & 3 deletions
@@ -80,15 +80,37 @@ export class Menu {
         {
             label: this.app.extConfig.getUiText("Start chat llama.cpp server")??"",
             description: this.app.extConfig.getUiText(`Runs the command from property launch_chat`)
-        },
+        })
+        if (this.app.extConfig.launch_training_completion.trim() != "") {
+            menuItems.push(
+                {
+                    label: this.app.extConfig.getUiText("Start training completion model")??"",
+                    description: this.app.extConfig.getUiText(`Runs the command from property launch_training_completion`)
+                })
+        }
+        if (this.app.extConfig.launch_training_chat.trim() != "") {
+            menuItems.push(
+                {
+                    label: this.app.extConfig.getUiText("Start training chat model")??"",
+                    description: this.app.extConfig.getUiText(`Runs the command from property launch_training_chat`)
+                })
+        }
+        menuItems.push(
         {
             label: this.app.extConfig.getUiText("Stop completion llama.cpp server")??"",
             description: this.app.extConfig.getUiText(`Stops completion llama.cpp server if it was started from llama.vscode menu`)
         },
         {
             label: this.app.extConfig.getUiText("Stop chat llama.cpp server")??"",
             description: this.app.extConfig.getUiText(`Stops chat llama.cpp server if it was started from llama.vscode menu`)
+        })
+        if (this.app.extConfig.launch_training_completion.trim() != "" || this.app.extConfig.launch_training_chat.trim() != "") {
+            menuItems.push(
+                {
+                    label: this.app.extConfig.getUiText("Stop training")??"",
+                    description: this.app.extConfig.getUiText(`Stops training if it was started from llama.vscode menu`)
         })
+        }
 
         return menuItems.filter(Boolean) as vscode.QuickPickItem[];
     }
@@ -145,18 +167,33 @@
                 break;
             case this.app.extConfig.getUiText('Start completion llama.cpp server'):
                 await this.app.llamaServer.killFimCmd();
-                await this.app.llamaServer.shellFimCmd(this.app.extConfig.launch_completion);
+                let commandCompletion = this.app.extConfig.launch_completion
+                if (this.app.extConfig.lora_completion.trim() != "") commandCompletion += " --lora " + this.app.extConfig.lora_completion
+                await this.app.llamaServer.shellFimCmd(commandCompletion);
                 break;
             case this.app.extConfig.getUiText('Start chat llama.cpp server'):
                 await this.app.llamaServer.killChatCmd();
-                await this.app.llamaServer.shellChatCmd(this.app.extConfig.launch_chat);
+                let commandChat = this.app.extConfig.launch_chat
+                if (this.app.extConfig.lora_chat.trim() != "") commandChat += " --lora " + this.app.extConfig.lora_chat
+                await this.app.llamaServer.shellChatCmd(commandChat);
+                break;
+            case this.app.extConfig.getUiText('Start training completion model'):
+                await this.app.llamaServer.killTrainCmd();
+                await this.app.llamaServer.shellTrainCmd(this.app.extConfig.launch_training_completion);
+                break;
+            case this.app.extConfig.getUiText('Start training chat model'):
+                await this.app.llamaServer.killTrainCmd();
+                await this.app.llamaServer.shellTrainCmd(this.app.extConfig.launch_training_chat);
                 break;
             case this.app.extConfig.getUiText("Stop completion llama.cpp server"):
                 await this.app.llamaServer.killFimCmd();
                 break;
            case this.app.extConfig.getUiText("Stop chat llama.cpp server"):
                 await this.app.llamaServer.killChatCmd();
                 break;
+            case this.app.extConfig.getUiText("Stop training"):
+                await this.app.llamaServer.killTrainCmd();
+                break;
             case "$(book) " + this.app.extConfig.getUiText("View Documentation..."):
                 await vscode.env.openExternal(vscode.Uri.parse('https://github.com/ggml-org/llama.vscode'));
                 break;
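
Note on the training lifecycle as implemented above: both Start training cases share the single vsCodeTrainTerminal and call killTrainCmd() first, so launching either training run disposes any training session that is still active. A minimal sketch of that flow (names from this diff):

    // Starting chat training while completion training is running will
    // dispose the shared train terminal first, ending the earlier run.
    await this.app.llamaServer.killTrainCmd();   // dispose existing train terminal, if any
    await this.app.llamaServer.shellTrainCmd(this.app.extConfig.launch_training_chat);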

src/translations.ts

Lines changed: 6 additions & 0 deletions
@@ -55,4 +55,10 @@ export const translations: string[][] = [
     ["llama-vscode extension is updated.", "Разширението llama-vscode е актуализирано.", "Die llama-vscode-Erweiterung ist aktualisiert.", "Расширение llama-vscode обновлено.", "La extensión llama-vscode está actualizada.", "llama-vscode扩展已更新。", "L'extension llama-vscode est mise à jour."],
     ["There is no command to execute.", "Няма команда за изпълнение.", "Es gibt keinen Befehl zum Ausführen.", "Нет команды для выполнения.", "No hay comando para ejecutar.", "没有可执行的命令。", "Il n'y a aucune commande à exécuter."],
     ["Error executing command", "Грешка при изпълнение на командата", "Fehler beim Ausführen des Befehls", "Ошибка выполнения команды", "Error al ejecutar el comando", "执行命令时出错", "Erreur lors de l'exécution de la commande"],
+    ["Start training completion model", "Стартиране на модел за завършване на обучение", "Starten des Trainingsabschlussmodells", "Запуск модели завершения обучения", "Iniciar modelo de finalización de entrenamiento", "启动训练完成模型", "Démarrer le modèle de complétion d'entraînement"],
+    ["Runs the command from property launch_training_completion", "Изпълнява командата от свойството launch_training_completion", "Führt den Befehl aus der Eigenschaft launch_training_completion aus", "Выполняет команду из свойства launch_training_completion", "Ejecuta el comando desde la propiedad launch_training_completion", "从属性 launch_training_completion 运行命令", "Exécute la commande depuis la propriété launch_training_completion"],
+    ["Start training chat model", "Стартиране на модел за чат обучение", "Starten des Chat-Trainingsmodells", "Запуск модели обучения чата", "Iniciar modelo de entrenamiento de chat", "启动训练聊天模型", "Démarrer le modèle d'entraînement de chat"],
+    ["Runs the command from property launch_training_chat", "Изпълнява командата от свойството launch_training_chat", "Führt den Befehl aus der Eigenschaft launch_training_chat aus", "Выполняет команду из свойства launch_training_chat", "Ejecuta el comando desde la propiedad launch_training_chat", "从属性 launch_training_chat 运行命令", "Exécute la commande depuis la propriété launch_training_chat"],
+    ["Stop training", "Спиране на обучението", "Training beenden", "Остановить обучение", "Detener entrenamiento", "停止训练", "Arrêter l'entraînement"],
+    ["Stops training if it was started from llama.vscode menu", "Спира обучението, ако е стартирано от менюто llama.vscode", "Stoppt das Training, wenn es über das Menü llama.vscode gestartet wurde", "Останавливает обучение, если оно было запущено из меню llama.vscode", "Detiene el entrenamiento si se inició desde el menú llama.vscode", "如果从 llama.vscode 菜单启动,则停止训练", "Arrête l'entraînement s'il a été lancé depuis le menu llama.vscode"],
 ];
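
The lookup behind getUiText is not shown in this diff; a minimal sketch of how such a string[][] table is typically consumed, assuming each row is keyed by its English entry with languages at fixed indexes (an assumed implementation, not the extension's actual code):

    // Assumed lookup helper, not the extension's actual getUiText:
    // row[0] is the English key; row[langIndex] is the translation.
    function getUiText(table: string[][], english: string, langIndex: number): string | undefined {
        const row = table.find(r => r[0] === english);
        return row ? (row[langIndex] ?? english) : undefined;
    }

    // Usage: index 1 is Bulgarian in the table above, so
    // getUiText(translations, "Stop training", 1) === "Спиране на обучението"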
