Skip to content

Commit 05fcdd4

Browse files
igardev and igardev authored
Edit selected text (#49)
* Edit a selected text with a command - Ctrl+Shift+e for entering prompt, Tab for accepting the change. * Code edits - reject the suggestion by pressing Escape * Prompt for text edit is improved, context menu item for editing selected text added. * Remove the context from the edit prompt as the output includes part of it; In the diff window show 25 lines before and after the change to facilitate comparison. --------- Co-authored-by: igardev <[email protected]>
1 parent de06d78 commit 05fcdd4

File tree

8 files changed

+361
-4
lines changed

8 files changed

+361
-4
lines changed

package.json

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,18 @@
6868
{
6969
"command": "extension.askAiWithContext",
7070
"title": "Ask AI With Context"
71+
},
72+
{
73+
"command": "extension.editSelectedText",
74+
"title": "Edit Selected Text with AI"
75+
},
76+
{
77+
"command": "extension.acceptTextEdit",
78+
"title": "Accept Text Edit Suggestion"
79+
},
80+
{
81+
"command": "extension.rejectTextEdit",
82+
"title": "Reject Text Edit Suggestion"
7183
}
7284
],
7385
"keybindings": [
@@ -126,6 +138,21 @@
126138
"command": "extension.askAiWithContext",
127139
"key": "ctrl+Shift+;",
128140
"when": "editorTextFocus"
141+
},
142+
{
143+
"command": "extension.editSelectedText",
144+
"key": "ctrl+shift+e",
145+
"when": "editorHasSelection"
146+
},
147+
{
148+
"command": "extension.acceptTextEdit",
149+
"key": "tab",
150+
"when": "editorTextFocus && textEditSuggestionVisible"
151+
},
152+
{
153+
"command": "extension.rejectTextEdit",
154+
"key": "escape",
155+
"when": "editorTextFocus && textEditSuggestionVisible"
129156
}
130157
],
131158
"configuration": {
@@ -283,6 +310,15 @@
283310
"description": "The prompt template to be used for the OpenAI compatible endpoint."
284311
}
285312
}
313+
},
314+
"menus": {
315+
"editor/context": [
316+
{
317+
"command": "extension.editSelectedText",
318+
"when": "editorHasSelection",
319+
"group": "llama@1"
320+
}
321+
]
286322
}
287323
},
288324
"scripts": {

src/application.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import {Menu} from "./menu";
88
import {Completion} from "./completion";
99
import {Logger} from "./logger";
1010
import { ChatWithAi } from "./chat-with-ai";
11+
import { TextEditor } from "./text-editor";
1112

1213
export class Application {
1314
private static instance: Application;
@@ -21,6 +22,7 @@ export class Application {
2122
public completion: Completion
2223
public logger: Logger
2324
public askAi: ChatWithAi
25+
public textEditor: TextEditor
2426

2527
private constructor() {
2628
this.extConfig = new Configuration()
@@ -33,6 +35,7 @@ export class Application {
3335
this.completion = new Completion(this)
3436
this.logger = new Logger(this)
3537
this.askAi = new ChatWithAi(this)
38+
this.textEditor = new TextEditor(this)
3639
}
3740

3841
public static getInstance(): Application {

src/architect.ts

Lines changed: 28 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ export class Architect {
3131
this.app.extraContext.pickChunkAroundCursor(previousEditor.selection.active.line, previousEditor.document);
3232
}, 0);
3333
}
34-
// Clarify if this should be executed if the above was executed
34+
3535
if (editor) {
3636
// Editor is now active in the UI, pick a chunk
3737
let activeDocument = editor.document;
@@ -238,9 +238,7 @@ export class Architect {
238238
let activeDocument = editor.document;
239239
const selection = editor.selection;
240240
const cursorPosition = selection.active;
241-
// setTimeout(async () => {
242241
this.app.extraContext.pickChunkAroundCursor(cursorPosition.line, activeDocument);
243-
// }, 0);
244242
// Ensure ring chunks buffer will be updated
245243
this.app.extraContext.lastComplStartTime = Date.now() - this.app.extConfig.RING_UPDATE_MIN_TIME_LAST_COMPL - 1
246244
this.app.extraContext.periodicRingBufferUpdate()
@@ -249,4 +247,31 @@ export class Architect {
249247
});
250248
context.subscriptions.push(triggerAskAiDisposable);
251249
}
250+
251+
registerCommandEditSelectedText = (context: vscode.ExtensionContext) => {
252+
const editSelectedTextDisposable = vscode.commands.registerCommand('extension.editSelectedText', async () => {
253+
const editor = vscode.window.activeTextEditor;
254+
if (!editor) {
255+
vscode.window.showErrorMessage('No active editor!');
256+
return;
257+
}
258+
await this.app.textEditor.showEditPrompt(editor);
259+
});
260+
context.subscriptions.push(editSelectedTextDisposable);
261+
}
262+
263+
registerCommandAcceptTextEdit = (context: vscode.ExtensionContext) => {
264+
const acceptTextEditDisposable = vscode.commands.registerCommand('extension.acceptTextEdit', async () => {
265+
await this.app.textEditor.acceptSuggestion();
266+
});
267+
context.subscriptions.push(acceptTextEditDisposable);
268+
}
269+
270+
registerCommandRejectTextEdit = (context: vscode.ExtensionContext) => {
271+
context.subscriptions.push(
272+
vscode.commands.registerCommand('extension.rejectTextEdit', () => {
273+
this.app.textEditor.rejectSuggestion();
274+
})
275+
);
276+
}
252277
}

src/chat-with-ai.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import {Application} from "./application";
22
import * as vscode from 'vscode';
3+
import {Utils} from "./utils";
34

45
export class ChatWithAi {
56
private app: Application
@@ -24,7 +25,7 @@ export class ChatWithAi {
2425
webviewIdentifier = 'htmlChatWithAiWithContextViewer'
2526
let chunksToSend = this.app.extraContext.chunks.filter((_, index) => !this.sentContextChunks.includes(this.app.extraContext.chunksHash[index]));
2627
let chunksToSendHash = this.app.extraContext.chunksHash.filter((item) => !this.sentContextChunks.includes(item));
27-
if (chunksToSend.length > 0) extraCont = "Here are pieces of code from different files of the project: \n" + chunksToSend.reduce((accumulator, currentValue) => accumulator + "\nFile Name: " + currentValue.filename + "\nText:\n" + currentValue.text + "\n\n" , "");
28+
if (chunksToSend.length > 0) extraCont = Utils.getChunksInPlainText(chunksToSend);
2829
this.sentContextChunks.push(...chunksToSendHash)
2930
panelTitle = this.app.extConfig.getUiText("Chat with AI with project context")??""
3031
}
@@ -149,4 +150,6 @@ export class ChatWithAi {
149150
`;
150151
}
151152

153+
154+
152155
}

src/extension.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,9 @@ export function activate(context: vscode.ExtensionContext) {
1919
app.architect.registerCommandAcceptFirstLine(context);
2020
app.architect.registerCommandAcceptFirstWord(context);
2121
app.architect.registerCommandShowMenu(context);
22+
app.architect.registerCommandEditSelectedText(context);
23+
app.architect.registerCommandAcceptTextEdit(context);
24+
app.architect.registerCommandRejectTextEdit(context);
2225
}
2326

2427
export function deactivate() {

src/llama-server.ts

Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import axios from "axios";
22
import {Application} from "./application";
33
import vscode, { Terminal } from "vscode";
4+
import {Utils} from "./utils";
45

56
const STATUS_OK = 200;
67

@@ -19,6 +20,11 @@ export interface LlamaResponse {
1920
};
2021
}
2122

23+
export interface LlamaChatResponse {
24+
choices: [{message:{content?: string}}];
25+
}
26+
27+
2228
export class LlamaServer {
2329
private app: Application
2430
private vsCodeFimTerminal: Terminal | undefined;
@@ -115,6 +121,73 @@ export class LlamaServer {
115121
};
116122
}
117123

124+
private createChatRequestPayload(noPredict: boolean, instructions: string, originalText: string, chunks: any[], context: string, nindent?: number) {
125+
const CHUNKS_PLACEHOLDER = "[chunks]";
126+
const INSTRUCTIONS_PLACEHOLDER = "[instructions]";
127+
const ORIGINAL_TEXT_PLACEHOLDER = "[originalText]";
128+
const CONTEXT_PLACEHOLDER = "[context]";
129+
let editTextTemplate = `${CHUNKS_PLACEHOLDER}\n\nModify the following original code according to the instructions. Output only the modified code. No explanations.\n\ninstructions:\n${INSTRUCTIONS_PLACEHOLDER}\n\noriginal code:\n${ORIGINAL_TEXT_PLACEHOLDER}\n\nmodified code:`
130+
if (noPredict) {
131+
return {
132+
// input_extra: chunks,
133+
"messages": [
134+
{
135+
"role": "system",
136+
"content": "You are an expert coder."
137+
},
138+
{
139+
"role": "user",
140+
"content": Utils.getChunksInPlainText(chunks)
141+
}
142+
],
143+
n_predict: 0,
144+
samplers: [],
145+
cache_prompt: true,
146+
t_max_prompt_ms: this.app.extConfig.t_max_prompt_ms,
147+
t_max_predict_ms: 1,
148+
...(this.app.extConfig.lora_completion.trim() != "" && { lora: [{ id: 0, scale: 0.5 }] })
149+
};
150+
}
151+
152+
return {
153+
"messages": [
154+
{
155+
"role": "system",
156+
"content": "You are an expert coder."
157+
},
158+
{
159+
"role": "user",
160+
"content": editTextTemplate.replace(CHUNKS_PLACEHOLDER, Utils.getChunksInPlainText(chunks))
161+
.replace(INSTRUCTIONS_PLACEHOLDER, instructions).replace(ORIGINAL_TEXT_PLACEHOLDER, originalText)
162+
.replace(CONTEXT_PLACEHOLDER, context)
163+
}
164+
],
165+
"stream": false,
166+
"cache_prompt": true,
167+
"samplers": "edkypmxt",
168+
"temperature": 0.8,
169+
"dynatemp_range": 0,
170+
"dynatemp_exponent": 1,
171+
"top_k": 40,
172+
"top_p": 0.95,
173+
"min_p": 0.05,
174+
"typical_p": 1,
175+
"xtc_probability": 0,
176+
"xtc_threshold": 0.1,
177+
"repeat_last_n": 64,
178+
"repeat_penalty": 1,
179+
"presence_penalty": 0,
180+
"frequency_penalty": 0,
181+
"dry_multiplier": 0,
182+
"dry_base": 1.75,
183+
"dry_allowed_length": 2,
184+
"dry_penalty_last_n": -1,
185+
"max_tokens": -1,
186+
"timings_per_token": false,
187+
...(this.app.extConfig.lora_chat.trim() != "" && { lora: [{ id: 0, scale: 0.5 }] })
188+
};
189+
}
190+
118191

119192
getFIMCompletion = async (
120193
inputPrefix: string,
@@ -139,6 +212,22 @@ export class LlamaServer {
139212
return response.status === STATUS_OK ? response.data : undefined;
140213
};
141214

215+
getChatCompletion = async (
216+
instructions: string,
217+
originalText: string,
218+
context: string,
219+
chunks: any,
220+
nindent: number
221+
): Promise<LlamaChatResponse | undefined> => {
222+
const response = await axios.post<LlamaChatResponse>(
223+
`${this.app.extConfig.endpoint_chat}/v1/chat/completions`,
224+
this.createChatRequestPayload(false, instructions, originalText, chunks, context, nindent),
225+
this.app.extConfig.axiosRequestConfig
226+
);
227+
228+
return response.status === STATUS_OK ? response.data : undefined;
229+
};
230+
142231
updateExtraContext = (chunks: any[]): void => {
143232
// If the server is OpenAI compatible, use the OpenAI API to prepare for the next FIM
144233
if (this.app.extConfig.use_openai_endpoint) {
@@ -151,6 +240,13 @@ export class LlamaServer {
151240
this.createRequestPayload(true, "", "", chunks, "", undefined),
152241
this.app.extConfig.axiosRequestConfig
153242
);
243+
244+
// make a request to the API to prepare for the next chat request
245+
axios.post<LlamaResponse>(
246+
`${this.app.extConfig.endpoint_chat}/v1/chat/completions`,
247+
this.createChatRequestPayload(true, "", "", chunks, "", undefined),
248+
this.app.extConfig.axiosRequestConfig
249+
);
154250
};
155251

156252
shellFimCmd = (launchCmd: string): void => {

0 commit comments

Comments (0)