Skip to content

Commit f61889f

Browse files
committed
Enhance LLM integration by adding model specification and refactoring code completion logic; update README with upcoming features
1 parent 741f071 commit f61889f

File tree

9 files changed

+64
-50
lines changed

9 files changed

+64
-50
lines changed

README.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -176,6 +176,12 @@ vscode.commands.registerCommand(name, fun).asInstanceOf[Dispose]
176176

177177
You can find more information and tutorials on the [Scala.js website](https://www.scala-js.org/).
178178

179+
# planned features
180+
Features to be implemented:
181+
- refactoring
182+
- specify which LLM to use
183+
184+
179185
# references:
180186
- updated from [vscode-scalajs-hello](https://github.com/pme123/vscode-scalajs-hello) with Scala 3.3.3 and sbt.version=1.9.7.
181187
- [VSCode Extension Samples](https://github.com/microsoft/vscode-extension-samples) repository.
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1 +1,32 @@
11
package functorcoder.actions
2+
3+
import functorcoder.llm.llmMain.llmAgent
4+
import functorcoder.llm.llmPrompt
5+
import scala.concurrent.Future
6+
7+
object CodeCompletion {

  /** Ask the language model to fill in code at a cursor position.
    *
    * Joins the text surrounding the cursor around a hole marker to build a
    * completion prompt, then forwards that prompt to the given LLM agent.
    *
    * @param codeBefore
    *   Document text preceding the hole to be completed.
    * @param codeAfter
    *   Document text following the hole to be completed.
    * @param llm
    *   The LLM agent that performs the request.
    * @return
    *   A `Future` completing with the suggested snippet as a `String`.
    */
  def getCompletion(
      codeBefore: String,
      codeAfter: String,
      llm: llmAgent
  ): Future[String] = {
    // Mark the insertion point with the prompt's hole token
    // (e.g. "{{FILL_HERE}}") so the model knows what to fill in.
    val holeMarker = llmPrompt.promptText.hole
    val completionPrompt = llmPrompt.Completion(
      codeWithHole = s"$codeBefore$holeMarker$codeAfter"
    )
    llm.sendPrompt(completionPrompt)
  }
}

src/main/scala/functorcoder/actions/Commands.scala

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,6 @@ object Commands {
4646
statusBar.showSpininngStatusBarItem("functorcoder", llmResponse)
4747

4848
llmResponse.foreach { response =>
49-
// showMessageAndLog("add doc: " + s"${param.documentUri}, ${param.range}, ${response}")
5049
// apply the changes to the document
5150
vscode.window.activeTextEditor.toOption match {
5251
case None =>
@@ -132,6 +131,5 @@ object Commands {
132131
}
133132
)
134133
}
135-
136134
}
137135
}

src/main/scala/functorcoder/actions/createFiles.scala

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ package functorcoder.actions
33
import com.doofin.stdScala.dataTypes.Tree.TreeNode
44
import functorcoder.algo.treeParse
55
import vscextension.facade.vscodeUtils.showMessageAndLog
6+
import pprint.PPrinter.BlackWhite
67

78
/** create files and folders according to the prompt
89
*/
@@ -49,13 +50,13 @@ object createFiles {
4950
*/
5051
def createFilesAndFolders(tree: TreeNode[String], parentPath0: String): Unit = {
5152
// recursively create files and folders
52-
showMessageAndLog(s"Files and folders tree: $tree")
53+
val treeStr = BlackWhite.tokenize(tree).map(_.render).mkString("\n")
54+
showMessageAndLog(s"Files and folders tree: $treeStr")
5355
val TreeNode(root, children) = tree
5456
val parentPath: String = parentPath0 + "/" + root
57+
showMessageAndLog(s"Creating file in $parentPath, file: $root")
5558

56-
children.foreach { child =>
57-
val file = child.value
58-
showMessageAndLog(s"Creating file in $parentPath, file: $file")
59+
children.toSeq.foreach { child =>
5960
createFilesAndFolders(child, parentPath)
6061
}
6162
}

src/main/scala/functorcoder/editorUI/editorConfig.scala

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,11 @@ package functorcoder.editorUI
22

33
// https://code.visualstudio.com/api/references/contribution-points#contributes.configuration
44
// https://code.visualstudio.com/api/references/contribution-points#contributes.configuration
object editorConfig {

  /** Extension settings, populated from the VS Code workspace configuration.
    *
    * @param openaiApiKey
    *   API key used to authenticate against the OpenAI-compatible endpoint.
    * @param openaiUrl
    *   Chat-completions endpoint URL.
    * @param maxTokens
    *   Upper bound on tokens generated per request.
    * @param model
    *   Model identifier sent with each request; defaults to "gpt-4o-mini".
    */
  case class Config(
      openaiApiKey: String,
      openaiUrl: String,
      maxTokens: Int,
      model: String = "gpt-4o-mini"
  )

}

src/main/scala/functorcoder/llm/llmMain.scala

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -13,13 +13,11 @@ import scala.concurrent.Future
1313

1414
import functorcoder.editorUI.editorConfig
1515

16-
/** large language model (LLM) AI main
17-
*
18-
* use node-fetch for network requests
16+
/** large language model (LLM) main entry
1917
*/
2018
object llmMain {
2119

22-
/** generate a completion prompt
20+
/** prompt data to string
2321
*
2422
* change the model here if needed
2523
*
@@ -37,7 +35,7 @@ object llmMain {
3735
openaiReq.Message(roles.user, inputPrompt.generatePrompt),
3836
openaiReq.Message(roles.system, inputPrompt.getAssistantMessage)
3937
),
40-
openaiReq.models.gpt4oMini,
38+
editorCfg.model,
4139
max_tokens = Some(editorCfg.maxTokens)
4240
)
4341

src/main/scala/functorcoder/llm/llmPrompt.scala

Lines changed: 6 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ object llmPrompt {
5151
case class Completion(
5252
codeWithHole: String, // code with a hole to fill like {{FILL_HERE}}
5353
// taskRequirement: String, // like "Fill the {{FILL_HERE}} hole."
54-
assistantMessage: String = promptText.prompt1
54+
assistantMessage: String = promptText.promptComp1
5555
) extends Prompt(assistantMessage) {
5656
def generatePrompt = {
5757

@@ -87,14 +87,14 @@ object llmPrompt {
8787
case class CreateFiles(
8888
userRequest: String,
8989
assistantMessage: String =
90-
s"You are given a user requirement wrapped in ${tagsInUse.queryStart} and ${tagsInUse.queryEnd}, and a TASK requirement ${tagsInUse.task}. " +
91-
"You are going to return the code snippet according to the TASK requirement. "
90+
s"an input is wrapped in ${tagsInUse.queryStart} and ${tagsInUse.queryEnd}, and the requirement is inside ${tagsInUse.task}. " +
91+
"from input and requirement, You return the code snippet"
9292
) extends Prompt(assistantMessage) {
9393
def generatePrompt = {
9494
import functorcoder.algo.treeParse
9595

9696
val task =
97-
s"parse the prompt response to tree of files and folders in the format: ${treeParse.exampleSyntax}. An example input is: ${treeParse.exampleInput}. return the tree data structure in that format."
97+
s" return tree of files and folders in the format: ${treeParse.exampleSyntax}. An example input is: ${treeParse.exampleInput}. return the tree data structure in that format."
9898

9999
s"""${tagsInUse.queryStart}
100100
|${userRequest}
@@ -110,12 +110,12 @@ object llmPrompt {
110110
*/
111111
object promptText {
112112
val hole = "{{FILL_HERE}}"
113-
val prompt1 =
113+
val promptComp1 =
114114
"You are a code or text autocompletion assistant. " +
115115
s"In the provided input, missing code or text are marked as $hole. " +
116116
"Your task is to output only the snippet that replace the placeholder, " +
117117
"ensuring that indentation and formatting remain consistent with the context. Don't quote your output"
118-
val prompt2 =
118+
val promptComp2 =
119119
"You are a hole filler," +
120120
"You are given a string with a hole: " +
121121
s"$hole in the string, " +
@@ -144,27 +144,4 @@ function sum_evens(lim) {
144144
</QUERY>
145145
146146
TASK: Fill the {{FILL_HERE}} hole.
147-
148-
## CORRECT COMPLETION
149-
150-
<COMPLETION>if (i % 2 === 0) {
151-
sum += i;
152-
}</COMPLETION>
153-
154-
## EXAMPLE QUERY:
155-
156-
<QUERY>
157-
def sum_list(lst):
158-
total = 0
159-
for x in lst:
160-
{{FILL_HERE}}
161-
return total
162-
163-
print sum_list([1, 2, 3])
164-
</QUERY>
165-
166-
## CORRECT COMPLETION:
167-
168-
<COMPLETION> total += x</COMPLETION>
169-
170147
*/

src/main/scala/vscextension/inlineCompletions.scala

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,10 @@ import scala.concurrent.ExecutionContext.Implicits.global
55

66
import scala.scalajs.js
77
import scala.scalajs.js.JSConverters.*
8-
98
import scala.scalajs.js.Promise
10-
import functorcoder.llm.llmPrompt
9+
1110
import functorcoder.llm.llmMain.llmAgent
11+
import functorcoder.actions.CodeCompletion
1212
import vscextension.facade.vscodeUtils.showMessageAndLog
1313

1414
/** demonstrates how to provide inline completions in the editor. like the github copilot
@@ -29,11 +29,7 @@ object inlineCompletions {
2929
val codeBefore = document.getText(new vscode.Range(new vscode.Position(0, 0), position))
3030
val codeAfter = document.getText(new vscode.Range(position, document.positionAt(document.getText().length)))
3131

32-
val prompt = llmPrompt
33-
.Completion(codeWithHole = s"$codeBefore${llmPrompt.promptText.hole}$codeAfter")
34-
35-
// assistantMessage: String = promptText.prompt1
36-
val promptResponseF = llm.sendPrompt(prompt)
32+
val promptResponseF = CodeCompletion.getCompletion(codeBefore, codeAfter, llm)
3733

3834
val providerResultF: Promise[scala.scalajs.js.Array[vscode.InlineCompletionItem]] =
3935
promptResponseF.map { completionText =>
@@ -46,7 +42,8 @@ object inlineCompletions {
4642
)
4743
}.toJSPromise
4844

49-
statusBar.showSpininngStatusBarItem("functorcoder", providerResultF)
45+
statusBar.showSpininngStatusBarItem(s"functorcoder(${editorAPI.getLanguage()})", providerResultF)
46+
5047
providerResultF.asInstanceOf[typings.vscode.mod.ProviderResult[
5148
scala.scalajs.js.Array[typings.vscode.mod.InlineCompletionItem] | typings.vscode.mod.InlineCompletionList
5249
]]

src/main/scala/vscextension/settings.scala

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,8 @@ object settings {
1818
config.getStringOrEmpty(key = "openaiUrl", default = "https://api.openai.com/v1/chat/completions")
1919

2020
val maxTokens = config.get[Int]("maxTokens").getOrElse(1000)
21-
Config(openaiApiKey, openaiUrl, maxTokens)
21+
val model = config.getStringOrEmpty("model", default = "gpt-4o-mini")
22+
Config(openaiApiKey, openaiUrl, maxTokens, model)
2223
}
2324

2425
extension (config: vscode.WorkspaceConfiguration) {

0 commit comments

Comments
 (0)