@@ -1,31 +1,22 @@
 import ollama, { ListResponse } from "ollama";
 import * as vscode from "vscode";
 import Statement from "../../database/statement";
-import { AiConfig, AiProvider } from "../aiConfig";
 import {
   canTalkToDb,
   findPossibleTables,
-  getDefaultSchema,
+  getCurrentSchema,
   getSystemStatus,
-  refsToMarkdown,
 } from "../context";
-import { chatRequest } from "./send";
 import { JobManager } from "../../config";
 
 const CHAT_ID = `vscode-db2i.chat`;
-let usingSchema = getDefaultSchema();
 
 interface IDB2ChatResult extends vscode.ChatResult {
   metadata: {
     command: string;
   };
 }
 
-interface ModelQuickPickItem extends vscode.QuickPickItem {
-  provider: AiProvider;
-  family: string;
-}
-
 export function activateChat(context: vscode.ExtensionContext) {
   // chatHandler deals with the input from the chat windows,
   // and uses streamModelResponse to send the response back to the chat window
@@ -35,9 +26,11 @@ export function activateChat(context: vscode.ExtensionContext) {
     stream: vscode.ChatResponseStream,
     token: vscode.CancellationToken
   ): Promise<IDB2ChatResult> => {
+    const copilotFamily = request.model.family;
     let messages: vscode.LanguageModelChatMessage[];
 
     if (canTalkToDb()) {
+      let usingSchema = getCurrentSchema();
 
       switch (request.command) {
         case `activity`:
@@ -56,7 +49,7 @@ export function activateChat(context: vscode.ExtensionContext) {
             vscode.LanguageModelChatMessage.User(request.prompt),
           ];
 
-          await streamModelResponse(messages, stream, token);
+          await copilotRequest(copilotFamily, messages, {}, token, stream);
 
           return { metadata: { command: "activity" } };
 
@@ -125,7 +118,7 @@ export function activateChat(context: vscode.ExtensionContext) {
 
           messages.push(vscode.LanguageModelChatMessage.User(request.prompt))
 
-          await streamModelResponse(messages, stream, token);
+          await copilotRequest(request.model.family, messages, {}, token, stream);
 
           return { metadata: { command: "build" } };
       }
@@ -139,98 +132,23 @@ export function activateChat(context: vscode.ExtensionContext) {
   const chat = vscode.chat.createChatParticipant(CHAT_ID, chatHandler);
   chat.iconPath = new vscode.ThemeIcon(`database`);
 
-  const changeModelCommand = vscode.commands.registerCommand(
-    `vscode-db2i.ai.changeModel`,
-    selectProviderAndModel
-  );
-
-  context.subscriptions.push(chat, changeModelCommand);
-}
-
-let lastSelectedModel: string | null = null;
-
-async function showModelProviderIfNeeded(
-  stream: vscode.ChatResponseStream,
-  chosenProvider: AiProvider,
-  chosenModel: string
-) {
-  const currentModel = AiConfig.getModel();
-
-  if (lastSelectedModel === null || lastSelectedModel !== currentModel) {
-    stream.markdown(
-      `**Provider👨💻:** ${chosenProvider}\n\n**Model🧠:** ${chosenModel}\n\n***\n\n`
-    );
-    lastSelectedModel = currentModel;
-  }
+  context.subscriptions.push(chat);
 }
 
-async function streamModelResponse(
+async function copilotRequest(
+  model: string,
   messages: vscode.LanguageModelChatMessage[],
-  stream: vscode.ChatResponseStream,
-  token: vscode.CancellationToken
-) {
-  const chosenProvider = AiConfig.getProvider();
-  const chosenModel = AiConfig.getModel();
-
-  if (chosenProvider === `none`) {
-    stream.markdown(
-      `No AI provider selected. Please select an AI provider and model.`
-    );
-    stream.button({
-      command: `vscode-db2i.ai.changeModel`,
-      title: `Select AI Provider and Model`,
-    });
-    return;
-  }
-
-  showModelProviderIfNeeded(stream, chosenProvider, chosenModel);
-  stream.progress(`Provider: ${chosenProvider} Model: ${chosenModel}`);
-
-  return chatRequest(chosenProvider, messages, {}, token, stream);
-}
-
-async function selectProviderAndModel() {
-  const selected = AiConfig.getModel();
-  const copilotModels = await vscode.lm.selectChatModels();
-  let ollamaModels: ListResponse = { models: [] };
-
-  // try {
-  //   ollamaModels = await ollama.list();
-  // } catch (e) {}
-
-  const provider = await vscode.window.showQuickPick(
-    [
-      { kind: vscode.QuickPickItemKind.Separator, label: "Ollama Models" },
-      ...ollamaModels.models.map(
-        (model): ModelQuickPickItem => ({
-          label: model.name,
-          family: model.name,
-          provider: "Ollama",
-          iconPath: new vscode.ThemeIcon("heart"),
-          description: selected === model.name ? "Selected" : "",
-        })
-      ),
-      {
-        kind: vscode.QuickPickItemKind.Separator,
-        label: "GitHub Copilot Models",
-      },
-      ...copilotModels.map(
-        (model): ModelQuickPickItem => ({
-          label: model.name,
-          family: model.family,
-          provider: "GitHub Copilot",
-          iconPath: new vscode.ThemeIcon("copilot"),
-          description: selected === model.family ? "Selected" : "",
-        })
-      ),
-    ],
-    {
-      title: "Select the AI model",
+  options: vscode.LanguageModelChatRequestOptions,
+  token: vscode.CancellationToken,
+  stream: vscode.ChatResponseStream
+): Promise<void> {
+  const models = await vscode.lm.selectChatModels({ family: model });
+  if (models.length > 0) {
+    const [first] = models;
+    const response = await first.sendRequest(messages, options, token);
+
+    for await (const fragment of response.text) {
+      stream.markdown(fragment);
     }
-  );
-
-  if (provider && "provider" in provider && "family" in provider) {
-    AiConfig.setProvider(provider.provider);
-    AiConfig.setModel(provider.family);
   }
-}
+}
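For context on the new copilotRequest helper: it streams the reply of the first model matching the requested family into the chat view, and silently returns when vscode.lm.selectChatModels({ family }) finds nothing. A minimal sketch of how a caller might surface that case instead; the fallback message and the helper name are illustrative only, not part of this commit, and the sketch assumes the same "import * as vscode from 'vscode'" as the file above.

// Illustrative sketch only; mirrors the committed copilotRequest signature
// but reports when no chat model of the requested family is available.
async function copilotRequestOrWarn(
  model: string,
  messages: vscode.LanguageModelChatMessage[],
  options: vscode.LanguageModelChatRequestOptions,
  token: vscode.CancellationToken,
  stream: vscode.ChatResponseStream
): Promise<void> {
  const models = await vscode.lm.selectChatModels({ family: model });
  if (models.length === 0) {
    // Hypothetical fallback; the committed version simply returns here.
    stream.markdown(`No language model found for family "${model}".`);
    return;
  }

  const [first] = models;
  const response = await first.sendRequest(messages, options, token);
  for await (const fragment of response.text) {
    stream.markdown(fragment);
  }
}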