@@ -9,40 +9,46 @@ import {
 import Configuration from "../configuration";
 import { AiConfig, AiProvider } from "./aiConfig";
 
-export async function chatRequest(
+export function chatRequest(
   provider: AiProvider,
   messages: LanguageModelChatMessage[],
   options: LanguageModelChatRequestOptions,
-  token?: CancellationToken
-): Promise<Thenable<LanguageModelChatResponse>> {
+  token: CancellationToken,
+  stream: vscode.ChatResponseStream
+): Promise<void> {
   const chosenModel = AiConfig.getModel();
 
   switch (provider) {
     case "Ollama":
-      return ollamaRequest(chosenModel, messages);
+      return ollamaRequest(chosenModel, messages, stream);
     case "GitHub Copilot":
-      return copilotRequest(chosenModel, messages, options, token);
+      return copilotRequest(chosenModel, messages, options, token, stream);
   }
 }
 
 async function copilotRequest(
   model: string,
   messages: LanguageModelChatMessage[],
   options: LanguageModelChatRequestOptions,
-  token?: CancellationToken
-): Promise<LanguageModelChatResponse> {
+  token: CancellationToken,
+  stream: vscode.ChatResponseStream
+): Promise<void> {
   const models = await vscode.lm.selectChatModels({ family: model });
   if (models.length > 0) {
     const [first] = models;
     const response = await first.sendRequest(messages, options, token);
-    return response;
+
+    for await (const fragment of response.text) {
+      stream.markdown(fragment);
+    }
   }
 }
 
 async function ollamaRequest(
   model: string,
-  messages: LanguageModelChatMessage[]
-): Promise<LanguageModelChatResponse> {
+  messages: LanguageModelChatMessage[],
+  stream: vscode.ChatResponseStream
+): Promise<void> {
   const chats = [];
   for (const message of messages) {
     chats.push({
@@ -58,11 +64,5 @@ async function ollamaRequest(
 
   console.log(response.message.content);
 
-  return {
-    text: {
-      [Symbol.asyncIterator]: async function* () {
-        yield response.message.content;
-      },
-    },
-  };
+  stream.markdown(response.message.content);
 }
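For context, here is a minimal sketch of how the reworked, stream-based chatRequest might be driven from a VS Code chat participant handler. The participant id, the module path in the import, and the AiConfig.getProvider() accessor are assumptions for illustration; the diff itself only shows that callers supply an AiProvider, a CancellationToken, and a vscode.ChatResponseStream.

import * as vscode from "vscode";
import { chatRequest } from "./chatRequest"; // assumed path for the module in this diff
import { AiConfig, AiProvider } from "./aiConfig";

export function activate(context: vscode.ExtensionContext) {
  // "example.ai-chat" is a placeholder participant id, not from the PR.
  const participant = vscode.chat.createChatParticipant(
    "example.ai-chat",
    async (request, _chatContext, stream, token) => {
      // Wrap the user's prompt as a single chat message.
      const messages = [vscode.LanguageModelChatMessage.User(request.prompt)];

      // AiConfig.getProvider() is an assumed accessor; the diff only shows
      // AiConfig.getModel() and that callers pass an AiProvider value.
      const provider: AiProvider = AiConfig.getProvider();

      // chatRequest now writes markdown fragments to `stream` as they
      // arrive instead of returning a LanguageModelChatResponse.
      await chatRequest(provider, messages, {}, token, stream);
    }
  );

  context.subscriptions.push(participant);
}

Passing the ChatResponseStream down through chatRequest lets copilotRequest forward each fragment with stream.markdown() as it arrives, so the Copilot path renders incrementally; the Ollama path still buffers the full reply and emits it in one markdown call.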