Skip to content

Commit 8fd7d49

Browse files
committed
Pass stream all the way down
1 parent b8cf527 commit 8fd7d49

File tree

2 files changed

+25
-36
lines changed

2 files changed

+25
-36
lines changed

src/chat/chat.ts

Lines changed: 8 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -138,26 +138,13 @@ async function streamModelResponse(
138138
return;
139139
}
140140

141-
try {
142-
stream.progress(`Using ${chosenProvider}...`);
143-
144-
const chatResponse = await chatRequest(chosenProvider, messages, {}, token);
141+
stream.progress(`Using ${chosenProvider}...`);
145142

146-
for await (const fragement of chatResponse.text) {
147-
stream.markdown(fragement);
148-
}
149-
} catch (err) {
150-
if (err instanceof vscode.LanguageModelError) {
151-
console.log(err.message, err.code, err.stack);
152-
} else {
153-
console.log(err);
154-
}
155-
156-
stream.markdown(`Failed to get a response from ${chosenProvider}.`);
157-
}
143+
await chatRequest(chosenProvider, messages, {}, token, stream);
158144
}
159145

160146
async function selectProviderAndModel() {
147+
const selected = AiConfig.getModel();
161148
const copilotModels = await vscode.lm.selectChatModels();
162149
let ollamaModels: ListResponse = {models: []};
163150

@@ -167,15 +154,17 @@ async function selectProviderAndModel() {
167154

168155
const provider = await vscode.window.showQuickPick(
169156
[
170-
...ollamaModels.models.map((model): ModelQuickPickItem => ({ label: model.name, family: model.name, provider: "Ollama", iconPath: new vscode.ThemeIcon("heart") })),
171-
...copilotModels.map((model): ModelQuickPickItem => ({ label: model.name, family: model.family, provider: "GitHub Copilot", iconPath: new vscode.ThemeIcon("copilot") })),
157+
{kind: vscode.QuickPickItemKind.Separator, label: "Ollama Models"},
158+
...ollamaModels.models.map((model): ModelQuickPickItem => ({ label: model.name, family: model.name, provider: "Ollama", iconPath: new vscode.ThemeIcon("heart"), picked: model.name === selected})),
159+
{kind: vscode.QuickPickItemKind.Separator, label: "GitHub Copilot Models"},
160+
...copilotModels.map((model): ModelQuickPickItem => ({ label: model.name, family: model.family, provider: "GitHub Copilot", iconPath: new vscode.ThemeIcon("copilot")})),
172161
],
173162
{
174163
title: "Select the AI model",
175164
}
176165
);
177166

178-
if (provider) {
167+
if (provider && 'provider' in provider && 'family' in provider) {
179168
AiConfig.setProvider(provider.provider);
180169
AiConfig.setModel(provider.family);
181170
}

src/chat/send.ts

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -9,40 +9,46 @@ import {
99
import Configuration from "../configuration";
1010
import { AiConfig, AiProvider } from "./aiConfig";
1111

12-
export async function chatRequest(
12+
export function chatRequest(
1313
provider: AiProvider,
1414
messages: LanguageModelChatMessage[],
1515
options: LanguageModelChatRequestOptions,
16-
token?: CancellationToken
17-
): Promise<Thenable<LanguageModelChatResponse>> {
16+
token: CancellationToken,
17+
stream: vscode.ChatResponseStream
18+
): Promise<void> {
1819
const chosenModel = AiConfig.getModel();
1920

2021
switch (provider) {
2122
case "Ollama":
22-
return ollamaRequest(chosenModel, messages);
23+
return ollamaRequest(chosenModel, messages, stream);
2324
case "GitHub Copilot":
24-
return copilotRequest(chosenModel, messages, options, token);
25+
return copilotRequest(chosenModel, messages, options, token, stream);
2526
}
2627
}
2728

2829
async function copilotRequest(
2930
model: string,
3031
messages: LanguageModelChatMessage[],
3132
options: LanguageModelChatRequestOptions,
32-
token?: CancellationToken
33-
): Promise<LanguageModelChatResponse> {
33+
token: CancellationToken,
34+
stream: vscode.ChatResponseStream
35+
): Promise<void> {
3436
const models = await vscode.lm.selectChatModels({ family: model });
3537
if (models.length > 0) {
3638
const [first] = models;
3739
const response = await first.sendRequest(messages, options, token);
38-
return response;
40+
41+
for await (const fragment of response.text) {
42+
stream.markdown(fragment);
43+
}
3944
}
4045
}
4146

4247
async function ollamaRequest(
4348
model: string,
44-
messages: LanguageModelChatMessage[]
45-
): Promise<LanguageModelChatResponse> {
49+
messages: LanguageModelChatMessage[],
50+
stream: vscode.ChatResponseStream
51+
): Promise<void> {
4652
const chats = [];
4753
for (const message of messages) {
4854
chats.push({
@@ -58,11 +64,5 @@ async function ollamaRequest(
5864

5965
console.log(response.message.content);
6066

61-
return {
62-
text: {
63-
[Symbol.asyncIterator]: async function* () {
64-
yield response.message.content;
65-
},
66-
},
67-
};
67+
stream.markdown(response.message.content);
6868
}

0 commit comments

Comments (0)