Skip to content

Commit d05e242

Browse files
committed
Ability to select model
1 parent 6a058cf commit d05e242

File tree

5 files changed

+146
-101
lines changed

5 files changed

+146
-101
lines changed

package.json

Lines changed: 9 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -156,8 +156,9 @@
156156
"order": 0,
157157
"type": "string",
158158
"description": "Model Provider",
159-
"default": "Ollama",
159+
"default": "none",
160160
"enum": [
161+
"none",
161162
"Ollama",
162163
"GitHub Copilot"
163164
],
@@ -166,27 +167,11 @@
166167
"GitHub Copilot. Requires the GitHub Copilot extension to be installed"
167168
]
168169
},
169-
"vscode-db2i.ai.ollama.model": {
170+
"vscode-db2i.ai.model": {
170171
"order": 1,
171-
"if": "vscode-db2i.ai.provider == 'Ollama'",
172172
"type": "string",
173-
"description": "Model to use inside of Ollama",
173+
"description": "Model to use with the provider",
174174
"default": "ibm-granite"
175-
},
176-
"vscode-db2i.ai.ghCopilot.model": {
177-
"order": 2,
178-
"if": "vscode-db2i.ai.provider == 'GitHub Copilot'",
179-
"type": "string",
180-
"description": "Model to use inside of GitHub Copilot",
181-
"default": "gpt-4",
182-
"enum": [
183-
"gpt-4",
184-
"gpt-3.5-turbo"
185-
],
186-
"enumDescriptions": [
187-
"Copilot GPT-4. Requires GitHub Copilot",
188-
"Copilot GPT-3.5 Turbo. Requires GitHub Copilot"
189-
]
190175
}
191176
}
192177
},
@@ -396,6 +381,11 @@
396381
}
397382
],
398383
"commands": [
384+
{
385+
"command": "vscode-db2i.ai.changeModel",
386+
"title": "Change AI Model",
387+
"category": "Db2 for i"
388+
},
399389
{
400390
"command": "vscode-db2i.notebook.open",
401391
"title": "New Notebook",

src/chat/aiConfig.ts

Lines changed: 14 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,24 @@
11
import Configuration from "../configuration";
22

3-
export type AiProvider = "Ollama"|"GitHub Copilot";
3+
/**
4+
* Matches config vscode-db2i.ai.provider
5+
*/
6+
export type AiProvider = "none"|"Ollama"|"GitHub Copilot";
47

58
export class AiConfig {
69
static getProvider(): AiProvider {
710
return Configuration.get<AiProvider>(`ai.provider`);
811
}
912

10-
static getModel(provider: AiProvider): string {
11-
switch (provider) {
12-
case "Ollama":
13-
return Configuration.get<string>("ai.ollama.model");
14-
case "GitHub Copilot":
15-
return Configuration.get<string>("ai.ghCopilot.model");
16-
}
13+
static getModel(): string {
14+
return Configuration.get<string>("ai.model");
15+
}
16+
17+
static setProvider(provider: AiProvider) {
18+
return Configuration.set(`ai.provider`, provider);
19+
}
20+
21+
static setModel(model: string) {
22+
return Configuration.set(`ai.model`, model);
1723
}
1824
}

src/chat/chat.ts

Lines changed: 117 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,9 @@ import { JobManager } from "../config";
33
import Statement from "../database/statement";
44
import { chatRequest } from "./send";
55
import Configuration from "../configuration";
6-
import { getDefaultSchema, findPossibleTables, refsToMarkdown, getSystemStatus } from "./context";
7-
import { AiConfig } from "./aiConfig";
6+
import { getDefaultSchema, findPossibleTables, refsToMarkdown, getSystemStatus, canTalkToDb } from "./context";
7+
import { AiConfig, AiProvider } from "./aiConfig";
8+
import ollama, { ListResponse } from "ollama";
89

910
const CHAT_ID = `vscode-db2i.chat`;
1011

@@ -14,96 +15,111 @@ interface IDB2ChatResult extends vscode.ChatResult {
1415
};
1516
}
1617

18+
interface ModelQuickPickItem extends vscode.QuickPickItem {
19+
provider: AiProvider;
20+
family: string;
21+
}
22+
1723
export function activateChat(context: vscode.ExtensionContext) {
1824
// chatHandler deals with the input from the chat windows,
1925
// and uses streamModelResponse to send the response back to the chat window
20-
const chatHandler: vscode.ChatRequestHandler =async (
26+
const chatHandler: vscode.ChatRequestHandler = async (
2127
request: vscode.ChatRequest,
2228
context: vscode.ChatContext,
2329
stream: vscode.ChatResponseStream,
2430
token: vscode.CancellationToken
2531
): Promise<IDB2ChatResult> => {
2632
let messages: vscode.LanguageModelChatMessage[];
2733

28-
const usingSchema = getDefaultSchema();
29-
30-
switch (request.command) {
31-
case `activity`:
32-
stream.progress(`Grabbing Information about IBM i system`);
33-
const data = await getSystemStatus();
34-
console.log(
35-
`summarize the following data in a readable paragraph: ${data}`
36-
);
37-
messages = [
38-
vscode.LanguageModelChatMessage.User(
39-
`You are an IBM i savant specializing in database features in Db2 for i. Please provide a summary of the current IBM i system state based on the developer requirement.`
40-
),
41-
vscode.LanguageModelChatMessage.User(
42-
`Here is the current IBM i state: ${data}`
43-
),
44-
vscode.LanguageModelChatMessage.User(request.prompt),
45-
];
46-
47-
await streamModelResponse(messages, stream, token);
48-
49-
return { metadata: { command: "activity" } };
50-
51-
default:
52-
context;
53-
stream.progress(
54-
`Getting information from ${Statement.prettyName(usingSchema)}...`
55-
);
56-
let refs = await findPossibleTables(
57-
usingSchema,
58-
request.prompt.split(` `)
59-
);
60-
61-
messages = [
62-
vscode.LanguageModelChatMessage.User(
63-
`You are an IBM i savant specializing in database features in Db2 for i. Your job is to help developers write and debug their SQL along with offering SQL programming advice.`
64-
),
65-
];
66-
67-
if (Object.keys(refs).length === 0) {
68-
stream.progress(`No references found. Doing bigger lookup...`);
69-
refs = await findPossibleTables(usingSchema, []);
70-
}
71-
72-
if (Object.keys(refs).length > 0) {
73-
stream.progress(`Building response...`);
74-
messages.push(
34+
if (canTalkToDb()) {
35+
36+
const usingSchema = getDefaultSchema();
37+
38+
switch (request.command) {
39+
case `activity`:
40+
stream.progress(`Grabbing Information about IBM i system`);
41+
const data = await getSystemStatus();
42+
console.log(
43+
`summarize the following data in a readable paragraph: ${data}`
44+
);
45+
messages = [
7546
vscode.LanguageModelChatMessage.User(
76-
`Give the developer an SQL statement or information based on the prompt and following table references. Always include code examples where it makes sense. Do not make suggestions for references you do not have.`
47+
`You are an IBM i savant specializing in database features in Db2 for i. Please provide a summary of the current IBM i system state based on the developer requirement.`
7748
),
7849
vscode.LanguageModelChatMessage.User(
79-
`Here are the table references for current schema ${usingSchema}\n${refsToMarkdown(
80-
refs
81-
)}`
50+
`Here is the current IBM i state: ${data}`
8251
),
83-
vscode.LanguageModelChatMessage.User(request.prompt)
52+
vscode.LanguageModelChatMessage.User(request.prompt),
53+
];
54+
55+
await streamModelResponse(messages, stream, token);
56+
57+
return { metadata: { command: "activity" } };
58+
59+
default:
60+
context;
61+
stream.progress(
62+
`Getting information from ${Statement.prettyName(usingSchema)}...`
8463
);
85-
} else {
86-
stream.progress(`No references found.`);
87-
messages.push(
88-
vscode.LanguageModelChatMessage.User(
89-
`Warn the developer that their request is not clear or that no references were found. Provide a suggestion or ask for more information.`
90-
),
91-
vscode.LanguageModelChatMessage.User(
92-
`The developers current schema is ${usingSchema}.`
93-
)
64+
let refs = await findPossibleTables(
65+
usingSchema,
66+
request.prompt.split(` `)
9467
);
95-
}
96-
97-
await streamModelResponse(messages, stream, token);
9868

99-
return { metadata: { command: "build" } };
69+
messages = [
70+
vscode.LanguageModelChatMessage.User(
71+
`You are an IBM i savant specializing in database features in Db2 for i. Your job is to help developers write and debug their SQL along with offering SQL programming advice.`
72+
),
73+
];
74+
75+
if (Object.keys(refs).length === 0) {
76+
stream.progress(`No references found. Doing bigger lookup...`);
77+
refs = await findPossibleTables(usingSchema, []);
78+
}
79+
80+
if (Object.keys(refs).length > 0) {
81+
stream.progress(`Building response...`);
82+
messages.push(
83+
vscode.LanguageModelChatMessage.User(
84+
`Give the developer an SQL statement or information based on the prompt and following table references. Always include code examples where it makes sense. Do not make suggestions for references you do not have.`
85+
),
86+
vscode.LanguageModelChatMessage.User(
87+
`Here are the table references for current schema ${usingSchema}\n${refsToMarkdown(
88+
refs
89+
)}`
90+
),
91+
vscode.LanguageModelChatMessage.User(request.prompt)
92+
);
93+
} else {
94+
stream.progress(`No references found.`);
95+
messages.push(
96+
vscode.LanguageModelChatMessage.User(
97+
`Warn the developer that their request is not clear or that no references were found. Provide a suggestion or ask for more information.`
98+
),
99+
vscode.LanguageModelChatMessage.User(
100+
`The developer's current schema is ${usingSchema}.`
101+
)
102+
);
103+
}
104+
105+
await streamModelResponse(messages, stream, token);
106+
107+
return { metadata: { command: "build" } };
108+
}
109+
} else {
110+
throw new Error(`Not connected to the database. Please check your configuration.`)
100111
}
101112
};
102113

103114
const chat = vscode.chat.createChatParticipant(CHAT_ID, chatHandler);
104115
chat.iconPath = new vscode.ThemeIcon(`database`);
105116

106-
context.subscriptions.push(chat);
117+
const changeModelCommand = vscode.commands.registerCommand(`vscode-db2i.ai.changeModel`, selectProviderAndModel);
118+
119+
context.subscriptions.push(
120+
chat,
121+
changeModelCommand
122+
);
107123
}
108124

109125
async function streamModelResponse(
@@ -113,6 +129,15 @@ async function streamModelResponse(
113129
) {
114130
const chosenProvider = AiConfig.getProvider();
115131

132+
if (chosenProvider === `none`) {
133+
stream.markdown(`No AI provider selected. Please select an AI provider and model.`);
134+
stream.button({
135+
command: `vscode-db2i.ai.changeModel`,
136+
title: `Select AI Provider and Model`,
137+
});
138+
return;
139+
}
140+
116141
try {
117142
stream.progress(`Using model ${chosenProvider} with Ollama...`);
118143

@@ -132,4 +157,26 @@ async function streamModelResponse(
132157
}
133158
}
134159

135-
export function deactivate() {}
160+
async function selectProviderAndModel() {
161+
const copilotModels = await vscode.lm.selectChatModels();
162+
let ollamaModels: ListResponse = {models: []};
163+
164+
try {
165+
ollamaModels = await ollama.list();
166+
} catch (e) {}
167+
168+
const provider = await vscode.window.showQuickPick(
169+
[
170+
...ollamaModels.models.map((model): ModelQuickPickItem => ({ label: model.name, family: model.name, provider: "Ollama", iconPath: new vscode.ThemeIcon("heart") })),
171+
...copilotModels.map((model): ModelQuickPickItem => ({ label: model.name, family: model.family, provider: "GitHub Copilot", iconPath: new vscode.ThemeIcon("copilot") })),
172+
],
173+
{
174+
title: "Select the AI model",
175+
}
176+
);
177+
178+
if (provider) {
179+
AiConfig.setProvider(provider.provider);
180+
AiConfig.setModel(provider.family);
181+
}
182+
}

src/chat/context.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,10 @@
11
import { JobManager } from "../config";
22
import Statement from "../database/statement";
33

4+
export function canTalkToDb() {
5+
return JobManager.getSelection() !== undefined;
6+
}
7+
48
export function getDefaultSchema(): string {
59
const currentJob = JobManager.getSelection();
610
return currentJob && currentJob.job.options.libraries[0]

src/chat/send.ts

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -15,16 +15,14 @@ export async function chatRequest(
1515
options: LanguageModelChatRequestOptions,
1616
token?: CancellationToken
1717
): Promise<Thenable<LanguageModelChatResponse>> {
18-
const chosenModel = AiConfig.getModel(provider);
18+
const chosenModel = AiConfig.getModel();
1919

20-
switch (chosenModel) {
20+
switch (provider) {
2121
case "Ollama":
2222
return ollamaRequest(chosenModel, messages);
2323
case "GitHub Copilot":
2424
return copilotRequest(chosenModel, messages, options, token);
2525
}
26-
27-
return ollamaRequest(chosenModel, messages);
2826
}
2927

3028
async function copilotRequest(

0 commit comments

Comments
 (0)