Skip to content

Commit 50cae9b

Browse files
authored
Merge pull request #367 from RooVetGit/vscode-lm-provider
Add VSCode-LM as a provider
2 parents c3c2fd9 + cff5b35 commit 50cae9b

File tree

18 files changed

+1562
-8
lines changed

18 files changed

+1562
-8
lines changed

.changeset/eleven-papayas-fold.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"roo-cline": patch
3+
---
4+
5+
Experimental support for VS Code Language Models (thanks @RaySinner / @julesmons!)

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,7 @@ Give it a try and let us know what you think in the reddit: https://www.reddit.c
6060
- Support for Glama
6161
- Support for listing models from OpenAI-compatible providers
6262
- Support for adding OpenAI-compatible models with or without streaming
63+
- Experimental support for VS Code Language Models (e.g. Copilot)
6364
- Per-tool MCP auto-approval
6465
- Enable/disable individual MCP servers
6566
- Enable/disable the MCP feature overall

package.json

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,10 @@
4242
"ai",
4343
"llama"
4444
],
45-
"activationEvents": [],
45+
"activationEvents": [
46+
"onLanguage",
47+
"onStartupFinished"
48+
],
4649
"main": "./dist/extension.js",
4750
"contributes": {
4851
"viewsContainers": {
@@ -151,6 +154,20 @@
151154
"git show"
152155
],
153156
"description": "Commands that can be auto-executed when 'Always approve execute operations' is enabled"
157+
},
158+
"roo-cline.vsCodeLmModelSelector": {
159+
"type": "object",
160+
"properties": {
161+
"vendor": {
162+
"type": "string",
163+
"description": "The vendor of the language model (e.g. copilot)"
164+
},
165+
"family": {
166+
"type": "string",
167+
"description": "The family of the language model (e.g. gpt-4)"
168+
}
169+
},
170+
"description": "Settings for VSCode Language Model API"
154171
}
155172
}
156173
}

src/api/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ import { LmStudioHandler } from "./providers/lmstudio"
1111
import { GeminiHandler } from "./providers/gemini"
1212
import { OpenAiNativeHandler } from "./providers/openai-native"
1313
import { DeepSeekHandler } from "./providers/deepseek"
14+
import { VsCodeLmHandler } from "./providers/vscode-lm"
1415
import { ApiStream } from "./transform/stream"
1516

1617
export interface SingleCompletionHandler {
Lines changed: 289 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,289 @@
1+
import * as vscode from 'vscode';
2+
import { VsCodeLmHandler } from '../vscode-lm';
3+
import { ApiHandlerOptions } from '../../../shared/api';
4+
import { Anthropic } from '@anthropic-ai/sdk';
5+
6+
// Mock vscode namespace
7+
jest.mock('vscode', () => {
8+
class MockLanguageModelTextPart {
9+
type = 'text';
10+
constructor(public value: string) {}
11+
}
12+
13+
class MockLanguageModelToolCallPart {
14+
type = 'tool_call';
15+
constructor(
16+
public callId: string,
17+
public name: string,
18+
public input: any
19+
) {}
20+
}
21+
22+
return {
23+
workspace: {
24+
onDidChangeConfiguration: jest.fn((callback) => ({
25+
dispose: jest.fn()
26+
}))
27+
},
28+
CancellationTokenSource: jest.fn(() => ({
29+
token: {
30+
isCancellationRequested: false,
31+
onCancellationRequested: jest.fn()
32+
},
33+
cancel: jest.fn(),
34+
dispose: jest.fn()
35+
})),
36+
CancellationError: class CancellationError extends Error {
37+
constructor() {
38+
super('Operation cancelled');
39+
this.name = 'CancellationError';
40+
}
41+
},
42+
LanguageModelChatMessage: {
43+
Assistant: jest.fn((content) => ({
44+
role: 'assistant',
45+
content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
46+
})),
47+
User: jest.fn((content) => ({
48+
role: 'user',
49+
content: Array.isArray(content) ? content : [new MockLanguageModelTextPart(content)]
50+
}))
51+
},
52+
LanguageModelTextPart: MockLanguageModelTextPart,
53+
LanguageModelToolCallPart: MockLanguageModelToolCallPart,
54+
lm: {
55+
selectChatModels: jest.fn()
56+
}
57+
};
58+
});
59+
60+
// Shared chat-model fixture: tests clone it (spread) and have
// vscode.lm.selectChatModels resolve to the clone. sendRequest/countTokens
// are plain jest mocks, configured per-test.
const mockLanguageModelChat = {
    id: 'test-model',
    name: 'Test Model',
    vendor: 'test-vendor',
    family: 'test-family',
    version: '1.0',
    maxInputTokens: 4096, // surfaced as the model's contextWindow in getModel tests
    sendRequest: jest.fn(),
    countTokens: jest.fn()
};
70+
71+
// Unit tests for VsCodeLmHandler. The vscode API is fully mocked above, so
// only the handler's own logic (client selection, streaming, token usage,
// error wrapping) is exercised here.
describe('VsCodeLmHandler', () => {
    let handler: VsCodeLmHandler;
    // Selector forwarded to vscode.lm.selectChatModels when picking a model.
    const defaultOptions: ApiHandlerOptions = {
        vsCodeLmModelSelector: {
            vendor: 'test-vendor',
            family: 'test-family'
        }
    };

    beforeEach(() => {
        jest.clearAllMocks();
        handler = new VsCodeLmHandler(defaultOptions);
    });

    afterEach(() => {
        // Expected to release the config-change subscription taken in the
        // constructor (see the constructor tests below).
        handler.dispose();
    });

    describe('constructor', () => {
        it('should initialize with provided options', () => {
            expect(handler).toBeDefined();
            // Constructor subscribes to workspace configuration changes.
            expect(vscode.workspace.onDidChangeConfiguration).toHaveBeenCalled();
        });

        it('should handle configuration changes', () => {
            // Pull out the listener registered in the constructor and fire it
            // with an event claiming the relevant configuration changed.
            const callback = (vscode.workspace.onDidChangeConfiguration as jest.Mock).mock.calls[0][0];
            callback({ affectsConfiguration: () => true });
            // Should reset client when config changes
            expect(handler['client']).toBeNull();
        });
    });

    describe('createClient', () => {
        it('should create client with selector', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            // createClient is private; accessed via index notation.
            const client = await handler['createClient']({
                vendor: 'test-vendor',
                family: 'test-family'
            });

            expect(client).toBeDefined();
            expect(client.id).toBe('test-model');
            // Selector must be passed through to the vscode API unchanged.
            expect(vscode.lm.selectChatModels).toHaveBeenCalledWith({
                vendor: 'test-vendor',
                family: 'test-family'
            });
        });

        it('should return default client when no models available', async () => {
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([]);

            const client = await handler['createClient']({});

            // With no matching models the handler falls back to a stub client.
            expect(client).toBeDefined();
            expect(client.id).toBe('default-lm');
            expect(client.vendor).toBe('vscode');
        });
    });

    describe('createMessage', () => {
        beforeEach(() => {
            // One model available for each test; countTokens resolves to a
            // fixed value so usage chunks carry deterministic numbers.
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);
            mockLanguageModelChat.countTokens.mockResolvedValue(10);
        });

        it('should stream text responses', async () => {
            const systemPrompt = 'You are a helpful assistant';
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user' as const,
                content: 'Hello'
            }];

            const responseText = 'Hello! How can I help you?';
            // sendRequest yields both a part stream and a text stream, mirroring
            // the vscode LanguageModelChatResponse shape.
            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                stream: (async function* () {
                    yield new vscode.LanguageModelTextPart(responseText);
                    return;
                })(),
                text: (async function* () {
                    yield responseText;
                    return;
                })()
            });

            const stream = handler.createMessage(systemPrompt, messages);
            const chunks = [];
            for await (const chunk of stream) {
                chunks.push(chunk);
            }

            expect(chunks).toHaveLength(2); // Text chunk + usage chunk
            expect(chunks[0]).toEqual({
                type: 'text',
                text: responseText
            });
            expect(chunks[1]).toMatchObject({
                type: 'usage',
                inputTokens: expect.any(Number),
                outputTokens: expect.any(Number)
            });
        });

        it('should handle tool calls', async () => {
            const systemPrompt = 'You are a helpful assistant';
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user' as const,
                content: 'Calculate 2+2'
            }];

            const toolCallData = {
                name: 'calculator',
                arguments: { operation: 'add', numbers: [2, 2] },
                callId: 'call-1'
            };

            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                stream: (async function* () {
                    yield new vscode.LanguageModelToolCallPart(
                        toolCallData.callId,
                        toolCallData.name,
                        toolCallData.arguments
                    );
                    return;
                })(),
                text: (async function* () {
                    yield JSON.stringify({ type: 'tool_call', ...toolCallData });
                    return;
                })()
            });

            const stream = handler.createMessage(systemPrompt, messages);
            const chunks = [];
            for await (const chunk of stream) {
                chunks.push(chunk);
            }

            expect(chunks).toHaveLength(2); // Tool call chunk + usage chunk
            // Tool calls are surfaced as a JSON-serialized text chunk rather
            // than a dedicated chunk type.
            expect(chunks[0]).toEqual({
                type: 'text',
                text: JSON.stringify({ type: 'tool_call', ...toolCallData })
            });
        });

        it('should handle errors', async () => {
            const systemPrompt = 'You are a helpful assistant';
            const messages: Anthropic.Messages.MessageParam[] = [{
                role: 'user' as const,
                content: 'Hello'
            }];

            mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('API Error'));

            // The error surfaces when the generator is consumed, not when
            // createMessage is called, hence the wrapping async function.
            await expect(async () => {
                const stream = handler.createMessage(systemPrompt, messages);
                for await (const _ of stream) {
                    // consume stream
                }
            }).rejects.toThrow('API Error');
        });
    });

    describe('getModel', () => {
        it('should return model info when client exists', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            // Initialize client
            await handler['getClient']();

            const model = handler.getModel();
            expect(model.id).toBe('test-model');
            expect(model.info).toBeDefined();
            // contextWindow is derived from the model's maxInputTokens (4096).
            expect(model.info.contextWindow).toBe(4096);
        });

        it('should return fallback model info when no client exists', () => {
            // Without a client, the id is synthesized from the selector as
            // "<vendor>/<family>".
            const model = handler.getModel();
            expect(model.id).toBe('test-vendor/test-family');
            expect(model.info).toBeDefined();
        });
    });

    describe('completePrompt', () => {
        it('should complete single prompt', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            const responseText = 'Completed text';
            mockLanguageModelChat.sendRequest.mockResolvedValueOnce({
                stream: (async function* () {
                    yield new vscode.LanguageModelTextPart(responseText);
                    return;
                })(),
                text: (async function* () {
                    yield responseText;
                    return;
                })()
            });

            const result = await handler.completePrompt('Test prompt');
            expect(result).toBe(responseText);
            expect(mockLanguageModelChat.sendRequest).toHaveBeenCalled();
        });

        it('should handle errors during completion', async () => {
            const mockModel = { ...mockLanguageModelChat };
            (vscode.lm.selectChatModels as jest.Mock).mockResolvedValueOnce([mockModel]);

            mockLanguageModelChat.sendRequest.mockRejectedValueOnce(new Error('Completion failed'));

            // completePrompt wraps underlying errors with a provider prefix.
            await expect(handler.completePrompt('Test prompt'))
                .rejects
                .toThrow('VSCode LM completion error: Completion failed');
        });
    });
});

0 commit comments

Comments
 (0)