Commit 98b9007
merge(upstream): merge upstream changes keeping VSCode LM provider and adding Glama support
2 parents 9d62a7b + 6beb90e

29 files changed: +2057 -297 lines

CHANGELOG.md

Lines changed: 12 additions & 0 deletions
@@ -1,5 +1,17 @@
 # Roo Cline Changelog

+## [2.2.42]
+
+- Add a Git section to the context mentions
+
+## [2.2.41]
+
+- Checkbox to disable streaming for OpenAI-compatible providers
+
+## [2.2.40]
+
+- Add the Glama provider (thanks @punkpeye!)
+
 ## [2.2.39]

 - Add toggle to enable/disable the MCP-related sections of the system prompt (thanks @daniel-lxs!)

README.md

Lines changed: 4 additions & 1 deletion
@@ -6,6 +6,7 @@ A fork of Cline, an autonomous coding agent, with some additional experimental f

 - Drag and drop images into chats
 - Delete messages from chats
+- @-mention Git commits to include their context in the chat
 - "Enhance prompt" button (OpenRouter models only for now)
 - Sound effects for feedback
 - Option to use browsers of different sizes and adjust screenshot quality

@@ -16,7 +17,9 @@ A fork of Cline, an autonomous coding agent, with some additional experimental f
 - Language selection for Cline's communication (English, Japanese, Spanish, French, German, and more)
 - Support for DeepSeek V3
 - Support for Amazon Nova and Meta 3, 3.1, and 3.2 models via AWS Bedrock
+- Support for Glama
 - Support for listing models from OpenAI-compatible providers
+- Support for adding OpenAI-compatible models with or without streaming
 - Per-tool MCP auto-approval
 - Enable/disable individual MCP servers
 - Enable/disable the MCP feature overall

@@ -135,7 +138,7 @@ Thanks to [Claude 3.5 Sonnet's agentic coding capabilities](https://www-cdn.ant

 ### Use any API and Model

-Cline supports API providers like OpenRouter, Anthropic, OpenAI, Google Gemini, AWS Bedrock, Azure, and GCP Vertex. You can also configure any OpenAI compatible API, or use a local model through LM Studio/Ollama. If you're using OpenRouter, the extension fetches their latest model list, allowing you to use the newest models as soon as they're available.
+Cline supports API providers like OpenRouter, Anthropic, Glama, OpenAI, Google Gemini, AWS Bedrock, Azure, and GCP Vertex. You can also configure any OpenAI compatible API, or use a local model through LM Studio/Ollama. If you're using OpenRouter, the extension fetches their latest model list, allowing you to use the newest models as soon as they're available.

 The extension also keeps track of total tokens and API usage cost for the entire task loop and individual requests, keeping you informed of spend every step of the way.

package-lock.json

Lines changed: 3 additions & 3 deletions
Generated file not rendered by default.

package.json

Lines changed: 2 additions & 2 deletions
@@ -3,7 +3,7 @@
 	"displayName": "Roo Cline",
 	"description": "A fork of Cline, an autonomous coding agent, with some added experimental configuration and automation features.",
 	"publisher": "RooVeterinaryInc",
-	"version": "2.2.39",
+	"version": "2.2.42",
 	"icon": "assets/icons/rocket.png",
 	"galleryBanner": {
 		"color": "#617A91",
@@ -231,7 +231,7 @@
 		"isbinaryfile": "^5.0.2",
 		"mammoth": "^1.8.0",
 		"monaco-vscode-textmate-theme-converter": "^0.1.7",
-		"openai": "^4.61.0",
+		"openai": "^4.73.1",
 		"os-name": "^6.0.0",
 		"p-wait-for": "^5.0.2",
 		"pdf-parse": "^1.1.1",

src/api/index.ts

Lines changed: 3 additions & 0 deletions
@@ -1,4 +1,5 @@
 import { Anthropic } from "@anthropic-ai/sdk"
+import { GlamaHandler } from "./providers/glama"
 import { ApiConfiguration, ModelInfo } from "../shared/api"
 import { AnthropicHandler } from "./providers/anthropic"
 import { AwsBedrockHandler } from "./providers/bedrock"
@@ -28,6 +29,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
 	switch (apiProvider) {
 		case "anthropic":
 			return new AnthropicHandler(options)
+		case "glama":
+			return new GlamaHandler(options)
 		case "openrouter":
 			return new OpenRouterHandler(options)
 		case "bedrock":
src/api/providers/__tests__/openai.test.ts (new file; path inferred from its imports)

Lines changed: 192 additions & 0 deletions

@@ -0,0 +1,192 @@
import { OpenAiHandler } from '../openai'
import { ApiHandlerOptions, openAiModelInfoSaneDefaults } from '../../../shared/api'
import OpenAI, { AzureOpenAI } from 'openai'
import { Anthropic } from '@anthropic-ai/sdk'

// Mock dependencies
jest.mock('openai')

describe('OpenAiHandler', () => {
	const mockOptions: ApiHandlerOptions = {
		openAiApiKey: 'test-key',
		openAiModelId: 'gpt-4',
		openAiStreamingEnabled: true,
		openAiBaseUrl: 'https://api.openai.com/v1'
	}

	beforeEach(() => {
		jest.clearAllMocks()
	})

	test('constructor initializes with correct options', () => {
		const handler = new OpenAiHandler(mockOptions)
		expect(handler).toBeInstanceOf(OpenAiHandler)
		expect(OpenAI).toHaveBeenCalledWith({
			apiKey: mockOptions.openAiApiKey,
			baseURL: mockOptions.openAiBaseUrl
		})
	})

	test('constructor initializes Azure client when Azure URL is provided', () => {
		const azureOptions: ApiHandlerOptions = {
			...mockOptions,
			openAiBaseUrl: 'https://example.azure.com',
			azureApiVersion: '2023-05-15'
		}
		const handler = new OpenAiHandler(azureOptions)
		expect(handler).toBeInstanceOf(OpenAiHandler)
		expect(AzureOpenAI).toHaveBeenCalledWith({
			baseURL: azureOptions.openAiBaseUrl,
			apiKey: azureOptions.openAiApiKey,
			apiVersion: azureOptions.azureApiVersion
		})
	})

	test('getModel returns correct model info', () => {
		const handler = new OpenAiHandler(mockOptions)
		const result = handler.getModel()

		expect(result).toEqual({
			id: mockOptions.openAiModelId,
			info: openAiModelInfoSaneDefaults
		})
	})

	test('createMessage handles streaming correctly when enabled', async () => {
		const handler = new OpenAiHandler({
			...mockOptions,
			openAiStreamingEnabled: true,
			includeMaxTokens: true
		})

		const mockStream = {
			async *[Symbol.asyncIterator]() {
				yield {
					choices: [{
						delta: {
							content: 'test response'
						}
					}],
					usage: {
						prompt_tokens: 10,
						completion_tokens: 5
					}
				}
			}
		}

		const mockCreate = jest.fn().mockResolvedValue(mockStream)
		;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
			completions: { create: mockCreate }
		} as any

		const systemPrompt = 'test system prompt'
		const messages: Anthropic.Messages.MessageParam[] = [
			{ role: 'user', content: 'test message' }
		]

		const generator = handler.createMessage(systemPrompt, messages)
		const chunks = []

		for await (const chunk of generator) {
			chunks.push(chunk)
		}

		expect(chunks).toEqual([
			{
				type: 'text',
				text: 'test response'
			},
			{
				type: 'usage',
				inputTokens: 10,
				outputTokens: 5
			}
		])

		expect(mockCreate).toHaveBeenCalledWith({
			model: mockOptions.openAiModelId,
			messages: [
				{ role: 'system', content: systemPrompt },
				{ role: 'user', content: 'test message' }
			],
			temperature: 0,
			stream: true,
			stream_options: { include_usage: true },
			max_tokens: openAiModelInfoSaneDefaults.maxTokens
		})
	})

	test('createMessage handles non-streaming correctly when disabled', async () => {
		const handler = new OpenAiHandler({
			...mockOptions,
			openAiStreamingEnabled: false
		})

		const mockResponse = {
			choices: [{
				message: {
					content: 'test response'
				}
			}],
			usage: {
				prompt_tokens: 10,
				completion_tokens: 5
			}
		}

		const mockCreate = jest.fn().mockResolvedValue(mockResponse)
		;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
			completions: { create: mockCreate }
		} as any

		const systemPrompt = 'test system prompt'
		const messages: Anthropic.Messages.MessageParam[] = [
			{ role: 'user', content: 'test message' }
		]

		const generator = handler.createMessage(systemPrompt, messages)
		const chunks = []

		for await (const chunk of generator) {
			chunks.push(chunk)
		}

		expect(chunks).toEqual([
			{
				type: 'text',
				text: 'test response'
			},
			{
				type: 'usage',
				inputTokens: 10,
				outputTokens: 5
			}
		])

		expect(mockCreate).toHaveBeenCalledWith({
			model: mockOptions.openAiModelId,
			messages: [
				{ role: 'user', content: systemPrompt },
				{ role: 'user', content: 'test message' }
			]
		})
	})

	test('createMessage handles API errors', async () => {
		const handler = new OpenAiHandler(mockOptions)
		const mockStream = {
			async *[Symbol.asyncIterator]() {
				throw new Error('API Error')
			}
		}

		const mockCreate = jest.fn().mockResolvedValue(mockStream)
		;(OpenAI as jest.MockedClass<typeof OpenAI>).prototype.chat = {
			completions: { create: mockCreate }
		} as any

		const generator = handler.createMessage('test', [])
		await expect(generator.next()).rejects.toThrow('API Error')
	})
})
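
Taken together, these tests pin down the handler's observable contract: the streaming path sends the system prompt with the "system" role and sets temperature, stream, stream_options, and (optionally) max_tokens, while the non-streaming path sends the system prompt as a plain user-role message and yields one text chunk plus usage. The following is a hedged sketch of a createMessage that would satisfy these assertions; it is inferred from the tests only, and the real implementation in src/api/providers/openai.ts may differ.

import OpenAI from "openai"
import { Anthropic } from "@anthropic-ai/sdk"

// Chunk shape inferred from the test expectations above.
type ApiStreamChunk =
	| { type: "text"; text: string }
	| { type: "usage"; inputTokens: number; outputTokens: number }

// Hypothetical reconstruction: the parameter plumbing and names here are assumptions.
async function* createMessageSketch(
	client: OpenAI,
	options: { openAiModelId?: string; openAiStreamingEnabled?: boolean; includeMaxTokens?: boolean },
	systemPrompt: string,
	messages: Anthropic.Messages.MessageParam[],
	maxTokens?: number,
): AsyncGenerator<ApiStreamChunk> {
	// The tests pass simple string-content messages straight through.
	const userMessages = messages as unknown as OpenAI.Chat.ChatCompletionMessageParam[]

	if (options.openAiStreamingEnabled) {
		// Streaming path: "system" role for the prompt; usage arrives on a chunk
		// because stream_options.include_usage is set.
		const stream = await client.chat.completions.create({
			model: options.openAiModelId ?? "",
			messages: [{ role: "system", content: systemPrompt }, ...userMessages],
			temperature: 0,
			stream: true,
			stream_options: { include_usage: true },
			...(options.includeMaxTokens ? { max_tokens: maxTokens } : {}),
		})
		for await (const chunk of stream) {
			const delta = chunk.choices[0]?.delta
			if (delta?.content) {
				yield { type: "text", text: delta.content }
			}
			if (chunk.usage) {
				yield { type: "usage", inputTokens: chunk.usage.prompt_tokens, outputTokens: chunk.usage.completion_tokens }
			}
		}
	} else {
		// Non-streaming path: the tests expect the system prompt to be sent
		// with the "user" role here, and no temperature or stream flags.
		const response = await client.chat.completions.create({
			model: options.openAiModelId ?? "",
			messages: [{ role: "user", content: systemPrompt }, ...userMessages],
		})
		yield { type: "text", text: response.choices[0]?.message?.content ?? "" }
		yield {
			type: "usage",
			inputTokens: response.usage?.prompt_tokens ?? 0,
			outputTokens: response.usage?.completion_tokens ?? 0,
		}
	}
}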
