
Commit 6d76367

Merge branch 'main' into cte/nix-develop

2 parents: 12b462e + bc5b00e

32 files changed: +1057 −166 lines

CHANGELOG.md

Lines changed: 15 additions & 0 deletions
@@ -1,5 +1,20 @@
 # Roo Code Changelog
 
+## [3.3.6]
+
+- Add a "new task" tool that allows Roo to start new tasks with an initial message and mode
+- Fix a bug that was preventing the use of qwen-max and potentially other OpenAI-compatible providers (thanks @Szpadel!)
+- Add support for perplexity/sonar-reasoning (thanks @Szpadel!)
+- Visual fixes to dropdowns (thanks @psv2522!)
+- Add the [Unbound](https://getunbound.ai/) provider (thanks @vigneshsubbiah16!)
+
+## [3.3.5]
+
+- Make information about the conversation's context window usage visible in the task header for humans and in the environment for models (thanks @MuriloFP!)
+- Add checkboxes to auto-approve mode switch requests (thanks @MuriloFP!)
+- Add new experimental editing tools `insert_content` (for inserting blocks of text at a line number) and `search_and_replace` (for replacing all instances of a phrase or regex) to complement diff editing and whole-file editing (thanks @samhvw8!)
+- Improved DeepSeek R1 support by capturing reasoning from the DeepSeek API as well as more OpenRouter variants, not using system messages, and fixing a crash on empty chunks. Still depends on the DeepSeek API staying up, but we'll be in a better place when it does! (thanks @Szpadel!)
+
 ## [3.3.4]
 
 - Add per-server MCP network timeout configuration ranging from 15 seconds to an hour
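
The two experimental editing tools listed under 3.3.5 are described only briefly, so here is a minimal sketch of their stated semantics: insert a block of text at a line number, and replace every instance of a phrase or regex. The helper names and signatures are hypothetical illustrations, not Roo Code's actual implementation.

// Hypothetical sketches of the editing semantics described in the 3.3.5 notes.

// insert_content: insert a block of text before a 1-based line number.
function insertContent(original: string, line: number, block: string): string {
	const lines = original.split("\n")
	const index = Math.min(Math.max(line - 1, 0), lines.length)
	lines.splice(index, 0, ...block.split("\n"))
	return lines.join("\n")
}

// search_and_replace: replace all instances of a phrase or regex.
function searchAndReplace(original: string, search: string | RegExp, replacement: string): string {
	const pattern =
		typeof search === "string"
			? new RegExp(search.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"), "g") // escape literal text
			: new RegExp(search.source, search.flags.includes("g") ? search.flags : search.flags + "g")
	return original.replace(pattern, replacement)
}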

package-lock.json

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default.

package.json

Lines changed: 4 additions & 2 deletions
@@ -3,7 +3,7 @@
 	"displayName": "Roo Code (prev. Roo Cline)",
 	"description": "A VS Code plugin that enhances coding with AI-powered automation, multi-model support, and experimental features.",
 	"publisher": "RooVeterinaryInc",
-	"version": "3.3.4",
+	"version": "3.3.6",
 	"icon": "assets/icons/rocket.png",
 	"galleryBanner": {
 		"color": "#617A91",
@@ -40,7 +40,9 @@
 		"chatgpt",
 		"sonnet",
 		"ai",
-		"llama"
+		"llama",
+		"roo code",
+		"roocode"
 	],
 	"activationEvents": [
 		"onLanguage",

src/api/index.ts

Lines changed: 3 additions & 0 deletions
@@ -14,6 +14,7 @@ import { DeepSeekHandler } from "./providers/deepseek"
 import { MistralHandler } from "./providers/mistral"
 import { VsCodeLmHandler } from "./providers/vscode-lm"
 import { ApiStream } from "./transform/stream"
+import { UnboundHandler } from "./providers/unbound"
 
 export interface SingleCompletionHandler {
 	completePrompt(prompt: string): Promise<string>
@@ -53,6 +54,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
 			return new VsCodeLmHandler(options)
 		case "mistral":
 			return new MistralHandler(options)
+		case "unbound":
+			return new UnboundHandler(options)
 		default:
 			return new AnthropicHandler(options)
 	}
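
With the new case in place, a configuration that names the provider is all a caller needs. A hypothetical call site follows; the apiProvider field name and the exact configuration shape are assumptions based on this diff, not confirmed source.

import { buildApiHandler } from "./index"
import type { ApiConfiguration } from "../shared/api"

// Assumed configuration shape; unboundApiKey and apiModelId appear in the tests below.
const handler = buildApiHandler({
	apiProvider: "unbound",
	apiModelId: "anthropic/claude-3-5-sonnet-20241022",
	unboundApiKey: process.env.UNBOUND_API_KEY,
} as ApiConfiguration)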
src/api/providers/__tests__/unbound.test.ts

Lines changed: 240 additions & 0 deletions
@@ -0,0 +1,240 @@
import { UnboundHandler } from "../unbound"
import { ApiHandlerOptions } from "../../../shared/api"
import { Anthropic } from "@anthropic-ai/sdk"

// Mock OpenAI client
const mockCreate = jest.fn()
const mockWithResponse = jest.fn()

jest.mock("openai", () => {
	return {
		__esModule: true,
		default: jest.fn().mockImplementation(() => ({
			chat: {
				completions: {
					create: (...args: any[]) => {
						const stream = {
							[Symbol.asyncIterator]: async function* () {
								// First chunk with content
								yield {
									choices: [
										{
											delta: { content: "Test response" },
											index: 0,
										},
									],
								}
								// Second chunk with usage data
								yield {
									choices: [{ delta: {}, index: 0 }],
									usage: {
										prompt_tokens: 10,
										completion_tokens: 5,
										total_tokens: 15,
									},
								}
								// Third chunk with cache usage data
								yield {
									choices: [{ delta: {}, index: 0 }],
									usage: {
										prompt_tokens: 8,
										completion_tokens: 4,
										total_tokens: 12,
										cache_creation_input_tokens: 3,
										cache_read_input_tokens: 2,
									},
								}
							},
						}

						const result = mockCreate(...args)
						if (args[0].stream) {
							mockWithResponse.mockReturnValue(
								Promise.resolve({
									data: stream,
									response: { headers: new Map() },
								}),
							)
							result.withResponse = mockWithResponse
						}
						return result
					},
				},
			},
		})),
	}
})

describe("UnboundHandler", () => {
	let handler: UnboundHandler
	let mockOptions: ApiHandlerOptions

	beforeEach(() => {
		mockOptions = {
			apiModelId: "anthropic/claude-3-5-sonnet-20241022",
			unboundApiKey: "test-api-key",
		}
		handler = new UnboundHandler(mockOptions)
		mockCreate.mockClear()
		mockWithResponse.mockClear()

		// Default mock implementation for non-streaming responses
		mockCreate.mockResolvedValue({
			id: "test-completion",
			choices: [
				{
					message: { role: "assistant", content: "Test response" },
					finish_reason: "stop",
					index: 0,
				},
			],
		})
	})

	describe("constructor", () => {
		it("should initialize with provided options", () => {
			expect(handler).toBeInstanceOf(UnboundHandler)
			expect(handler.getModel().id).toBe(mockOptions.apiModelId)
		})
	})

	describe("createMessage", () => {
		const systemPrompt = "You are a helpful assistant."
		const messages: Anthropic.Messages.MessageParam[] = [
			{
				role: "user",
				content: "Hello!",
			},
		]

		it("should handle streaming responses with text and usage data", async () => {
			const stream = handler.createMessage(systemPrompt, messages)
			const chunks: Array<{ type: string } & Record<string, any>> = []
			for await (const chunk of stream) {
				chunks.push(chunk)
			}

			expect(chunks.length).toBe(3)

			// Verify text chunk
			expect(chunks[0]).toEqual({
				type: "text",
				text: "Test response",
			})

			// Verify regular usage data
			expect(chunks[1]).toEqual({
				type: "usage",
				inputTokens: 10,
				outputTokens: 5,
			})

			// Verify usage data with cache information
			expect(chunks[2]).toEqual({
				type: "usage",
				inputTokens: 8,
				outputTokens: 4,
				cacheWriteTokens: 3,
				cacheReadTokens: 2,
			})

			expect(mockCreate).toHaveBeenCalledWith(
				expect.objectContaining({
					model: "claude-3-5-sonnet-20241022",
					messages: expect.any(Array),
					stream: true,
				}),
				expect.objectContaining({
					headers: {
						"X-Unbound-Metadata": expect.stringContaining("roo-code"),
					},
				}),
			)
		})

		it("should handle API errors", async () => {
			mockCreate.mockImplementationOnce(() => {
				throw new Error("API Error")
			})

			const stream = handler.createMessage(systemPrompt, messages)
			const chunks = []

			try {
				for await (const chunk of stream) {
					chunks.push(chunk)
				}
				fail("Expected error to be thrown")
			} catch (error) {
				expect(error).toBeInstanceOf(Error)
				expect(error.message).toBe("API Error")
			}
		})
	})

	describe("completePrompt", () => {
		it("should complete prompt successfully", async () => {
			const result = await handler.completePrompt("Test prompt")
			expect(result).toBe("Test response")
			expect(mockCreate).toHaveBeenCalledWith(
				expect.objectContaining({
					model: "claude-3-5-sonnet-20241022",
					messages: [{ role: "user", content: "Test prompt" }],
					temperature: 0,
					max_tokens: 8192,
				}),
			)
		})

		it("should handle API errors", async () => {
			mockCreate.mockRejectedValueOnce(new Error("API Error"))
			await expect(handler.completePrompt("Test prompt")).rejects.toThrow("Unbound completion error: API Error")
		})

		it("should handle empty response", async () => {
			mockCreate.mockResolvedValueOnce({
				choices: [{ message: { content: "" } }],
			})
			const result = await handler.completePrompt("Test prompt")
			expect(result).toBe("")
		})

		it("should not set max_tokens for non-Anthropic models", async () => {
			mockCreate.mockClear()

			const nonAnthropicOptions = {
				apiModelId: "openai/gpt-4o",
				unboundApiKey: "test-key",
			}
			const nonAnthropicHandler = new UnboundHandler(nonAnthropicOptions)

			await nonAnthropicHandler.completePrompt("Test prompt")
			expect(mockCreate).toHaveBeenCalledWith(
				expect.objectContaining({
					model: "gpt-4o",
					messages: [{ role: "user", content: "Test prompt" }],
					temperature: 0,
				}),
			)
			expect(mockCreate.mock.calls[0][0]).not.toHaveProperty("max_tokens")
		})
	})

	describe("getModel", () => {
		it("should return model info", () => {
			const modelInfo = handler.getModel()
			expect(modelInfo.id).toBe(mockOptions.apiModelId)
			expect(modelInfo.info).toBeDefined()
		})

		it("should return default model when invalid model provided", () => {
			const handlerWithInvalidModel = new UnboundHandler({
				...mockOptions,
				apiModelId: "invalid/model",
			})
			const modelInfo = handlerWithInvalidModel.getModel()
			expect(modelInfo.id).toBe("openai/gpt-4o") // Default model
			expect(modelInfo.info).toBeDefined()
		})
	})
})
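
Read together, these tests pin down the handler's observable contract: model ids carry a provider prefix (defaulting to openai/gpt-4o when invalid), the prefix is stripped before the request, max_tokens: 8192 is sent only for Anthropic models, every request carries an X-Unbound-Metadata header, and completePrompt errors are wrapped. What follows is a standalone sketch of that contract inferred from the tests, not the actual unbound.ts source; the metadata payload shape is an assumption.

import OpenAI from "openai"

// Inferred from the tests above; illustrative only, not the handler's source.
async function completeViaUnbound(client: OpenAI, modelId: string, prompt: string): Promise<string> {
	// "anthropic/claude-3-5-sonnet-20241022" -> "claude-3-5-sonnet-20241022"
	const model = modelId.split("/").slice(1).join("/")
	try {
		const response = await client.chat.completions.create(
			{
				model,
				messages: [{ role: "user", content: prompt }],
				temperature: 0,
				// Per the tests, max_tokens is only set for Anthropic models.
				...(modelId.startsWith("anthropic/") ? { max_tokens: 8192 } : {}),
			},
			// Payload shape assumed; the tests only check that it contains "roo-code".
			{ headers: { "X-Unbound-Metadata": JSON.stringify({ app: "roo-code" }) } },
		)
		return response.choices[0]?.message?.content ?? ""
	} catch (error) {
		const message = error instanceof Error ? error.message : String(error)
		throw new Error(`Unbound completion error: ${message}`)
	}
}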

src/api/providers/openai.ts

Lines changed: 4 additions & 2 deletions
@@ -62,13 +62,15 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
 		const stream = await this.client.chat.completions.create(requestOptions)
 
 		for await (const chunk of stream) {
-			const delta = chunk.choices[0]?.delta
-			if (delta?.content) {
+			const delta = chunk.choices[0]?.delta ?? {}
+
+			if (delta.content) {
 				yield {
 					type: "text",
 					text: delta.content,
 				}
 			}
+
 			if ("reasoning_content" in delta && delta.reasoning_content) {
 				yield {
 					type: "reasoning",

src/api/providers/openrouter.ts

Lines changed: 16 additions & 7 deletions
@@ -114,13 +114,21 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler {
 		}
 
 		let temperature = 0
-		switch (this.getModel().id) {
-			case "deepseek/deepseek-r1":
-				// Recommended temperature for DeepSeek reasoning models
-				temperature = 0.6
-				// DeepSeek highly recommends using user instead of system role
-				openAiMessages[0].role = "user"
-				openAiMessages = convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
+		let topP: number | undefined = undefined
+
+		// Handle models based on deepseek-r1
+		if (
+			this.getModel().id === "deepseek/deepseek-r1" ||
+			this.getModel().id.startsWith("deepseek/deepseek-r1:") ||
+			this.getModel().id === "perplexity/sonar-reasoning"
+		) {
+			// Recommended temperature for DeepSeek reasoning models
+			temperature = 0.6
+			// DeepSeek highly recommends using the user role
+			// instead of the system role
+			openAiMessages = convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
+			// Some providers support top_p; 0.95 is the value DeepSeek used in their benchmarks
+			topP = 0.95
 		}
 
 		// https://openrouter.ai/docs/transforms
@@ -129,6 +137,7 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler {
 			model: this.getModel().id,
 			max_tokens: maxTokens,
 			temperature: temperature,
+			top_p: topP,
 			messages: openAiMessages,
 			stream: true,
 			include_reasoning: true,
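
Since the same three-way model check gates the temperature, the message format, and top_p, it reads naturally as a predicate. A small extracted sketch; the helper name is hypothetical and not part of the diff:

// Hypothetical helper equivalent to the inline check above: deepseek-r1,
// its OpenRouter variants (suffixed ids such as "deepseek/deepseek-r1:free"),
// and perplexity/sonar-reasoning all get DeepSeek's recommended sampling
// (temperature 0.6, top_p 0.95) and the R1 message format.
function usesR1Format(modelId: string): boolean {
	return (
		modelId === "deepseek/deepseek-r1" ||
		modelId.startsWith("deepseek/deepseek-r1:") ||
		modelId === "perplexity/sonar-reasoning"
	)
}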
