
Commit 4d22ae5

Merge remote-tracking branch 'upstream/main' into feat/vertex-prompt-caching

2 parents: 9b267e9 + 0c44241


61 files changed: +3144, -1693 lines

.changeset/sour-parents-hug.md

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+---
+"roo-cline": patch
+---
+
+Stop removing commas from terminal output

CHANGELOG.md

Lines changed: 20 additions & 0 deletions

@@ -1,5 +1,25 @@
 # Roo Code Changelog

+## [3.7.6]
+
+- Handle really long text better in the ChatRow, similar to TaskHeader (thanks @joemanley201!)
+- Support multiple files in drag-and-drop
+- Truncate search_file output to avoid crashing the extension
+- Better OpenRouter error handling (no more "Provider Error")
+- Add slider to control max output tokens for thinking models
+
+## [3.7.5]
+
+- Fix context window truncation math (see [#1173](https://github.com/RooVetGit/Roo-Code/issues/1173))
+- Fix various issues with the model picker (thanks @System233!)
+- Fix model input / output cost parsing (thanks @System233!)
+- Add drag-and-drop for files
+- Enable the "Thinking Budget" slider for Claude 3.7 Sonnet on OpenRouter
+
+## [3.7.4]
+
+- Fix a bug that prevented the "Thinking" setting from properly updating when switching profiles.
+
 ## [3.7.3]

 - Support for ["Thinking"](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking) Sonnet 3.7 when using the Anthropic provider.

package-lock.json

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default.

package.json

Lines changed: 2 additions & 2 deletions

@@ -1,9 +1,9 @@
 {
     "name": "roo-cline",
     "displayName": "Roo Code (prev. Roo Cline)",
-    "description": "An AI-powered autonomous coding agent that lives in your editor.",
+    "description": "A whole dev team of AI agents in your editor.",
     "publisher": "RooVeterinaryInc",
-    "version": "3.7.3",
+    "version": "3.7.6",
     "icon": "assets/icons/rocket.png",
     "galleryBanner": {
         "color": "#617A91",

src/api/providers/anthropic.ts

Lines changed: 20 additions & 11 deletions

@@ -14,8 +14,6 @@ import { ApiStream } from "../transform/stream"

 const ANTHROPIC_DEFAULT_TEMPERATURE = 0

-const THINKING_MODELS = ["claude-3-7-sonnet-20250219"]
-
 export class AnthropicHandler implements ApiHandler, SingleCompletionHandler {
     private options: ApiHandlerOptions
     private client: Anthropic
@@ -32,16 +30,27 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler {
     async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
         let stream: AnthropicStream<Anthropic.Messages.RawMessageStreamEvent>
         const cacheControl: CacheControlEphemeral = { type: "ephemeral" }
-        const modelId = this.getModel().id
-        const maxTokens = this.getModel().info.maxTokens || 8192
+        let { id: modelId, info: modelInfo } = this.getModel()
+        const maxTokens = this.options.modelMaxTokens || modelInfo.maxTokens || 8192
         let temperature = this.options.modelTemperature ?? ANTHROPIC_DEFAULT_TEMPERATURE
         let thinking: BetaThinkingConfigParam | undefined = undefined

-        if (THINKING_MODELS.includes(modelId)) {
-            thinking = this.options.anthropicThinking
-                ? { type: "enabled", budget_tokens: this.options.anthropicThinking }
-                : { type: "disabled" }
-
+        // Anthropic "Thinking" models require a temperature of 1.0.
+        if (modelId === "claude-3-7-sonnet-20250219:thinking") {
+            // The `:thinking` variant is a virtual identifier for the
+            // `claude-3-7-sonnet-20250219` model with a thinking budget.
+            // We can handle this more elegantly in the future.
+            modelId = "claude-3-7-sonnet-20250219"
+
+            // Clamp the thinking budget to be at most 80% of max tokens and at
+            // least 1024 tokens.
+            const maxBudgetTokens = Math.floor(maxTokens * 0.8)
+            const budgetTokens = Math.max(
+                Math.min(this.options.anthropicThinking ?? maxBudgetTokens, maxBudgetTokens),
+                1024,
+            )
+
+            thinking = { type: "enabled", budget_tokens: budgetTokens }
             temperature = 1.0
         }

@@ -114,8 +123,8 @@ export class AnthropicHandler implements ApiHandler, SingleCompletionHandler {
             default: {
                 stream = (await this.client.messages.create({
                     model: modelId,
-                    max_tokens: this.getModel().info.maxTokens || 8192,
-                    temperature: this.options.modelTemperature ?? ANTHROPIC_DEFAULT_TEMPERATURE,
+                    max_tokens: maxTokens,
+                    temperature,
                     system: [{ text: systemPrompt, type: "text" }],
                     messages,
                     // tools,
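
To make the clamp concrete, here is a minimal standalone sketch of the same rule with a few worked values; the function name clampThinkingBudget is hypothetical, used only for illustration:

    // Sketch of the budget rule in the diff above: cap the thinking budget
    // at 80% of max_tokens and floor it at 1024 tokens.
    function clampThinkingBudget(requested: number | undefined, maxTokens: number): number {
        const maxBudgetTokens = Math.floor(maxTokens * 0.8)
        return Math.max(Math.min(requested ?? maxBudgetTokens, maxBudgetTokens), 1024)
    }

    clampThinkingBudget(undefined, 16384) // 13107 — defaults to 80% of max tokens
    clampThinkingBudget(50000, 16384) // 13107 — clamped down to the 80% cap
    clampThinkingBudget(500, 16384) // 1024 — raised to the minimum budget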

src/api/providers/glama.ts

Lines changed: 46 additions & 3 deletions

@@ -1,10 +1,12 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import axios from "axios"
 import OpenAI from "openai"
-import { ApiHandler, SingleCompletionHandler } from "../"
+
 import { ApiHandlerOptions, ModelInfo, glamaDefaultModelId, glamaDefaultModelInfo } from "../../shared/api"
+import { parseApiPrice } from "../../utils/cost"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream } from "../transform/stream"
+import { ApiHandler, SingleCompletionHandler } from "../"

 const GLAMA_DEFAULT_TEMPERATURE = 0

@@ -69,7 +71,7 @@ export class GlamaHandler implements ApiHandler, SingleCompletionHandler {
         let maxTokens: number | undefined

         if (this.getModel().id.startsWith("anthropic/")) {
-            maxTokens = 8_192
+            maxTokens = this.getModel().info.maxTokens
         }

         const requestOptions: OpenAI.Chat.ChatCompletionCreateParams = {
@@ -177,7 +179,7 @@ export class GlamaHandler implements ApiHandler, SingleCompletionHandler {
         }

         if (this.getModel().id.startsWith("anthropic/")) {
-            requestOptions.max_tokens = 8192
+            requestOptions.max_tokens = this.getModel().info.maxTokens
         }

         const response = await this.client.chat.completions.create(requestOptions)
@@ -190,3 +192,44 @@ export class GlamaHandler implements ApiHandler, SingleCompletionHandler {
         }
     }
 }
+
+export async function getGlamaModels() {
+    const models: Record<string, ModelInfo> = {}
+
+    try {
+        const response = await axios.get("https://glama.ai/api/gateway/v1/models")
+        const rawModels = response.data
+
+        for (const rawModel of rawModels) {
+            const modelInfo: ModelInfo = {
+                maxTokens: rawModel.maxTokensOutput,
+                contextWindow: rawModel.maxTokensInput,
+                supportsImages: rawModel.capabilities?.includes("input:image"),
+                supportsComputerUse: rawModel.capabilities?.includes("computer_use"),
+                supportsPromptCache: rawModel.capabilities?.includes("caching"),
+                inputPrice: parseApiPrice(rawModel.pricePerToken?.input),
+                outputPrice: parseApiPrice(rawModel.pricePerToken?.output),
+                description: undefined,
+                cacheWritesPrice: parseApiPrice(rawModel.pricePerToken?.cacheWrite),
+                cacheReadsPrice: parseApiPrice(rawModel.pricePerToken?.cacheRead),
+            }
+
+            switch (true) {
+                case rawModel.id.startsWith("anthropic/claude-3-7-sonnet"):
+                    modelInfo.maxTokens = 16384
+                    break
+                case rawModel.id.startsWith("anthropic/"):
+                    modelInfo.maxTokens = 8192
+                    break
+                default:
+                    break
+            }
+
+            models[rawModel.id] = modelInfo
+        }
+    } catch (error) {
+        console.error(`Error fetching Glama models: ${JSON.stringify(error, Object.getOwnPropertyNames(error), 2)}`)
+    }
+
+    return models
+}
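
A brief usage sketch of the new helper. Since the switch dispatches on boolean startsWith cases, it switches on true; the model id in the comment below is an assumption for illustration, not a confirmed Glama catalog entry:

    import { getGlamaModels } from "./api/providers/glama"

    // Returns an empty record if the request fails, so callers need no
    // special error handling.
    const glamaModels = await getGlamaModels()

    // Any id starting with "anthropic/claude-3-7-sonnet" has its maxTokens
    // overridden to 16384 by the switch above (assumed example id).
    console.log(glamaModels["anthropic/claude-3-7-sonnet"]?.maxTokens)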

src/api/providers/lmstudio.ts

Lines changed: 16 additions & 0 deletions

@@ -1,5 +1,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
+import axios from "axios"
+
 import { ApiHandler, SingleCompletionHandler } from "../"
 import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api"
 import { convertToOpenAiMessages } from "../transform/openai-format"
@@ -72,3 +74,17 @@
         }
     }
 }
+
+export async function getLmStudioModels(baseUrl = "http://localhost:1234") {
+    try {
+        if (!URL.canParse(baseUrl)) {
+            return []
+        }
+
+        const response = await axios.get(`${baseUrl}/v1/models`)
+        const modelsArray = response.data?.data?.map((model: any) => model.id) || []
+        return [...new Set<string>(modelsArray)]
+    } catch (error) {
+        return []
+    }
+}

src/api/providers/ollama.ts

Lines changed: 16 additions & 0 deletions

@@ -1,5 +1,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
+import axios from "axios"
+
 import { ApiHandler, SingleCompletionHandler } from "../"
 import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api"
 import { convertToOpenAiMessages } from "../transform/openai-format"
@@ -88,3 +90,17 @@
         }
     }
 }
+
+export async function getOllamaModels(baseUrl = "http://localhost:11434") {
+    try {
+        if (!URL.canParse(baseUrl)) {
+            return []
+        }
+
+        const response = await axios.get(`${baseUrl}/api/tags`)
+        const modelsArray = response.data?.models?.map((model: any) => model.name) || []
+        return [...new Set<string>(modelsArray)]
+    } catch (error) {
+        return []
+    }
+}
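
getLmStudioModels and getOllamaModels share the same shape: validate the base URL, query the server's list endpoint, and de-duplicate the ids, returning [] on any failure. A hypothetical caller might fetch both in parallel:

    import { getLmStudioModels } from "./api/providers/lmstudio"
    import { getOllamaModels } from "./api/providers/ollama"

    // Both helpers swallow network errors and return [], so an unreachable
    // local server simply yields an empty model list rather than a crash.
    const [lmStudioIds, ollamaIds] = await Promise.all([getLmStudioModels(), getOllamaModels()])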

src/api/providers/openai.ts

Lines changed: 25 additions & 0 deletions

@@ -1,5 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI, { AzureOpenAI } from "openai"
+import axios from "axios"

 import {
     ApiHandlerOptions,
@@ -166,3 +167,27 @@
         }
     }
 }
+
+export async function getOpenAiModels(baseUrl?: string, apiKey?: string) {
+    try {
+        if (!baseUrl) {
+            return []
+        }
+
+        if (!URL.canParse(baseUrl)) {
+            return []
+        }
+
+        const config: Record<string, any> = {}
+
+        if (apiKey) {
+            config["headers"] = { Authorization: `Bearer ${apiKey}` }
+        }
+
+        const response = await axios.get(`${baseUrl}/models`, config)
+        const modelsArray = response.data?.data?.map((model: any) => model.id) || []
+        return [...new Set<string>(modelsArray)]
+    } catch (error) {
+        return []
+    }
+}
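
The OpenAI variant additionally sends a bearer-token header when an API key is supplied. A hypothetical call against an OpenAI-compatible endpoint (the URL and environment variable are assumptions for illustration):

    import { getOpenAiModels } from "./api/providers/openai"

    // The helper requests `${baseUrl}/models` directly, so the base URL must
    // already include any path prefix such as /v1 (assumed endpoint shape).
    const openAiIds = await getOpenAiModels("https://api.openai.com/v1", process.env.OPENAI_API_KEY)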
