Commit ffae0b9

Welcome page + new cline provider (RooCodeInc#1519)

* welcome page now shows login button
* adding cline apikey to context
* added cline provider + making sure welcome page closes
* adjusting welcome page language
* swapped cline provider to use openai flavor requests
* handling credit limit errors (super hacky ugh)
* welcome view hiding api options by default
* added back account page and relying more heavily on the firebase custom token to ensure persistent login
* persisting login state through window reloads
* renaming Account -> Cline Account
* use openrouter format with cline api
* Update welcome experience
* Fix error handling
* added generation details tracking to cline provider
* small edit to account view
* logging cline instead of openrouter
* logging cline instead of openrouter
* response.data instead of response.data.data
* set isdev to false for prerelease
* changeset added
* Update tricky-zebras-talk.md
* Update tricky-zebras-talk.md
* welcome page language
* more logging and updated authentication system to be more robust and reliable
* only removing custom token on explicit log out action
* even more aggressive custom token fetching + storing in apiConfig
* moving firebase auth logic to webview because nodejs firebase token refresh is not supported
* cleaned up all old logic for extension server side auth persistence
* reconciling merge conflicts with main
* reconciling merge conflicts
* package-lock.json
* package lock
* package-lock
* packagejson + lock
* use standard API Request Failed title for all API errors while maintaining detailed credit limit information in error content
* deleting packagejson line for vertexai
* reverting package-lock changes
* package-lock.json straight from main
* removing redundant csp allowance
* Fix package-lock
* Add missing options to cline stream
* Remove duplicate csp
* Remove email subscribe
* Update welcome page and cline account view
* Fixes
* Fix errors

Co-authored-by: Saoud Rizwan <[email protected]>
1 parent b4b307e · commit ffae0b9

26 files changed: +1785 -544 lines changed

.changeset/tricky-zebras-talk.md

Lines changed: 11 additions & 0 deletions

@@ -0,0 +1,11 @@
+---
+"claude-dev": minor
+---
+
+Added Cline as a new provider with native authentication, account management, and credit limit handling. Updates include:
+- Native authentication flow with email and Google OAuth
+- Account management page showing user information
+- Credit limit error handling and UI
+- Updated welcome experience with Cline login option
+- Integration using OpenRouter request format
+- Firebase custom token state persistence
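
The API-key plumbing the changeset mentions surfaces in this commit as new fields on the provider options. A minimal sketch of the fields the new handler below actually reads; the field names come from the handler code in this commit, but collecting them into one interface like this is illustrative, not the repo's actual layout:

import type { ModelInfo } from "../../shared/api"

// Sketch (assumption): option fields consumed by the new ClineHandler.
interface ClineProviderOptions {
	clineApiKey?: string // issued after signing in on the new welcome page
	openRouterModelId?: string // Cline reuses the OpenRouter model catalog
	openRouterModelInfo?: ModelInfo
	o3MiniReasoningEffort?: string
	thinkingBudgetTokens?: number
}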

src/api/index.ts

Lines changed: 3 additions & 0 deletions

@@ -16,6 +16,7 @@ import { TogetherHandler } from "./providers/together"
 import { QwenHandler } from "./providers/qwen"
 import { MistralHandler } from "./providers/mistral"
 import { VsCodeLmHandler } from "./providers/vscode-lm"
+import { ClineHandler } from "./providers/cline"
 import { LiteLlmHandler } from "./providers/litellm"
 import { AskSageHandler } from "./providers/asksage"
 import { XAIHandler } from "./providers/xai"
@@ -62,6 +63,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
 			return new MistralHandler(options)
 		case "vscode-lm":
 			return new VsCodeLmHandler(options)
+		case "cline":
+			return new ClineHandler(options)
 		case "litellm":
 			return new LiteLlmHandler(options)
 		case "asksage":
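
With the new case in place, selecting the provider is purely a configuration concern. A hedged usage sketch: the "cline" provider id and clineApiKey are confirmed by this diff, while the exact shape of ApiConfiguration beyond those two fields is an assumption:

import { buildApiHandler } from "./api"

// Sketch: dispatch to the new ClineHandler via the provider switch above.
const handler = buildApiHandler({
	apiProvider: "cline", // routes to `new ClineHandler(options)`
	clineApiKey: process.env.CLINE_API_KEY, // stored after login (assumption)
})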

src/api/providers/cline.ts

Lines changed: 62 additions & 0 deletions

@@ -0,0 +1,62 @@
+import { Anthropic } from "@anthropic-ai/sdk"
+import OpenAI from "openai"
+import { ApiHandler } from "../"
+import { ApiHandlerOptions, ModelInfo, openRouterDefaultModelId, openRouterDefaultModelInfo } from "../../shared/api"
+import { streamOpenRouterFormatRequest } from "../transform/openrouter-stream"
+import { ApiStream } from "../transform/stream"
+import axios from "axios"
+
+export class ClineHandler implements ApiHandler {
+	private options: ApiHandlerOptions
+	private client: OpenAI
+
+	constructor(options: ApiHandlerOptions) {
+		this.options = options
+		this.client = new OpenAI({
+			baseURL: "https://api.cline.bot/v1",
+			apiKey: this.options.clineApiKey || "",
+		})
+	}
+
+	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+		const model = this.getModel()
+		const genId = yield* streamOpenRouterFormatRequest(
+			this.client,
+			systemPrompt,
+			messages,
+			model,
+			this.options.o3MiniReasoningEffort,
+			this.options.thinkingBudgetTokens,
+		)
+
+		try {
+			const response = await axios.get(`https://api.cline.bot/v1/generation?id=${genId}`, {
+				headers: {
+					Authorization: `Bearer ${this.options.clineApiKey}`,
+				},
+				timeout: 5_000, // this request hangs sometimes
+			})
+
+			const generation = response.data
+			console.log("cline generation details:", generation)
+			yield {
+				type: "usage",
+				inputTokens: generation?.native_tokens_prompt || 0,
+				outputTokens: generation?.native_tokens_completion || 0,
+				totalCost: generation?.total_cost || 0,
+			}
+		} catch (error) {
+			// ignore if fails
+			console.error("Error fetching cline generation details:", error)
+		}
+	}
+
+	getModel(): { id: string; info: ModelInfo } {
+		const modelId = this.options.openRouterModelId
+		const modelInfo = this.options.openRouterModelInfo
+		if (modelId && modelInfo) {
+			return { id: modelId, info: modelInfo }
+		}
+		return { id: openRouterDefaultModelId, info: openRouterDefaultModelInfo }
+	}
+}
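
A sketch of how a caller might consume this handler's stream. The "text" and "usage" chunk shapes mirror what the code above yields; the message literal and the logging are illustrative, and the exact ApiStream chunk union is assumed from the yields shown in this commit:

import { ClineHandler } from "./providers/cline"

// Sketch: driving ClineHandler.createMessage and reading its chunks.
const handler = new ClineHandler({ clineApiKey: "<your-key>" })

for await (const chunk of handler.createMessage("You are a concise assistant.", [
	{ role: "user", content: "Summarize this commit." },
])) {
	if (chunk.type === "text") {
		process.stdout.write(chunk.text)
	} else if (chunk.type === "usage") {
		// Populated from the follow-up GET /v1/generation call above
		// (native_tokens_prompt, native_tokens_completion, total_cost),
		// each defaulting to 0 when the lookup fails.
		console.log(`\ntokens in/out: ${chunk.inputTokens}/${chunk.outputTokens}, cost: $${chunk.totalCost}`)
	}
}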

src/api/providers/openrouter.ts

Lines changed: 9 additions & 192 deletions

@@ -5,7 +5,7 @@ import OpenAI from "openai"
 import { withRetry } from "../retry"
 import { ApiHandler } from "../"
 import { ApiHandlerOptions, ModelInfo, openRouterDefaultModelId, openRouterDefaultModelInfo } from "../../shared/api"
-import { convertToOpenAiMessages } from "../transform/openai-format"
+import { streamOpenRouterFormatRequest } from "../transform/openrouter-stream"
 import { ApiStream } from "../transform/stream"
 import { convertToR1Format } from "../transform/r1-format"
 import { OpenRouterErrorResponse } from "./types"
@@ -29,197 +29,14 @@ export class OpenRouterHandler implements ApiHandler {
 	@withRetry()
 	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
 		const model = this.getModel()
-
-		// Convert Anthropic messages to OpenAI format
-		let openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
-			{ role: "system", content: systemPrompt },
-			...convertToOpenAiMessages(messages),
-		]
-
-		// prompt caching: https://openrouter.ai/docs/prompt-caching
-		// this is specifically for claude models (some models may 'support prompt caching' automatically without this)
-		switch (model.id) {
-			case "anthropic/claude-3.7-sonnet":
-			case "anthropic/claude-3.7-sonnet:beta":
-			case "anthropic/claude-3.7-sonnet:thinking":
-			case "anthropic/claude-3-7-sonnet":
-			case "anthropic/claude-3-7-sonnet:beta":
-			case "anthropic/claude-3.5-sonnet":
-			case "anthropic/claude-3.5-sonnet:beta":
-			case "anthropic/claude-3.5-sonnet-20240620":
-			case "anthropic/claude-3.5-sonnet-20240620:beta":
-			case "anthropic/claude-3-5-haiku":
-			case "anthropic/claude-3-5-haiku:beta":
-			case "anthropic/claude-3-5-haiku-20241022":
-			case "anthropic/claude-3-5-haiku-20241022:beta":
-			case "anthropic/claude-3-haiku":
-			case "anthropic/claude-3-haiku:beta":
-			case "anthropic/claude-3-opus":
-			case "anthropic/claude-3-opus:beta":
-				openAiMessages[0] = {
-					role: "system",
-					content: [
-						{
-							type: "text",
-							text: systemPrompt,
-							// @ts-ignore-next-line
-							cache_control: { type: "ephemeral" },
-						},
-					],
-				}
-				// Add cache_control to the last two user messages
-				// (note: this works because we only ever add one user message at a time, but if we added multiple we'd need to mark the user message before the last assistant message)
-				const lastTwoUserMessages = openAiMessages.filter((msg) => msg.role === "user").slice(-2)
-				lastTwoUserMessages.forEach((msg) => {
-					if (typeof msg.content === "string") {
-						msg.content = [{ type: "text", text: msg.content }]
-					}
-					if (Array.isArray(msg.content)) {
-						// NOTE: this is fine since env details will always be added at the end. but if it weren't there, and the user added a image_url type message, it would pop a text part before it and then move it after to the end.
-						let lastTextPart = msg.content.filter((part) => part.type === "text").pop()
-
-						if (!lastTextPart) {
-							lastTextPart = { type: "text", text: "..." }
-							msg.content.push(lastTextPart)
-						}
-						// @ts-ignore-next-line
-						lastTextPart["cache_control"] = { type: "ephemeral" }
-					}
-				})
-				break
-			default:
-				break
-		}
-
-		// Not sure how openrouter defaults max tokens when no value is provided, but the anthropic api requires this value and since they offer both 4096 and 8192 variants, we should ensure 8192.
-		// (models usually default to max tokens allowed)
-		let maxTokens: number | undefined
-		switch (model.id) {
-			case "anthropic/claude-3.7-sonnet":
-			case "anthropic/claude-3.7-sonnet:beta":
-			case "anthropic/claude-3.7-sonnet:thinking":
-			case "anthropic/claude-3-7-sonnet":
-			case "anthropic/claude-3-7-sonnet:beta":
-			case "anthropic/claude-3.5-sonnet":
-			case "anthropic/claude-3.5-sonnet:beta":
-			case "anthropic/claude-3.5-sonnet-20240620":
-			case "anthropic/claude-3.5-sonnet-20240620:beta":
-			case "anthropic/claude-3-5-haiku":
-			case "anthropic/claude-3-5-haiku:beta":
-			case "anthropic/claude-3-5-haiku-20241022":
-			case "anthropic/claude-3-5-haiku-20241022:beta":
-				maxTokens = 8_192
-				break
-		}
-
-		let temperature: number | undefined = 0
-		let topP: number | undefined = undefined
-		if (this.getModel().id.startsWith("deepseek/deepseek-r1") || this.getModel().id === "perplexity/sonar-reasoning") {
-			// Recommended values from DeepSeek
-			temperature = 0.7
-			topP = 0.95
-			openAiMessages = convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
-		}
-
-		let reasoning: { max_tokens: number } | undefined = undefined
-		switch (model.id) {
-			case "anthropic/claude-3.7-sonnet":
-			case "anthropic/claude-3.7-sonnet:beta":
-			case "anthropic/claude-3.7-sonnet:thinking":
-			case "anthropic/claude-3-7-sonnet":
-			case "anthropic/claude-3-7-sonnet:beta":
-				let budget_tokens = this.options.thinkingBudgetTokens || 0
-				const reasoningOn = budget_tokens !== 0 ? true : false
-				if (reasoningOn) {
-					temperature = undefined // extended thinking does not support non-1 temperature
-					reasoning = { max_tokens: budget_tokens }
-				}
-				break
-		}
-
-		// Removes messages in the middle when close to context window limit. Should not be applied to models that support prompt caching since it would continuously break the cache.
-		let shouldApplyMiddleOutTransform = !model.info.supportsPromptCache
-		// except for deepseek (which we set supportsPromptCache to true for), where because the context window is so small our truncation algo might miss and we should use openrouter's middle-out transform as a fallback to ensure we don't exceed the context window (FIXME: once we have a more robust token estimator we should not rely on this)
-		if (model.id === "deepseek/deepseek-chat") {
-			shouldApplyMiddleOutTransform = true
-		}
-
-		// @ts-ignore-next-line
-		const stream = await this.client.chat.completions.create({
-			model: model.id,
-			max_tokens: maxTokens,
-			temperature: temperature,
-			top_p: topP,
-			messages: openAiMessages,
-			stream: true,
-			transforms: shouldApplyMiddleOutTransform ? ["middle-out"] : undefined,
-			include_reasoning: true,
-			...(model.id === "openai/o3-mini" ? { reasoning_effort: this.options.o3MiniReasoningEffort || "medium" } : {}),
-			...(reasoning ? { reasoning } : {}),
-		})
-
-		let genId: string | undefined
-
-		for await (const chunk of stream) {
-			// openrouter returns an error object instead of the openai sdk throwing an error
-			if ("error" in chunk) {
-				const error = chunk.error as OpenRouterErrorResponse["error"]
-				console.error(`OpenRouter API Error: ${error?.code} - ${error?.message}`)
-				// Include metadata in the error message if available
-				const metadataStr = error.metadata ? `\nMetadata: ${JSON.stringify(error.metadata, null, 2)}` : ""
-				throw new Error(`OpenRouter API Error ${error.code}: ${error.message}${metadataStr}`)
-			}
-
-			if (!genId && chunk.id) {
-				genId = chunk.id
-			}
-
-			const delta = chunk.choices[0]?.delta
-			if (delta?.content) {
-				yield {
-					type: "text",
-					text: delta.content,
-				}
-			}
-
-			// Reasoning tokens are returned separately from the content
-			if ("reasoning" in delta && delta.reasoning) {
-				// console.log("reasoning", delta.reasoning)
-				yield {
-					type: "reasoning",
-					// @ts-ignore-next-line
-					reasoning: delta.reasoning,
-				}
-
-				// if (didStreamThinkTagInReasoning) {
-				// 	yield {
-				// 		type: "text",
-				// 		// @ts-ignore-next-line
-				// 		text: delta.reasoning,
-				// 	}
-				// } else {
-				// 	yield {
-				// 		type: "reasoning",
-				// 		// @ts-ignore-next-line
-				// 		text: delta.reasoning,
-				// 	}
-
-				// 	// @ts-ignore-next-line
-				// 	reasoningResponse += delta.reasoning
-				// 	if (reasoningResponse.includes("</think>")) {
-				// 		didStreamThinkTagInReasoning = true
-				// 		console.log("did hit think tag", reasoningResponse)
-				// 	}
-				// }
-			}
-			// if (chunk.usage) {
-			// 	yield {
-			// 		type: "usage",
-			// 		inputTokens: chunk.usage.prompt_tokens || 0,
-			// 		outputTokens: chunk.usage.completion_tokens || 0,
-			// 	}
-			// }
-		}
+		const genId = yield* streamOpenRouterFormatRequest(
+			this.client,
+			systemPrompt,
+			messages,
+			model,
+			this.options.o3MiniReasoningEffort,
+			this.options.thinkingBudgetTokens,
+		)
 
 		if (genId) {
 			await delay(500) // FIXME: necessary delay to ensure generation endpoint is ready

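Both OpenRouterHandler and ClineHandler now delegate to streamOpenRouterFormatRequest from ../transform/openrouter-stream, whose body is not part of this diff. Its signature can be inferred from the two call sites; the parameter names here are assumptions, and the `const genId = yield*` pattern implies the generator's return value is the generation id:

import type OpenAI from "openai"
import type { Anthropic } from "@anthropic-ai/sdk"
import type { ModelInfo } from "../../shared/api"
import type { ApiStream } from "../transform/stream"

// Inferred signature only: the helper streams an OpenRouter-format chat
// completion, yields ApiStream chunks (text/reasoning/usage), and returns
// the generation id both handlers use for the follow-up usage lookup.
declare function streamOpenRouterFormatRequest(
	client: OpenAI,
	systemPrompt: string,
	messages: Anthropic.Messages.MessageParam[],
	model: { id: string; info: ModelInfo },
	o3MiniReasoningEffort?: string,
	thinkingBudgetTokens?: number,
): ApiStream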