Skip to content

Commit 548338d

Browse files
Add Alibaba Qwen models (plus, max, coder-plus, turbo), in both stable and latest versions. (RooCodeInc#1648)
* Add Alibaba qwen-max, qwen-plus, qwen-turbo, and qwen-coder-plus stable/latest models * Add Alibaba qwen-max, qwen-plus, qwen-turbo, and qwen-coder-plus stable/latest models * Provide the API line choice for international users * Remove redundant code * Copy fixes * Create dry-socks-talk.md --------- Co-authored-by: Saoud Rizwan <[email protected]>
1 parent c88b5c6 commit 548338d

File tree

8 files changed

+257
-0
lines changed

8 files changed

+257
-0
lines changed

.changeset/dry-socks-talk.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"claude-dev": patch
3+
---
4+
5+
Add Alibaba qwen models plus/max/coder-plus/turbo

src/api/index.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ import { GeminiHandler } from "./providers/gemini"
1111
import { OpenAiNativeHandler } from "./providers/openai-native"
1212
import { ApiStream } from "./transform/stream"
1313
import { DeepSeekHandler } from "./providers/deepseek"
14+
import { QwenHandler } from "./providers/qwen"
1415
import { MistralHandler } from "./providers/mistral"
1516
import { VsCodeLmHandler } from "./providers/vscode-lm"
1617
import { LiteLlmHandler } from "./providers/litellm"
@@ -47,6 +48,8 @@ export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
4748
return new OpenAiNativeHandler(options)
4849
case "deepseek":
4950
return new DeepSeekHandler(options)
51+
case "qwen":
52+
return new QwenHandler(options)
5053
case "mistral":
5154
return new MistralHandler(options)
5255
case "vscode-lm":

src/api/providers/qwen.ts

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
import { Anthropic } from "@anthropic-ai/sdk"
2+
import OpenAI from "openai"
3+
import { ApiHandler } from "../"
4+
import { ApiHandlerOptions, QwenModelId, ModelInfo, qwenDefaultModelId, qwenModels } from "../../shared/api"
5+
import { convertToOpenAiMessages } from "../transform/openai-format"
6+
import { ApiStream } from "../transform/stream"
7+
8+
export class QwenHandler implements ApiHandler {
9+
private options: ApiHandlerOptions
10+
private client: OpenAI
11+
12+
constructor(options: ApiHandlerOptions) {
13+
this.options = options
14+
this.client = new OpenAI({
15+
baseURL: this.options.qwenApiLine || "https://dashscope-intl.aliyuncs.com/compatible-mode/v1",
16+
apiKey: this.options.qwenApiKey,
17+
})
18+
}
19+
20+
getModel(): { id: QwenModelId; info: ModelInfo } {
21+
const modelId = this.options.apiModelId
22+
if (modelId && modelId in qwenModels) {
23+
const id = modelId as QwenModelId
24+
return { id, info: qwenModels[id] }
25+
}
26+
return {
27+
id: qwenDefaultModelId,
28+
info: qwenModels[qwenDefaultModelId],
29+
}
30+
}
31+
32+
async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
33+
const model = this.getModel()
34+
let openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
35+
{ role: "system", content: systemPrompt },
36+
...convertToOpenAiMessages(messages),
37+
]
38+
39+
const stream = await this.client.chat.completions.create({
40+
model: model.id,
41+
max_completion_tokens: model.info.maxTokens,
42+
messages: openAiMessages,
43+
stream: true,
44+
stream_options: { include_usage: true },
45+
})
46+
47+
for await (const chunk of stream) {
48+
const delta = chunk.choices[0]?.delta
49+
if (delta?.content) {
50+
yield {
51+
type: "text",
52+
text: delta.content,
53+
}
54+
}
55+
56+
if (delta && "reasoning_content" in delta && delta.reasoning_content) {
57+
yield {
58+
type: "reasoning",
59+
reasoning: (delta.reasoning_content as string | undefined) || "",
60+
}
61+
}
62+
63+
if (chunk.usage) {
64+
yield {
65+
type: "usage",
66+
inputTokens: chunk.usage.prompt_tokens || 0,
67+
outputTokens: chunk.usage.completion_tokens || 0,
68+
// @ts-ignore-next-line
69+
cacheReadTokens: chunk.usage.prompt_cache_hit_tokens || 0,
70+
// @ts-ignore-next-line
71+
cacheWriteTokens: chunk.usage.prompt_cache_miss_tokens || 0,
72+
}
73+
}
74+
}
75+
}
76+
}

src/core/webview/ClineProvider.ts

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@ type SecretKey =
4545
| "geminiApiKey"
4646
| "openAiNativeApiKey"
4747
| "deepSeekApiKey"
48+
| "qwenApiKey"
4849
| "mistralApiKey"
4950
| "authToken"
5051
| "authNonce"
@@ -78,6 +79,7 @@ type GlobalStateKey =
7879
| "previousModeModelInfo"
7980
| "liteLlmBaseUrl"
8081
| "liteLlmModelId"
82+
| "qwenApiLine"
8183

8284
export const GlobalFileNames = {
8385
apiConversationHistory: "api_conversation_history.json",
@@ -440,13 +442,15 @@ export class ClineProvider implements vscode.WebviewViewProvider {
440442
geminiApiKey,
441443
openAiNativeApiKey,
442444
deepSeekApiKey,
445+
qwenApiKey,
443446
mistralApiKey,
444447
azureApiVersion,
445448
openRouterModelId,
446449
openRouterModelInfo,
447450
vsCodeLmModelSelector,
448451
liteLlmBaseUrl,
449452
liteLlmModelId,
453+
qwenApiLine,
450454
} = message.apiConfiguration
451455
await this.updateGlobalState("apiProvider", apiProvider)
452456
await this.updateGlobalState("apiModelId", apiModelId)
@@ -470,13 +474,15 @@ export class ClineProvider implements vscode.WebviewViewProvider {
470474
await this.storeSecret("geminiApiKey", geminiApiKey)
471475
await this.storeSecret("openAiNativeApiKey", openAiNativeApiKey)
472476
await this.storeSecret("deepSeekApiKey", deepSeekApiKey)
477+
await this.storeSecret("qwenApiKey", qwenApiKey)
473478
await this.storeSecret("mistralApiKey", mistralApiKey)
474479
await this.updateGlobalState("azureApiVersion", azureApiVersion)
475480
await this.updateGlobalState("openRouterModelId", openRouterModelId)
476481
await this.updateGlobalState("openRouterModelInfo", openRouterModelInfo)
477482
await this.updateGlobalState("vsCodeLmModelSelector", vsCodeLmModelSelector)
478483
await this.updateGlobalState("liteLlmBaseUrl", liteLlmBaseUrl)
479484
await this.updateGlobalState("liteLlmModelId", liteLlmModelId)
485+
await this.updateGlobalState("qwenApiLine", qwenApiLine)
480486
if (this.cline) {
481487
this.cline.api = buildApiHandler(message.apiConfiguration)
482488
}
@@ -1365,6 +1371,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
13651371
geminiApiKey,
13661372
openAiNativeApiKey,
13671373
deepSeekApiKey,
1374+
qwenApiKey,
13681375
mistralApiKey,
13691376
azureApiVersion,
13701377
openRouterModelId,
@@ -1383,6 +1390,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
13831390
previousModeApiProvider,
13841391
previousModeModelId,
13851392
previousModeModelInfo,
1393+
qwenApiLine,
13861394
] = await Promise.all([
13871395
this.getGlobalState("apiProvider") as Promise<ApiProvider | undefined>,
13881396
this.getGlobalState("apiModelId") as Promise<string | undefined>,
@@ -1406,6 +1414,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
14061414
this.getSecret("geminiApiKey") as Promise<string | undefined>,
14071415
this.getSecret("openAiNativeApiKey") as Promise<string | undefined>,
14081416
this.getSecret("deepSeekApiKey") as Promise<string | undefined>,
1417+
this.getSecret("qwenApiKey") as Promise<string | undefined>,
14091418
this.getSecret("mistralApiKey") as Promise<string | undefined>,
14101419
this.getGlobalState("azureApiVersion") as Promise<string | undefined>,
14111420
this.getGlobalState("openRouterModelId") as Promise<string | undefined>,
@@ -1424,6 +1433,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
14241433
this.getGlobalState("previousModeApiProvider") as Promise<ApiProvider | undefined>,
14251434
this.getGlobalState("previousModeModelId") as Promise<string | undefined>,
14261435
this.getGlobalState("previousModeModelInfo") as Promise<ModelInfo | undefined>,
1436+
this.getGlobalState("qwenApiLine") as Promise<string | undefined>,
14271437
])
14281438

14291439
let apiProvider: ApiProvider
@@ -1464,6 +1474,8 @@ export class ClineProvider implements vscode.WebviewViewProvider {
14641474
geminiApiKey,
14651475
openAiNativeApiKey,
14661476
deepSeekApiKey,
1477+
qwenApiKey,
1478+
qwenApiLine,
14671479
mistralApiKey,
14681480
azureApiVersion,
14691481
openRouterModelId,
@@ -1559,6 +1571,7 @@ export class ClineProvider implements vscode.WebviewViewProvider {
15591571
"geminiApiKey",
15601572
"openAiNativeApiKey",
15611573
"deepSeekApiKey",
1574+
"qwenApiKey",
15621575
"mistralApiKey",
15631576
"authToken",
15641577
]

src/shared/api.ts

Lines changed: 90 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ export type ApiProvider =
99
| "gemini"
1010
| "openai-native"
1111
| "deepseek"
12+
| "qwen"
1213
| "mistral"
1314
| "vscode-lm"
1415
| "litellm"
@@ -39,9 +40,11 @@ export interface ApiHandlerOptions {
3940
geminiApiKey?: string
4041
openAiNativeApiKey?: string
4142
deepSeekApiKey?: string
43+
qwenApiKey?: string
4244
mistralApiKey?: string
4345
azureApiVersion?: string
4446
vsCodeLmModelSelector?: any
47+
qwenApiLine?: string
4548
}
4649

4750
export type ApiConfiguration = ApiHandlerOptions & {
@@ -432,6 +435,93 @@ export const deepSeekModels = {
432435
},
433436
} as const satisfies Record<string, ModelInfo>
434437

438+
// Qwen
// https://bailian.console.aliyun.com/
// Model ids accepted by the Qwen provider; derived from the table below so the
// union stays in sync with the declared models.
export type QwenModelId = keyof typeof qwenModels
// Model used when the configuration does not name a valid Qwen model.
export const qwenDefaultModelId: QwenModelId = "qwen-coder-plus-latest"
// Capability/pricing table for the supported Qwen models: "-latest" rolling
// aliases plus their stable counterparts (same limits and prices per pair).
// NOTE(review): price units are not stated here — presumably USD per 1K
// tokens, matching the Bailian price list; confirm before relying on them.
export const qwenModels = {
	"qwen-coder-plus-latest": {
		maxTokens: 129_024,
		contextWindow: 131_072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.0035,
		outputPrice: 0.007,
		cacheWritesPrice: 0.0035,
		cacheReadsPrice: 0.007,
	},
	"qwen-plus-latest": {
		maxTokens: 129_024,
		contextWindow: 131_072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.0008,
		outputPrice: 0.002,
		cacheWritesPrice: 0.0004,
		cacheReadsPrice: 0.001,
	},
	"qwen-turbo-latest": {
		maxTokens: 1_000_000,
		contextWindow: 1_000_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.0003,
		outputPrice: 0.0006,
		cacheWritesPrice: 0.00015,
		cacheReadsPrice: 0.0003,
	},
	"qwen-max-latest": {
		maxTokens: 30_720,
		contextWindow: 32_768,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.0112,
		outputPrice: 0.0448,
		cacheWritesPrice: 0.0056,
		cacheReadsPrice: 0.0224,
	},
	"qwen-coder-plus": {
		maxTokens: 129_024,
		contextWindow: 131_072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.0035,
		outputPrice: 0.007,
		cacheWritesPrice: 0.0035,
		cacheReadsPrice: 0.007,
	},
	"qwen-plus": {
		maxTokens: 129_024,
		contextWindow: 131_072,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.0008,
		outputPrice: 0.002,
		cacheWritesPrice: 0.0004,
		cacheReadsPrice: 0.001,
	},
	"qwen-turbo": {
		maxTokens: 1_000_000,
		contextWindow: 1_000_000,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.0003,
		outputPrice: 0.0006,
		cacheWritesPrice: 0.00015,
		cacheReadsPrice: 0.0003,
	},
	"qwen-max": {
		maxTokens: 30_720,
		contextWindow: 32_768,
		supportsImages: false,
		supportsPromptCache: false,
		inputPrice: 0.0112,
		outputPrice: 0.0448,
		cacheWritesPrice: 0.0056,
		cacheReadsPrice: 0.0224,
	},
} as const satisfies Record<string, ModelInfo>
524+
435525
// Mistral
436526
// https://docs.mistral.ai/getting-started/models/models_overview/
437527
export type MistralModelId = keyof typeof mistralModels

0 commit comments

Comments
 (0)