|
import { promises as fs } from "node:fs"
import * as os from "os"
import * as path from "path"

import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"

import type { ModelInfo } from "@roo-code/types"
import { qwenCodeDefaultModelId, qwenCodeModels } from "@roo-code/types"

import type { ApiHandlerOptions } from "../../shared/api"
import type { SingleCompletionHandler } from "../index"

import { convertToOpenAiMessages } from "../transform/openai-format"
import { ApiStream } from "../transform/stream"
import { BaseProvider } from "./base-provider"
| 14 | + |
| 15 | +// --- Constants for Qwen OAuth2 --- |
| 16 | +const QWEN_OAUTH_BASE_URL = "https://chat.qwen.ai" |
| 17 | +const QWEN_OAUTH_TOKEN_ENDPOINT = `${QWEN_OAUTH_BASE_URL}/api/v1/oauth2/token` |
| 18 | +const QWEN_OAUTH_CLIENT_ID = "f0304373b74a44d2b584a3fb70ca9e56" |
| 19 | +const QWEN_DIR = ".qwen" |
| 20 | +const QWEN_CREDENTIAL_FILENAME = "oauth_creds.json" |
| 21 | + |
| 22 | +interface QwenOAuthCredentials { |
| 23 | + access_token: string |
| 24 | + refresh_token: string |
| 25 | + token_type: string |
| 26 | + expiry_date: number |
| 27 | + resource_url?: string |
| 28 | +} |
| 29 | + |
| 30 | +interface QwenCodeHandlerOptions extends ApiHandlerOptions { |
| 31 | + qwenCodeOauthPath?: string |
| 32 | +} |
| 33 | + |
| 34 | +function getQwenCachedCredentialPath(customPath?: string): string { |
| 35 | + if (customPath) { |
| 36 | + // Support custom path that starts with ~/ or is absolute |
| 37 | + if (customPath.startsWith("~/")) { |
| 38 | + return path.join(os.homedir(), customPath.slice(2)) |
| 39 | + } |
| 40 | + return path.resolve(customPath) |
| 41 | + } |
| 42 | + return path.join(os.homedir(), QWEN_DIR, QWEN_CREDENTIAL_FILENAME) |
| 43 | +} |
| 44 | + |
| 45 | +function objectToUrlEncoded(data: Record<string, string>): string { |
| 46 | + return Object.keys(data) |
| 47 | + .map((key) => `${encodeURIComponent(key)}=${encodeURIComponent(data[key])}`) |
| 48 | + .join("&") |
| 49 | +} |
| 50 | + |
| 51 | +export class QwenCodeHandler extends BaseProvider implements SingleCompletionHandler { |
| 52 | + protected options: QwenCodeHandlerOptions |
| 53 | + private credentials: QwenOAuthCredentials | null = null |
| 54 | + private client: OpenAI | undefined |
| 55 | + |
| 56 | + constructor(options: QwenCodeHandlerOptions) { |
| 57 | + super() |
| 58 | + this.options = options |
| 59 | + } |
| 60 | + |
| 61 | + private ensureClient(): OpenAI { |
| 62 | + if (!this.client) { |
| 63 | + // Create the client instance with dummy key initially |
| 64 | + // The API key will be updated dynamically via ensureAuthenticated |
| 65 | + this.client = new OpenAI({ |
| 66 | + apiKey: "dummy-key-will-be-replaced", |
| 67 | + baseURL: "https://dashscope.aliyuncs.com/compatible-mode/v1", |
| 68 | + }) |
| 69 | + } |
| 70 | + return this.client |
| 71 | + } |
| 72 | + |
| 73 | + private async loadCachedQwenCredentials(): Promise<QwenOAuthCredentials> { |
| 74 | + try { |
| 75 | + const keyFile = getQwenCachedCredentialPath(this.options.qwenCodeOauthPath) |
| 76 | + const credsStr = await fs.readFile(keyFile, "utf-8") |
| 77 | + return JSON.parse(credsStr) |
| 78 | + } catch (error) { |
| 79 | + console.error( |
| 80 | + `Error reading or parsing credentials file at ${getQwenCachedCredentialPath(this.options.qwenCodeOauthPath)}`, |
| 81 | + ) |
| 82 | + throw new Error(`Failed to load Qwen OAuth credentials: ${error}`) |
| 83 | + } |
| 84 | + } |
| 85 | + |
| 86 | + private async refreshAccessToken(credentials: QwenOAuthCredentials): Promise<QwenOAuthCredentials> { |
| 87 | + if (!credentials.refresh_token) { |
| 88 | + throw new Error("No refresh token available in credentials.") |
| 89 | + } |
| 90 | + |
| 91 | + const bodyData = { |
| 92 | + grant_type: "refresh_token", |
| 93 | + refresh_token: credentials.refresh_token, |
| 94 | + client_id: QWEN_OAUTH_CLIENT_ID, |
| 95 | + } |
| 96 | + |
| 97 | + const response = await fetch(QWEN_OAUTH_TOKEN_ENDPOINT, { |
| 98 | + method: "POST", |
| 99 | + headers: { |
| 100 | + "Content-Type": "application/x-www-form-urlencoded", |
| 101 | + Accept: "application/json", |
| 102 | + }, |
| 103 | + body: objectToUrlEncoded(bodyData), |
| 104 | + }) |
| 105 | + |
| 106 | + if (!response.ok) { |
| 107 | + const errorText = await response.text() |
| 108 | + throw new Error(`Token refresh failed: ${response.status} ${response.statusText}. Response: ${errorText}`) |
| 109 | + } |
| 110 | + |
| 111 | + const tokenData = await response.json() |
| 112 | + |
| 113 | + if (tokenData.error) { |
| 114 | + throw new Error(`Token refresh failed: ${tokenData.error} - ${tokenData.error_description}`) |
| 115 | + } |
| 116 | + |
| 117 | + const newCredentials = { |
| 118 | + ...credentials, |
| 119 | + access_token: tokenData.access_token, |
| 120 | + token_type: tokenData.token_type, |
| 121 | + refresh_token: tokenData.refresh_token || credentials.refresh_token, |
| 122 | + expiry_date: Date.now() + tokenData.expires_in * 1000, |
| 123 | + } |
| 124 | + |
| 125 | + const filePath = getQwenCachedCredentialPath(this.options.qwenCodeOauthPath) |
| 126 | + await fs.writeFile(filePath, JSON.stringify(newCredentials, null, 2)) |
| 127 | + |
| 128 | + return newCredentials |
| 129 | + } |
| 130 | + |
| 131 | + private isTokenValid(credentials: QwenOAuthCredentials): boolean { |
| 132 | + const TOKEN_REFRESH_BUFFER_MS = 30 * 1000 // 30s buffer |
| 133 | + if (!credentials.expiry_date) { |
| 134 | + return false |
| 135 | + } |
| 136 | + return Date.now() < credentials.expiry_date - TOKEN_REFRESH_BUFFER_MS |
| 137 | + } |
| 138 | + |
| 139 | + private async ensureAuthenticated(): Promise<void> { |
| 140 | + if (!this.credentials) { |
| 141 | + this.credentials = await this.loadCachedQwenCredentials() |
| 142 | + } |
| 143 | + |
| 144 | + if (!this.isTokenValid(this.credentials)) { |
| 145 | + this.credentials = await this.refreshAccessToken(this.credentials) |
| 146 | + } |
| 147 | + |
| 148 | + // After authentication, update the apiKey and baseURL on the existing client |
| 149 | + const client = this.ensureClient() |
| 150 | + client.apiKey = this.credentials.access_token |
| 151 | + client.baseURL = this.getBaseUrl(this.credentials) |
| 152 | + } |
| 153 | + |
| 154 | + private getBaseUrl(creds: QwenOAuthCredentials): string { |
| 155 | + let baseUrl = creds.resource_url || "https://dashscope.aliyuncs.com/compatible-mode/v1" |
| 156 | + if (!baseUrl.startsWith("http://") && !baseUrl.startsWith("https://")) { |
| 157 | + baseUrl = `https://${baseUrl}` |
| 158 | + } |
| 159 | + return baseUrl.endsWith("/v1") ? baseUrl : `${baseUrl}/v1` |
| 160 | + } |
| 161 | + |
| 162 | + private async callApiWithRetry<T>(apiCall: () => Promise<T>): Promise<T> { |
| 163 | + try { |
| 164 | + return await apiCall() |
| 165 | + } catch (error: any) { |
| 166 | + if (error.status === 401) { |
| 167 | + // Token expired, refresh and retry |
| 168 | + this.credentials = await this.refreshAccessToken(this.credentials!) |
| 169 | + const client = this.ensureClient() |
| 170 | + client.apiKey = this.credentials.access_token |
| 171 | + client.baseURL = this.getBaseUrl(this.credentials) |
| 172 | + return await apiCall() |
| 173 | + } else { |
| 174 | + throw error |
| 175 | + } |
| 176 | + } |
| 177 | + } |
| 178 | + |
| 179 | + override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { |
| 180 | + await this.ensureAuthenticated() |
| 181 | + const client = this.ensureClient() |
| 182 | + const model = this.getModel() |
| 183 | + |
| 184 | + const systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = { |
| 185 | + role: "system", |
| 186 | + content: systemPrompt, |
| 187 | + } |
| 188 | + |
| 189 | + const convertedMessages = [systemMessage, ...convertToOpenAiMessages(messages)] |
| 190 | + |
| 191 | + const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = { |
| 192 | + model: model.id, |
| 193 | + temperature: 0, |
| 194 | + messages: convertedMessages, |
| 195 | + stream: true, |
| 196 | + stream_options: { include_usage: true }, |
| 197 | + max_completion_tokens: model.info.maxTokens, |
| 198 | + } |
| 199 | + |
| 200 | + const stream = await this.callApiWithRetry(() => client.chat.completions.create(requestOptions)) |
| 201 | + |
| 202 | + let fullContent = "" |
| 203 | + |
| 204 | + for await (const apiChunk of stream) { |
| 205 | + const delta = apiChunk.choices[0]?.delta ?? {} |
| 206 | + |
| 207 | + if (delta.content) { |
| 208 | + let newText = delta.content |
| 209 | + if (newText.startsWith(fullContent)) { |
| 210 | + newText = newText.substring(fullContent.length) |
| 211 | + } |
| 212 | + fullContent = delta.content |
| 213 | + |
| 214 | + if (newText) { |
| 215 | + // Check for thinking blocks |
| 216 | + if (newText.includes("<think>") || newText.includes("</think>")) { |
| 217 | + // Simple parsing for thinking blocks |
| 218 | + const parts = newText.split(/<\/?think>/g) |
| 219 | + for (let i = 0; i < parts.length; i++) { |
| 220 | + if (parts[i]) { |
| 221 | + if (i % 2 === 0) { |
| 222 | + // Outside thinking block |
| 223 | + yield { |
| 224 | + type: "text", |
| 225 | + text: parts[i], |
| 226 | + } |
| 227 | + } else { |
| 228 | + // Inside thinking block |
| 229 | + yield { |
| 230 | + type: "reasoning", |
| 231 | + text: parts[i], |
| 232 | + } |
| 233 | + } |
| 234 | + } |
| 235 | + } |
| 236 | + } else { |
| 237 | + yield { |
| 238 | + type: "text", |
| 239 | + text: newText, |
| 240 | + } |
| 241 | + } |
| 242 | + } |
| 243 | + } |
| 244 | + |
| 245 | + // Handle reasoning content (o1-style) |
| 246 | + if ("reasoning_content" in delta && delta.reasoning_content) { |
| 247 | + yield { |
| 248 | + type: "reasoning", |
| 249 | + text: (delta.reasoning_content as string | undefined) || "", |
| 250 | + } |
| 251 | + } |
| 252 | + |
| 253 | + if (apiChunk.usage) { |
| 254 | + yield { |
| 255 | + type: "usage", |
| 256 | + inputTokens: apiChunk.usage.prompt_tokens || 0, |
| 257 | + outputTokens: apiChunk.usage.completion_tokens || 0, |
| 258 | + } |
| 259 | + } |
| 260 | + } |
| 261 | + } |
| 262 | + |
| 263 | + override getModel(): { id: string; info: ModelInfo } { |
| 264 | + const modelId = this.options.apiModelId |
| 265 | + const { qwenCodeModels, qwenCodeDefaultModelId } = require("@roo-code/types") |
| 266 | + if (modelId && modelId in qwenCodeModels) { |
| 267 | + const id = modelId |
| 268 | + return { id, info: qwenCodeModels[id] } |
| 269 | + } |
| 270 | + return { |
| 271 | + id: qwenCodeDefaultModelId, |
| 272 | + info: qwenCodeModels[qwenCodeDefaultModelId], |
| 273 | + } |
| 274 | + } |
| 275 | + |
| 276 | + async completePrompt(prompt: string): Promise<string> { |
| 277 | + await this.ensureAuthenticated() |
| 278 | + const client = this.ensureClient() |
| 279 | + const model = this.getModel() |
| 280 | + |
| 281 | + const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = { |
| 282 | + model: model.id, |
| 283 | + messages: [{ role: "user", content: prompt }], |
| 284 | + max_completion_tokens: model.info.maxTokens, |
| 285 | + } |
| 286 | + |
| 287 | + const response = await this.callApiWithRetry(() => client.chat.completions.create(requestOptions)) |
| 288 | + |
| 289 | + return response.choices[0]?.message.content || "" |
| 290 | + } |
| 291 | +} |
0 commit comments