|
| 1 | +import type { ApiStream, ModelInfo, Message, TextBlock } from "../../types.d.ts"; |
| 2 | + |
/** Construction options for OpenRouterHandler. */
interface OpenRouterOptions {
  // Model identifier sent as the `model` field of the chat-completions request.
  model: string;
  // OpenRouter API key; sent as the `Authorization: Bearer` header.
  apiKey: string;
}
| 7 | + |
| 8 | +export class OpenRouterHandler { |
| 9 | + private apiKey: string; |
| 10 | + private model: string; |
| 11 | + |
| 12 | + constructor(options: OpenRouterOptions) { |
| 13 | + this.apiKey = options.apiKey; |
| 14 | + this.model = options.model; |
| 15 | + } |
| 16 | + |
| 17 | + async *createMessage(systemPrompt: string, messages: Message[]): ApiStream { |
| 18 | + try { |
| 19 | + // Convert our messages to OpenRouter format |
| 20 | + const openRouterMessages = [ |
| 21 | + { role: "system", content: systemPrompt }, |
| 22 | + ...messages.map(msg => ({ |
| 23 | + role: msg.role, |
| 24 | + content: Array.isArray(msg.content) |
| 25 | + ? msg.content.map(c => c.text).join("\n") |
| 26 | + : msg.content |
| 27 | + })) |
| 28 | + ]; |
| 29 | + |
| 30 | + const response = await fetch("https://openrouter.ai/api/v1/chat/completions", { |
| 31 | + method: "POST", |
| 32 | + headers: { |
| 33 | + "Authorization": `Bearer ${this.apiKey}`, |
| 34 | + "Content-Type": "application/json", |
| 35 | + "HTTP-Referer": "https://github.com/mattvr/roo-cline", |
| 36 | + "X-Title": "Cline CLI" |
| 37 | + }, |
| 38 | + body: JSON.stringify({ |
| 39 | + model: this.model, |
| 40 | + messages: openRouterMessages, |
| 41 | + stream: true, |
| 42 | + temperature: 0.7, |
| 43 | + max_tokens: 4096 |
| 44 | + }) |
| 45 | + }); |
| 46 | + |
| 47 | + if (!response.ok) { |
| 48 | + const errorData = await response.json().catch(() => null); |
| 49 | + throw new Error(`OpenRouter API error: ${response.statusText}${errorData ? ` - ${JSON.stringify(errorData)}` : ""}`); |
| 50 | + } |
| 51 | + |
| 52 | + if (!response.body) { |
| 53 | + throw new Error("No response body received"); |
| 54 | + } |
| 55 | + |
| 56 | + const reader = response.body.getReader(); |
| 57 | + const decoder = new TextDecoder(); |
| 58 | + let buffer = ""; |
| 59 | + let content = ""; |
| 60 | + |
| 61 | + while (true) { |
| 62 | + const { done, value } = await reader.read(); |
| 63 | + if (done) break; |
| 64 | + |
| 65 | + // Add new chunk to buffer and split into lines |
| 66 | + buffer += decoder.decode(value, { stream: true }); |
| 67 | + const lines = buffer.split("\n"); |
| 68 | + |
| 69 | + // Process all complete lines |
| 70 | + buffer = lines.pop() || ""; // Keep the last incomplete line in buffer |
| 71 | + |
| 72 | + for (const line of lines) { |
| 73 | + if (line.trim() === "") continue; |
| 74 | + if (line === "data: [DONE]") continue; |
| 75 | + |
| 76 | + if (line.startsWith("data: ")) { |
| 77 | + try { |
| 78 | + const data = JSON.parse(line.slice(6)); |
| 79 | + if (data.choices?.[0]?.delta?.content) { |
| 80 | + const text = data.choices[0].delta.content; |
| 81 | + content += text; |
| 82 | + yield { type: "text", text }; |
| 83 | + } |
| 84 | + } catch (e) { |
| 85 | + // Ignore parse errors for incomplete chunks |
| 86 | + continue; |
| 87 | + } |
| 88 | + } |
| 89 | + } |
| 90 | + } |
| 91 | + |
| 92 | + // Process any remaining content in buffer |
| 93 | + if (buffer.trim() && buffer.startsWith("data: ")) { |
| 94 | + try { |
| 95 | + const data = JSON.parse(buffer.slice(6)); |
| 96 | + if (data.choices?.[0]?.delta?.content) { |
| 97 | + const text = data.choices[0].delta.content; |
| 98 | + content += text; |
| 99 | + yield { type: "text", text }; |
| 100 | + } |
| 101 | + } catch (e) { |
| 102 | + // Ignore parse errors for final incomplete chunk |
| 103 | + } |
| 104 | + } |
| 105 | + |
| 106 | + // Estimate token usage (4 chars per token is a rough estimate) |
| 107 | + const inputText = systemPrompt + messages.reduce((acc, msg) => |
| 108 | + acc + (typeof msg.content === "string" ? |
| 109 | + msg.content : |
| 110 | + msg.content.reduce((a, b) => a + b.text, "")), ""); |
| 111 | + |
| 112 | + const inputTokens = Math.ceil(inputText.length / 4); |
| 113 | + const outputTokens = Math.ceil(content.length / 4); |
| 114 | + |
| 115 | + yield { |
| 116 | + type: "usage", |
| 117 | + inputTokens, |
| 118 | + outputTokens, |
| 119 | + totalCost: this.calculateCost(inputTokens, outputTokens) |
| 120 | + }; |
| 121 | + |
| 122 | + } catch (error) { |
| 123 | + console.error("Error in OpenRouter API call:", error); |
| 124 | + throw error; |
| 125 | + } |
| 126 | + } |
| 127 | + |
| 128 | + getModel(): { id: string; info: ModelInfo } { |
| 129 | + return { |
| 130 | + id: this.model, |
| 131 | + info: { |
| 132 | + contextWindow: 128000, // This varies by model |
| 133 | + supportsComputerUse: true, |
| 134 | + inputPricePerToken: 0.000002, // Approximate, varies by model |
| 135 | + outputPricePerToken: 0.000002 |
| 136 | + } |
| 137 | + }; |
| 138 | + } |
| 139 | + |
| 140 | + private calculateCost(inputTokens: number, outputTokens: number): number { |
| 141 | + const { inputPricePerToken, outputPricePerToken } = this.getModel().info; |
| 142 | + return ( |
| 143 | + (inputTokens * (inputPricePerToken || 0)) + |
| 144 | + (outputTokens * (outputPricePerToken || 0)) |
| 145 | + ); |
| 146 | + } |
| 147 | +} |
0 commit comments