import OpenAI from "openai"
import { Anthropic } from "@anthropic-ai/sdk" // Keep for type usage only

import { ApiHandlerOptions, litellmDefaultModelId, litellmDefaultModelInfo } from "../../shared/api"
import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
import { convertToOpenAiMessages } from "../transform/openai-format"
import { SingleCompletionHandler } from "../index"
import { RouterProvider } from "./router-provider"

/**
 * LiteLLM provider handler
 *
 * This handler uses the LiteLLM API to proxy requests to various LLM providers.
 * It follows the OpenAI API format for compatibility.
 */
export class LiteLLMHandler extends RouterProvider implements SingleCompletionHandler {
	constructor(options: ApiHandlerOptions) {
		super({
			options,
			name: "litellm",
			baseURL: `${options.litellmBaseUrl || "http://localhost:4000"}`,
			apiKey: options.litellmApiKey || "dummy-key",
			modelId: options.litellmModelId,
			defaultModelId: litellmDefaultModelId,
			defaultModelInfo: litellmDefaultModelInfo,
		})
	}

	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
		const { id: modelId, info } = await this.fetchModel()

		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
			{ role: "system", content: systemPrompt },
			...convertToOpenAiMessages(messages),
		]

		// Required by some providers; others default to max tokens allowed
		let maxTokens: number | undefined = info.maxTokens ?? undefined

		const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
			model: modelId,
			max_tokens: maxTokens,
			messages: openAiMessages,
			stream: true,
			stream_options: {
				include_usage: true,
			},
		}

		if (this.supportsTemperature(modelId)) {
			requestOptions.temperature = this.options.modelTemperature ?? 0
		}

		try {
			// withResponse() exposes the raw HTTP response alongside the streamed completion.
			const { data: completion } = await this.client.chat.completions.create(requestOptions).withResponse()

			let lastUsage: OpenAI.CompletionUsage | undefined

			for await (const chunk of completion) {
				const delta = chunk.choices[0]?.delta
				const usage = chunk.usage as OpenAI.CompletionUsage

				if (delta?.content) {
					yield { type: "text", text: delta.content }
				}

				// With stream_options.include_usage, token usage is reported on the final chunk.
				if (usage) {
					lastUsage = usage
				}
			}

			if (lastUsage) {
				const usageData: ApiStreamUsageChunk = {
					type: "usage",
					inputTokens: lastUsage.prompt_tokens || 0,
					outputTokens: lastUsage.completion_tokens || 0,
				}

				yield usageData
			}
		} catch (error) {
			if (error instanceof Error) {
				throw new Error(`LiteLLM streaming error: ${error.message}`)
			}
			throw error
		}
	}

	async completePrompt(prompt: string): Promise<string> {
		const { id: modelId, info } = await this.fetchModel()

		try {
			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
				model: modelId,
				messages: [{ role: "user", content: prompt }],
			}

			if (this.supportsTemperature(modelId)) {
				requestOptions.temperature = this.options.modelTemperature ?? 0
			}

			requestOptions.max_tokens = info.maxTokens

			const response = await this.client.chat.completions.create(requestOptions)
			return response.choices[0]?.message.content || ""
		} catch (error) {
			if (error instanceof Error) {
				throw new Error(`LiteLLM completion error: ${error.message}`)
			}
			throw error
		}
	}
}
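
A minimal usage sketch for context (values are hypothetical; it assumes ApiHandlerOptions carries the litellmBaseUrl, litellmApiKey, litellmModelId, and modelTemperature fields referenced above, and that the stream yields only the text and usage chunks produced by createMessage):

// Hypothetical usage sketch -- option values and model name are placeholders.
const handler = new LiteLLMHandler({
	litellmBaseUrl: "http://localhost:4000", // assumed LiteLLM proxy address
	litellmApiKey: "sk-litellm-example", // assumed proxy key
	litellmModelId: "gpt-4o", // any model the proxy routes
	modelTemperature: 0,
} as ApiHandlerOptions)

async function demo() {
	// Stream text deltas; a single usage chunk follows once the stream ends.
	for await (const chunk of handler.createMessage("You are a helpful assistant.", [
		{ role: "user", content: "Say hello." },
	])) {
		if (chunk.type === "text") {
			process.stdout.write(chunk.text)
		} else if (chunk.type === "usage") {
			console.log(`\n[usage] in=${chunk.inputTokens} out=${chunk.outputTokens}`)
		}
	}

	// Non-streaming single completion.
	const answer = await handler.completePrompt("Summarize LiteLLM in one sentence.")
	console.log(answer)
}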