import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"

import { convertToOpenAiMessages } from "../transform/openai-format"
import { ApiStream } from "../transform/stream"
import { convertToR1Format } from "../transform/r1-format"

import { SingleCompletionHandler } from "../index"
import { RouterProvider } from "./router-provider"

import { ApiHandlerOptions, nebiusDefaultModelId, nebiusDefaultModelInfo } from "../../shared/api"
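/**
 * Handler for Nebius AI Studio's OpenAI-compatible chat completions API.
 * Client construction and model resolution are inherited from RouterProvider.
 */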
export class NebiusHandler extends RouterProvider implements SingleCompletionHandler {
	constructor(options: ApiHandlerOptions) {
		super({
			options,
			name: "nebius",
			baseURL: "https://api.studio.nebius.ai/v1",
			// Placeholder key keeps the client constructible before credentials are configured.
			apiKey: options.nebiusApiKey || "dummy-key",
			modelId: options.nebiusModelId,
			defaultModelId: nebiusDefaultModelId,
			defaultModelInfo: nebiusDefaultModelInfo,
		})
	}

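	/**
	 * Streams a chat completion as an ApiStream of text and usage chunks.
	 * DeepSeek-R1 models receive the system prompt as a leading user message
	 * (via convertToR1Format); all other models get a standard system message.
	 */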
	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
		const model = this.getModel()

		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = model.id.includes("DeepSeek-R1")
			? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
			: [{ role: "system", content: systemPrompt }, ...convertToOpenAiMessages(messages)]

		const stream = await this.client.chat.completions.create({
			model: model.id,
			messages: openAiMessages,
			temperature: 0,
			stream: true,
			// Ask the API to report token usage in the final stream chunk.
			stream_options: { include_usage: true },
		})

		for await (const chunk of stream) {
			const delta = chunk.choices[0]?.delta

			if (delta?.content) {
				yield {
					type: "text",
					text: delta.content,
				}
			}

			if (chunk.usage) {
				yield {
					type: "usage",
					inputTokens: chunk.usage.prompt_tokens || 0,
					outputTokens: chunk.usage.completion_tokens || 0,
				}
			}
		}
	}

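	/**
	 * Non-streaming completion of a single prompt. Temperature is only sent
	 * for models that support it, and max_tokens is taken from the resolved
	 * model's info.
	 */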
	async completePrompt(prompt: string): Promise<string> {
		const { id: modelId, info } = await this.fetchModel()

		try {
			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
				model: modelId,
				messages: [{ role: "user", content: prompt }],
			}

			// Some models reject a temperature parameter, so only send it when supported.
			if (this.supportsTemperature(modelId)) {
				requestOptions.temperature = this.options.modelTemperature ?? 0
			}

			requestOptions.max_tokens = info.maxTokens

			const response = await this.client.chat.completions.create(requestOptions)
			return response.choices[0]?.message.content || ""
		} catch (error) {
			if (error instanceof Error) {
				throw new Error(`Nebius completion error: ${error.message}`)
			}

			throw error
		}
	}
}
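
// Example usage (sketch only: the key and model id below are placeholders,
// and `messages` is assumed to already be in Anthropic MessageParam form):
//
//   const handler = new NebiusHandler({
//       nebiusApiKey: "<your-key>",
//       nebiusModelId: "deepseek-ai/DeepSeek-R1",
//   })
//
//   for await (const chunk of handler.createMessage("You are a helpful assistant.", messages)) {
//       if (chunk.type === "text") process.stdout.write(chunk.text)
//   }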