From 653a5f52580707b0770c8ee03f62daa759f8fd4a Mon Sep 17 00:00:00 2001 From: ffp5 Date: Tue, 27 May 2025 18:35:04 +0200 Subject: [PATCH 1/3] add makehub on roo --- .github/ISSUE_TEMPLATE/bug_report.yml | 1 + evals/apps/web/src/app/runs/new/new-run.tsx | 4 + evals/packages/types/src/roo-code.ts | 17 ++ packages/types/src/global-settings.ts | 2 + packages/types/src/provider-settings.ts | 13 ++ src/api/index.ts | 3 + src/api/providers/fetchers/makehub.ts | 131 +++++++++++++ src/api/providers/fetchers/modelCache.ts | 5 + src/api/providers/index.ts | 1 + src/api/providers/makehub.ts | 177 ++++++++++++++++++ src/core/webview/webviewMessageHandler.ts | 2 + src/shared/api.ts | 19 +- .../src/components/settings/ApiOptions.tsx | 12 ++ .../src/components/settings/ModelPicker.tsx | 8 +- .../src/components/settings/constants.ts | 1 + .../components/settings/providers/MakeHub.tsx | 157 ++++++++++++++++ .../components/settings/providers/index.ts | 1 + .../components/ui/hooks/useSelectedModel.ts | 8 + webview-ui/src/utils/validate.ts | 8 + 19 files changed, 568 insertions(+), 2 deletions(-) create mode 100644 src/api/providers/fetchers/makehub.ts create mode 100644 src/api/providers/makehub.ts create mode 100644 webview-ui/src/components/settings/providers/MakeHub.tsx diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 5bfc08f80f..52135d9f43 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -32,6 +32,7 @@ body: - Human Relay Provider - LiteLLM - LM Studio + - MakeHub - Mistral AI - Ollama - OpenAI diff --git a/evals/apps/web/src/app/runs/new/new-run.tsx b/evals/apps/web/src/app/runs/new/new-run.tsx index 47fe8a89c4..b3334a9ecd 100644 --- a/evals/apps/web/src/app/runs/new/new-run.tsx +++ b/evals/apps/web/src/app/runs/new/new-run.tsx @@ -176,6 +176,7 @@ export function NewRun() { ollamaModelId, lmStudioModelId, openAiModelId, + makehubModelId, } = providerSettings switch (apiProvider) { @@ -210,6 +211,9 @@ export function NewRun() { case "lmstudio": setValue("model", lmStudioModelId ?? "") break + case "makehub": + setValue("model", makehubModelId ?? 
"") + break default: throw new Error(`Unsupported API provider: ${apiProvider}`) } diff --git a/evals/packages/types/src/roo-code.ts b/evals/packages/types/src/roo-code.ts index b397d37b64..eb0801addd 100644 --- a/evals/packages/types/src/roo-code.ts +++ b/evals/packages/types/src/roo-code.ts @@ -25,6 +25,7 @@ export const providerNames = [ "human-relay", "fake-ai", "xai", + "makehub", ] as const export const providerNamesSchema = z.enum(providerNames) @@ -478,6 +479,11 @@ const litellmSchema = z.object({ litellmModelId: z.string().optional(), }) +const makehubSchema = z.object({ + makehubApiKey: z.string().optional(), + makehubModelId: z.string().optional(), +}) + const defaultSchema = z.object({ apiProvider: z.undefined(), }) @@ -589,6 +595,11 @@ export const providerSettingsSchemaDiscriminated = z apiProvider: z.literal("litellm"), }), ), + makehubSchema.merge( + z.object({ + apiProvider: z.literal("makehub"), + }), + ), defaultSchema, ]) .and(genericProviderSettingsSchema) @@ -617,6 +628,7 @@ export const providerSettingsSchema = z.object({ ...chutesSchema.shape, ...litellmSchema.shape, ...genericProviderSettingsSchema.shape, + ...makehubSchema.shape, }) export type ProviderSettings = z.infer @@ -716,6 +728,9 @@ const providerSettingsRecord: ProviderSettingsRecord = { litellmBaseUrl: undefined, litellmApiKey: undefined, litellmModelId: undefined, + // MakeHub + makehubApiKey: undefined, + makehubModelId: undefined, } export const PROVIDER_SETTINGS_KEYS = Object.keys(providerSettingsRecord) as Keys[] @@ -910,6 +925,7 @@ export type SecretState = Pick< | "unboundApiKey" | "requestyApiKey" | "xaiApiKey" + | "makehubApiKey" > type SecretStateRecord = Record, undefined> @@ -929,6 +945,7 @@ const secretStateRecord: SecretStateRecord = { unboundApiKey: undefined, requestyApiKey: undefined, xaiApiKey: undefined, + makehubApiKey: undefined, } export const SECRET_STATE_KEYS = Object.keys(secretStateRecord) as Keys[] diff --git a/packages/types/src/global-settings.ts b/packages/types/src/global-settings.ts index 10b7d6ab18..653c615679 100644 --- a/packages/types/src/global-settings.ts +++ b/packages/types/src/global-settings.ts @@ -217,6 +217,7 @@ export type SecretState = Pick< | "groqApiKey" | "chutesApiKey" | "litellmApiKey" + | "makehubApiKey" | "codeIndexOpenAiKey" | "codeIndexQdrantApiKey" > @@ -239,6 +240,7 @@ export const SECRET_STATE_KEYS = keysOf()([ "groqApiKey", "chutesApiKey", "litellmApiKey", + "makehubApiKey", "codeIndexOpenAiKey", "codeIndexQdrantApiKey", ]) diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts index 7076361ea5..ab07674c2b 100644 --- a/packages/types/src/provider-settings.ts +++ b/packages/types/src/provider-settings.ts @@ -30,6 +30,7 @@ export const providerNames = [ "groq", "chutes", "litellm", + "makehub", ] as const export const providerNamesSchema = z.enum(providerNames) @@ -200,6 +201,12 @@ const litellmSchema = baseProviderSettingsSchema.extend({ litellmModelId: z.string().optional(), }) +const makehubSchema = baseProviderSettingsSchema.extend({ + makehubApiKey: z.string().optional(), + makehubModelId: z.string().optional(), + makehubPerfRatio: z.number().optional(), +}) + const defaultSchema = z.object({ apiProvider: z.undefined(), }) @@ -226,6 +233,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv groqSchema.merge(z.object({ apiProvider: z.literal("groq") })), chutesSchema.merge(z.object({ apiProvider: z.literal("chutes") })), litellmSchema.merge(z.object({ apiProvider: 
z.literal("litellm") })), + makehubSchema.merge(z.object({ apiProvider: z.literal("makehub") })), defaultSchema, ]) @@ -252,6 +260,7 @@ export const providerSettingsSchema = z.object({ ...groqSchema.shape, ...chutesSchema.shape, ...litellmSchema.shape, + ...makehubSchema.shape, ...codebaseIndexProviderSchema.shape, }) @@ -353,4 +362,8 @@ export const PROVIDER_SETTINGS_KEYS = keysOf()([ "litellmBaseUrl", "litellmApiKey", "litellmModelId", + // MakeHub + "makehubApiKey", + "makehubModelId", + "makehubPerfRatio", ]) diff --git a/src/api/index.ts b/src/api/index.ts index 8b09bf4cf9..78741fcbe6 100644 --- a/src/api/index.ts +++ b/src/api/index.ts @@ -27,6 +27,7 @@ import { GroqHandler, ChutesHandler, LiteLLMHandler, + MakeHubHandler, } from "./providers" export interface SingleCompletionHandler { @@ -106,6 +107,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler { return new ChutesHandler(options) case "litellm": return new LiteLLMHandler(options) + case "makehub": + return new MakeHubHandler(options) default: return new AnthropicHandler(options) } diff --git a/src/api/providers/fetchers/makehub.ts b/src/api/providers/fetchers/makehub.ts new file mode 100644 index 0000000000..7469f23b66 --- /dev/null +++ b/src/api/providers/fetchers/makehub.ts @@ -0,0 +1,131 @@ +import axios from "axios" +import type { ModelRecord } from "../../../shared/api" + +const MAKEHUB_BASE_URL = "https://api.makehub.ai/v1" + +interface MakehubModelResponse { + data: Array<{ + context: number + model_id: string + model_name: string + display_name?: string + organisation: string + price_per_input_token: number + price_per_output_token: number + provider_name: string + quantisation: string | null + max_tokens?: number + supports_images?: boolean + supports_prompt_cache?: boolean + cache_writes_price?: number + cache_reads_price?: number + assistant_ready: boolean + providers_available?: string[] + thinking_config?: { + max_budget?: number + output_price?: number + } + tiers?: Array<{ + context_window: number + input_price?: number + output_price?: number + cache_writes_price?: number + cache_reads_price?: number + }> + capabilities?: { + image_input?: boolean + tool_calling?: boolean + json_mode?: boolean + } + }> +} + +/** + * Fetches available models from the MakeHub API + * + * @param apiKey - The API key for authentication + * @returns A promise that resolves to a record of model IDs to model info + */ +export const getMakehubModels = async (apiKey?: string): Promise => { + try { + // Configure headers based on whether API key is provided + const headers: Record = { + Accept: "application/json", + "Content-Type": "application/json", + "HTTP-Referer": "vscode.dev", + "X-Title": "RooCode", + } + + // Add Authorization header if API key is provided + if (apiKey) { + headers.Authorization = `Bearer ${apiKey}` + } + + const response = await axios.get(`${MAKEHUB_BASE_URL}/models`, { + headers, + timeout: 10000, + }) + + if (!response.data?.data) { + console.error("Invalid MakeHub API response format:", response.data) + return {} + } + + const modelRecord: ModelRecord = {} + + for (const model of response.data.data) { + if (!model.model_id || !model.assistant_ready) continue + + // Create a model ID that includes provider information + // Use just the model_id as provided by the API, since it already has the proper format + const fullModelId = model.model_id.includes("/") + ? 
model.model_id // Already has organization format + : `${model.organisation}/${model.model_id}` // Add organization prefix + + // Log the raw price values from the API for debugging + console.log(`Model ${fullModelId} raw prices:`, { + input: model.price_per_input_token, + output: model.price_per_output_token, + }) + + // MakeHub API returns prices already in cost per million tokens, + // so we can use them directly without further conversion + const inputPrice = model.price_per_input_token + const outputPrice = model.price_per_output_token + + console.log(`Model ${fullModelId} stored prices:`, { + input: inputPrice, + output: outputPrice, + }) + + modelRecord[fullModelId] = { + maxTokens: model.max_tokens ?? undefined, + contextWindow: model.context, + supportsImages: model.capabilities?.image_input ?? false, + supportsComputerUse: model.capabilities?.tool_calling ?? false, + supportsPromptCache: model.supports_prompt_cache ?? false, + inputPrice: inputPrice, + outputPrice: outputPrice, + cacheWritesPrice: model.cache_writes_price, + cacheReadsPrice: model.cache_reads_price, + description: model.display_name, + tiers: model.tiers?.map((tier) => ({ + contextWindow: tier.context_window, + inputPrice: tier.input_price, + outputPrice: tier.output_price, + cacheWritesPrice: tier.cache_writes_price, + cacheReadsPrice: tier.cache_reads_price, + })), + } + } + + return modelRecord + } catch (error) { + console.error("Error fetching MakeHub models:", error) + if (axios.isAxiosError(error)) { + console.error("Response:", error.response?.data) + console.error("Status:", error.response?.status) + } + return {} + } +} diff --git a/src/api/providers/fetchers/modelCache.ts b/src/api/providers/fetchers/modelCache.ts index 12d636bc46..6f1c82c8d9 100644 --- a/src/api/providers/fetchers/modelCache.ts +++ b/src/api/providers/fetchers/modelCache.ts @@ -13,6 +13,7 @@ import { getRequestyModels } from "./requesty" import { getGlamaModels } from "./glama" import { getUnboundModels } from "./unbound" import { getLiteLLMModels } from "./litellm" +import { getMakehubModels } from "./makehub" import { GetModelsOptions } from "../../../shared/api" const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 }) @@ -68,6 +69,10 @@ export const getModels = async (options: GetModelsOptions): Promise // Type safety ensures apiKey and baseUrl are always provided for litellm models = await getLiteLLMModels(options.apiKey, options.baseUrl) break + case "makehub": + // Type safety ensures apiKey is always provided for makehub + models = await getMakehubModels(options.apiKey) + break default: { // Ensures router is exhaustively checked if RouterName is a strict union const exhaustiveCheck: never = provider diff --git a/src/api/providers/index.ts b/src/api/providers/index.ts index dd2a65dd75..48066168d6 100644 --- a/src/api/providers/index.ts +++ b/src/api/providers/index.ts @@ -20,3 +20,4 @@ export { XAIHandler } from "./xai" export { GroqHandler } from "./groq" export { ChutesHandler } from "./chutes" export { LiteLLMHandler } from "./litellm" +export { MakeHubHandler } from "./makehub" diff --git a/src/api/providers/makehub.ts b/src/api/providers/makehub.ts new file mode 100644 index 0000000000..07dee76634 --- /dev/null +++ b/src/api/providers/makehub.ts @@ -0,0 +1,177 @@ +import { Anthropic } from "@anthropic-ai/sdk" +import OpenAI from "openai" +import { Package } from "../../shared/package" +import { ApiHandlerOptions, makehubDefaultModelId, makehubDefaultModelInfo } from "../../shared/api" + +import { 
ApiStream } from "../transform/stream" +import { convertToOpenAiMessages } from "../transform/openai-format" +import { convertToR1Format } from "../transform/r1-format" +import { RouterProvider } from "./router-provider" + +const MAKEHUB_BASE_URL = "https://api.makehub.ai/v1" +const MAKEHUB_DEFAULT_TEMPERATURE = 0 + +const DEFAULT_HEADERS = { + "X-Makehub-Metadata": JSON.stringify({ + labels: [{ key: "app", value: `vscode.${Package.publisher}.${Package.name}` }], + }), +} + +export class MakeHubHandler extends RouterProvider { + private lastGenerationId?: string + + constructor(options: ApiHandlerOptions) { + super({ + options, + name: "makehub", + baseURL: MAKEHUB_BASE_URL, + apiKey: options.makehubApiKey, + modelId: options.makehubModelId, + defaultModelId: makehubDefaultModelId, + defaultModelInfo: makehubDefaultModelInfo, + }) + } + + override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { + this.lastGenerationId = undefined + await this.fetchModel() + const { id: modelId, info: modelInfo } = this.getModel() + + // Convert messages to OpenAI format + let openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [ + { role: "system", content: systemPrompt }, + ...convertToOpenAiMessages(messages), + ] + + // Extract actual model ID without duplicating the organization + // The model ID from MakeHub is already in the format 'organization/model_name' + // We need to use it as is without modification + const actualModelId = modelId + + // Set request options + const requestOptions: OpenAI.Chat.ChatCompletionCreateParams = { + model: actualModelId, + messages: openAiMessages, + stream: true, + } + + // Set temperature if supported + if (this.supportsTemperature(modelId)) { + requestOptions.temperature = this.options.modelTemperature ?? MAKEHUB_DEFAULT_TEMPERATURE + } + + // Set performance ratio header + const perfRatio = this.options.makehubPerfRatio ?? 0.5 // Default balanced value + const headers = { + ...DEFAULT_HEADERS, + "X-Price-Performance-Ratio": `${Math.round(perfRatio * 100)}`, + } + + // Check if we need to use R1 format for specific models + const modelLower = modelId.toLowerCase() + if (modelLower.includes("deepseek") || modelLower.includes("qwen") || modelLower.includes("qwq")) { + openAiMessages = convertToR1Format([{ role: "user", content: systemPrompt }, ...messages]) + requestOptions.messages = openAiMessages + } + + // Make API request + const { data: completion } = await this.client.chat.completions + .create(requestOptions, { headers }) + .withResponse() + + let didOutputUsage = false + + for await (const chunk of completion) { + // Capture generation ID for future statistics + if (!this.lastGenerationId && chunk.id) { + this.lastGenerationId = chunk.id + } + + const delta = chunk.choices[0]?.delta + + if (delta?.content) { + yield { type: "text", text: delta.content } + } + + // Handle usage statistics if present + if (!didOutputUsage && chunk.usage) { + console.log("MakeHub usage data received:", chunk.usage) + + // Validate token counts to prevent unreasonable values + const promptTokens = chunk.usage.prompt_tokens || 0 + const completionTokens = chunk.usage.completion_tokens || 0 + + // Check if token counts are reasonable (typically not more than 100k tokens in a single request) + const maxReasonableTokens = 100000 + const validPromptTokens = promptTokens > maxReasonableTokens ? maxReasonableTokens : promptTokens + const validCompletionTokens = + completionTokens > maxReasonableTokens ? 
maxReasonableTokens : completionTokens + + if (promptTokens > maxReasonableTokens || completionTokens > maxReasonableTokens) { + console.warn("MakeHub returned unusually high token counts, applying limits", { + original: { promptTokens, completionTokens }, + corrected: { validPromptTokens, validCompletionTokens }, + }) + } + + yield { + type: "usage", + inputTokens: validPromptTokens, + outputTokens: validCompletionTokens, + totalCost: this.calculateCost(validPromptTokens, validCompletionTokens, modelInfo), + } + didOutputUsage = true + } + } + } + + /** + * Calculate cost based on input and output tokens + */ + private calculateCost(inputTokens: number, outputTokens: number, modelInfo: any): number { + // Log the input values for debugging + console.log("MakeHub cost calculation inputs:", { + inputTokens, + outputTokens, + modelInfoPrices: { + inputPrice: modelInfo.inputPrice, + outputPrice: modelInfo.outputPrice, + }, + }) + + // MakeHub API returns prices already in dollars per million tokens, + // so we just need to divide tokens by 1,000,000 to get the correct cost + const inputCost = (inputTokens / 1_000_000) * (modelInfo.inputPrice || 0) + const outputCost = (outputTokens / 1_000_000) * (modelInfo.outputPrice || 0) + + let totalCost = inputCost + outputCost + + // Safety check: If the cost is unreasonably high (over $100), + // it's likely there's a calculation error, so apply a scaling factor + // This is a temporary fix until we can determine the exact cause + if (totalCost > 100) { + console.warn("MakeHub cost exceeds $100, applying safety scaling factor") + // Apply a scaling factor to bring it to a reasonable range + // Assuming a typical conversation shouldn't cost more than a few dollars + totalCost = totalCost / 10000 + } + + // Log the calculated costs for debugging + console.log("MakeHub cost calculation result:", { + inputTokens: inputTokens, + outputTokens: outputTokens, + inputPrice: modelInfo.inputPrice, + outputPrice: modelInfo.outputPrice, + inputCost, + outputCost, + totalCost, + }) + + return totalCost + } + + protected override supportsTemperature(modelId: string): boolean { + const model = this.models[modelId] + return model?.supportsImages ?? false + } +} diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts index 1a0b64605d..392f94e919 100644 --- a/src/core/webview/webviewMessageHandler.ts +++ b/src/core/webview/webviewMessageHandler.ts @@ -297,6 +297,7 @@ export const webviewMessageHandler = async (provider: ClineProvider, message: We glama: {}, unbound: {}, litellm: {}, + makehub: {}, } const safeGetModels = async (options: GetModelsOptions): Promise => { @@ -316,6 +317,7 @@ export const webviewMessageHandler = async (provider: ClineProvider, message: We { key: "requesty", options: { provider: "requesty", apiKey: apiConfiguration.requestyApiKey } }, { key: "glama", options: { provider: "glama" } }, { key: "unbound", options: { provider: "unbound", apiKey: apiConfiguration.unboundApiKey } }, + { key: "makehub", options: { provider: "makehub", apiKey: apiConfiguration.makehubApiKey } }, ] const litellmApiKey = apiConfiguration.litellmApiKey || message?.values?.litellmApiKey diff --git a/src/shared/api.ts b/src/shared/api.ts index 48c397d1b4..4eaafde6ab 100644 --- a/src/shared/api.ts +++ b/src/shared/api.ts @@ -519,6 +519,22 @@ export const openRouterDefaultModelInfo: ModelInfo = { "Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. 
It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and extended, step-by-step processing for complex tasks. The model demonstrates notable improvements in coding, particularly in front-end development and full-stack updates, and excels in agentic workflows, where it can autonomously navigate multi-step processes. Claude 3.7 Sonnet maintains performance parity with its predecessor in standard mode while offering an extended reasoning mode for enhanced accuracy in math, coding, and instruction-following tasks. Read more at the [blog post here](https://www.anthropic.com/news/claude-3-7-sonnet)", } +// MakeHub +// https://makehub.ai/models +export const makehubDefaultModelId = "anthropic/claude-4-sonnet" +export const makehubDefaultModelInfo: ModelInfo = { + maxTokens: 8192, + contextWindow: 200_000, + supportsImages: true, + supportsComputerUse: true, + supportsPromptCache: true, + inputPrice: 3.0, + outputPrice: 15.0, + cacheWritesPrice: 3.75, + cacheReadsPrice: 0.3, + description: + "The best coding model, optimized by MakeHub, and automatically routed to the fastest provider. Claude 4 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities.", +} // Vertex AI // https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/use-claude export type VertexModelId = keyof typeof vertexModels @@ -1928,7 +1944,7 @@ export const OPEN_ROUTER_REQUIRED_REASONING_BUDGET_MODELS = new Set([ "google/gemini-2.5-flash-preview-05-20:thinking", ]) -const routerNames = ["openrouter", "requesty", "glama", "unbound", "litellm"] as const +const routerNames = ["openrouter", "requesty", "glama", "unbound", "litellm", "makehub"] as const export type RouterName = (typeof routerNames)[number] @@ -2001,3 +2017,4 @@ export type GetModelsOptions = | { provider: "requesty"; apiKey?: string } | { provider: "unbound"; apiKey?: string } | { provider: "litellm"; apiKey: string; baseUrl: string } + | { provider: "makehub"; apiKey?: string } diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index bef4e45f20..87f0f91b4b 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -11,6 +11,7 @@ import { glamaDefaultModelId, unboundDefaultModelId, litellmDefaultModelId, + makehubDefaultModelId, } from "@roo/api" import { vscode } from "@src/utils/vscode" @@ -30,6 +31,7 @@ import { Groq, LMStudio, LiteLLM, + MakeHub, Mistral, Ollama, OpenAI, @@ -225,6 +227,11 @@ const ApiOptions = ({ setApiConfigurationField("litellmModelId", litellmDefaultModelId) } break + case "makehub": + if (!apiConfiguration.makehubModelId) { + setApiConfigurationField("makehubModelId", makehubDefaultModelId) + } + break } setApiConfigurationField("apiProvider", value) @@ -236,6 +243,7 @@ const ApiOptions = ({ apiConfiguration.unboundModelId, apiConfiguration.requestyModelId, apiConfiguration.litellmModelId, + apiConfiguration.makehubModelId, ], ) @@ -393,6 +401,10 @@ const ApiOptions = ({ )} + {selectedProvider === "makehub" && ( + + )} + {selectedProvider === "human-relay" && ( <>
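A quick sanity check on the pricing fields above (an illustrative sketch, not part of the patch): MakeHub reports prices in dollars per million tokens, so request cost is simply tokens / 1_000_000 * price, which is exactly what MakeHubHandler.calculateCost computes further down.

// Illustrative only — cost arithmetic for the makehubDefaultModelInfo prices above
// (inputPrice 3.0 and outputPrice 15.0 are dollars per million tokens).
const inputPrice = 3.0
const outputPrice = 15.0

function estimateCost(inputTokens: number, outputTokens: number): number {
	return (inputTokens / 1_000_000) * inputPrice + (outputTokens / 1_000_000) * outputPrice
}

// 10_000 prompt tokens + 1_000 completion tokens:
// 0.01 * 3.0 + 0.001 * 15.0 = $0.045
console.log(estimateCost(10_000, 1_000)) // 0.045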
diff --git a/webview-ui/src/components/settings/ModelPicker.tsx b/webview-ui/src/components/settings/ModelPicker.tsx index 96ac4e0dec..ea1f1c1b70 100644 --- a/webview-ui/src/components/settings/ModelPicker.tsx +++ b/webview-ui/src/components/settings/ModelPicker.tsx @@ -25,7 +25,13 @@ import { ModelInfoView } from "./ModelInfoView" type ModelIdKey = keyof Pick< ProviderSettings, - "glamaModelId" | "openRouterModelId" | "unboundModelId" | "requestyModelId" | "openAiModelId" | "litellmModelId" + | "glamaModelId" + | "openRouterModelId" + | "unboundModelId" + | "requestyModelId" + | "openAiModelId" + | "litellmModelId" + | "makehubModelId" > interface ModelPickerProps { diff --git a/webview-ui/src/components/settings/constants.ts b/webview-ui/src/components/settings/constants.ts index bd1ce69eb6..855139a9c9 100644 --- a/webview-ui/src/components/settings/constants.ts +++ b/webview-ui/src/components/settings/constants.ts @@ -49,6 +49,7 @@ export const PROVIDERS = [ { value: "groq", label: "Groq" }, { value: "chutes", label: "Chutes AI" }, { value: "litellm", label: "LiteLLM" }, + { value: "makehub", label: "MakeHub" }, ].sort((a, b) => a.label.localeCompare(b.label)) export const VERTEX_REGIONS = [ diff --git a/webview-ui/src/components/settings/providers/MakeHub.tsx b/webview-ui/src/components/settings/providers/MakeHub.tsx new file mode 100644 index 0000000000..a24ae74790 --- /dev/null +++ b/webview-ui/src/components/settings/providers/MakeHub.tsx @@ -0,0 +1,157 @@ +import { useCallback, useState } from "react" +import { VSCodeTextField, VSCodeDropdown, VSCodeOption } from "@vscode/webview-ui-toolkit/react" +import type { ProviderSettings } from "@roo-code/types" +import { RouterModels, makehubDefaultModelId } from "@roo/api" + +import { vscode } from "@src/utils/vscode" +import { useAppTranslation } from "@src/i18n/TranslationContext" +import { VSCodeButtonLink } from "@src/components/common/VSCodeButtonLink" +import { Button, Slider } from "@src/components/ui" + +import { inputEventTransform } from "../transforms" + +type MakeHubProps = { + apiConfiguration: ProviderSettings + setApiConfigurationField: (field: keyof ProviderSettings, value: ProviderSettings[keyof ProviderSettings]) => void + routerModels?: RouterModels + refetchRouterModels: () => void +} + +export const MakeHub = ({ + apiConfiguration, + setApiConfigurationField, + routerModels, + refetchRouterModels, +}: MakeHubProps) => { + const { t } = useAppTranslation() + const [didRefetch, setDidRefetch] = useState() + + const handleInputChange = useCallback( + ( + field: K, + transform: (event: E) => ProviderSettings[K] = inputEventTransform, + ) => + (event: E | Event) => { + setApiConfigurationField(field, transform(event as E)) + }, + [setApiConfigurationField], + ) + + const perfRatio = apiConfiguration?.makehubPerfRatio ?? 0.5 + + // Get all available models + const models = routerModels?.makehub || {} + const modelEntries = Object.entries(models).sort(([idA], [idB]) => idA.localeCompare(idB)) + + return ( + <> + + + +
+			<div className="text-sm text-vscode-descriptionForeground -mt-2">
+				{t("settings:providers.apiKeyStorageNotice")}
+			</div>
+
+			{!apiConfiguration?.makehubApiKey && (
+				<VSCodeButtonLink href="https://makehub.ai" appearance="secondary">
+					{t("settings:providers.getMakehubApiKey")}
+				</VSCodeButtonLink>
+			)}
+
+			<div>
+				<Slider
+					value={[perfRatio * 100]}
+					onValueChange={(values) => {
+						setApiConfigurationField("makehubPerfRatio", values[0] / 100)
+					}}
+					min={0}
+					max={100}
+					step={5}
+					className="w-full"
+				/>
+				<div className="flex justify-between text-xs text-vscode-descriptionForeground">
+					<span>{t("settings:providers.makehubPerfRatioLabels.price")}</span>
+					<span>{t("settings:providers.makehubPerfRatioLabels.balanced")}</span>
+					<span>{t("settings:providers.makehubPerfRatioLabels.performance")}</span>
+				</div>
+				<div className="text-sm text-vscode-descriptionForeground">
+					{t("settings:providers.makehubPerfRatioDescription")}
+				</div>
+			</div>
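+			{/* The slider edits a 0–100 value for display, but the persisted makehubPerfRatio is
+			    0–1 (values[0] / 100, with 0.5 as the balanced default). MakeHubHandler converts it
+			    back with Math.round(perfRatio * 100) for the X-Price-Performance-Ratio header. */}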
+			<Button
+				variant="outline"
+				onClick={() => {
+					// Re-request the MakeHub model list; didRefetch drives the hint below.
+					refetchRouterModels()
+					setDidRefetch(true)
+				}}>
+				{t("settings:providers.refreshModels.label")}
+			</Button>
+
+			{didRefetch && (
+				<div className="text-sm text-vscode-descriptionForeground">
+					{t("settings:providers.refreshModels.hint")}
+				</div>
+			)}
+
+			<VSCodeDropdown
+				value={apiConfiguration?.makehubModelId || makehubDefaultModelId}
+				className="w-full">
+				{modelEntries.length > 0 ? (
+					modelEntries.map(([id, info]) => (
+						<VSCodeOption
+							key={id}
+							value={id}
+							onClick={() => setApiConfigurationField("makehubModelId", id)}>
+							{info.description || id}
+						</VSCodeOption>
+					))
+				) : (
+					<VSCodeOption value={makehubDefaultModelId}>{makehubDefaultModelId}</VSCodeOption>
+				)}
+			</VSCodeDropdown>
+
+			{modelEntries.length === 0 && (
+				<div className="text-sm text-vscode-descriptionForeground">
+					{t("settings:providers.noModelsFound", { provider: "MakeHub" })}
+				</div>
+			)}
+
+			{apiConfiguration?.makehubModelId && models[apiConfiguration.makehubModelId] && (
+				<div className="text-sm text-vscode-descriptionForeground">
+					<div>
+						{t("settings:modelInfo.contextWindow")}:{" "}
+						{models[apiConfiguration.makehubModelId].contextWindow.toLocaleString()} tokens
+					</div>
+					<div>
+						{t("settings:modelInfo.inputPrice")}:{" "}
+						${models[apiConfiguration.makehubModelId].inputPrice}/M tokens
+					</div>
+					<div>
+						{t("settings:modelInfo.outputPrice")}:{" "}
+						${models[apiConfiguration.makehubModelId].outputPrice}/M tokens
+					</div>
+					{models[apiConfiguration.makehubModelId].supportsImages && (
+						<div>
+							{t("settings:modelInfo.supportsImages")} {t("settings:common.yes")}
+						</div>
+					)}
+				</div>
+			)}
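+			{/* Model IDs listed here are already normalized to "organisation/model_id" by
+			    getMakehubModels in src/api/providers/fetchers/makehub.ts, and the prices shown
+			    are dollars per million tokens, matching MakeHubHandler.calculateCost. */}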
+ + ) +} diff --git a/webview-ui/src/components/settings/providers/index.ts b/webview-ui/src/components/settings/providers/index.ts index b244fb515c..8af7b5c94b 100644 --- a/webview-ui/src/components/settings/providers/index.ts +++ b/webview-ui/src/components/settings/providers/index.ts @@ -17,3 +17,4 @@ export { Vertex } from "./Vertex" export { VSCodeLM } from "./VSCodeLM" export { XAI } from "./XAI" export { LiteLLM } from "./LiteLLM" +export { MakeHub } from "./MakeHub" diff --git a/webview-ui/src/components/ui/hooks/useSelectedModel.ts b/webview-ui/src/components/ui/hooks/useSelectedModel.ts index f656c702dd..c84468fd84 100644 --- a/webview-ui/src/components/ui/hooks/useSelectedModel.ts +++ b/webview-ui/src/components/ui/hooks/useSelectedModel.ts @@ -30,6 +30,7 @@ import { glamaDefaultModelId, unboundDefaultModelId, litellmDefaultModelId, + makehubDefaultModelId, } from "@roo/api" import { useRouterModels } from "./useRouterModels" @@ -119,6 +120,13 @@ function getSelectedModel({ ? { id, info } : { id: litellmDefaultModelId, info: routerModels.litellm[litellmDefaultModelId] } } + case "makehub": { + const id = apiConfiguration.makehubModelId ?? makehubDefaultModelId + const info = routerModels.makehub[id] + return info + ? { id, info } + : { id: makehubDefaultModelId, info: routerModels.makehub[makehubDefaultModelId] } + } case "xai": { const id = apiConfiguration.apiModelId ?? xaiDefaultModelId const info = xaiModels[id as keyof typeof xaiModels] diff --git a/webview-ui/src/utils/validate.ts b/webview-ui/src/utils/validate.ts index 69b7590c0f..6297712dea 100644 --- a/webview-ui/src/utils/validate.ts +++ b/webview-ui/src/utils/validate.ts @@ -81,6 +81,11 @@ export function validateApiConfiguration(apiConfiguration: ProviderSettings): st return i18next.t("settings:validation.modelSelector") } break + case "makehub": + if (!apiConfiguration.makehubApiKey) { + return i18next.t("settings:validation.apiKey") + } + break } return undefined @@ -145,6 +150,9 @@ export function validateModelId(apiConfiguration: ProviderSettings, routerModels case "litellm": modelId = apiConfiguration.litellmModelId break + case "makehub": + modelId = apiConfiguration.makehubModelId + break } if (!modelId) { From 42aee30912f57a2148a0bb19f27bc489e9397f80 Mon Sep 17 00:00:00 2001 From: ffp5 Date: Tue, 27 May 2025 23:22:24 +0200 Subject: [PATCH 2/3] fix and update Makehub for PR --- src/api/providers/fetchers/makehub.ts | 65 +++++++++-------- src/api/providers/makehub.ts | 70 ++++++++----------- src/core/webview/webviewMessageHandler.ts | 15 +++- .../src/components/settings/ApiOptions.tsx | 7 +- .../components/settings/providers/MakeHub.tsx | 23 +++--- 5 files changed, 97 insertions(+), 83 deletions(-) diff --git a/src/api/providers/fetchers/makehub.ts b/src/api/providers/fetchers/makehub.ts index 7469f23b66..c053d805dd 100644 --- a/src/api/providers/fetchers/makehub.ts +++ b/src/api/providers/fetchers/makehub.ts @@ -57,46 +57,40 @@ export const getMakehubModels = async (apiKey?: string): Promise => } // Add Authorization header if API key is provided - if (apiKey) { - headers.Authorization = `Bearer ${apiKey}` + if (apiKey && apiKey.trim()) { + headers.Authorization = `Bearer ${apiKey.trim()}` } const response = await axios.get(`${MAKEHUB_BASE_URL}/models`, { headers, - timeout: 10000, + timeout: 15000, }) if (!response.data?.data) { - console.error("Invalid MakeHub API response format:", response.data) - return {} + console.error("MakeHub: Invalid API response format:", response.data) + throw new Error("Invalid 
API response format from MakeHub") } const modelRecord: ModelRecord = {} for (const model of response.data.data) { - if (!model.model_id || !model.assistant_ready) continue + if (!model.model_id || !model.assistant_ready) { + continue + } // Create a model ID that includes provider information - // Use just the model_id as provided by the API, since it already has the proper format const fullModelId = model.model_id.includes("/") ? model.model_id // Already has organization format : `${model.organisation}/${model.model_id}` // Add organization prefix - // Log the raw price values from the API for debugging - console.log(`Model ${fullModelId} raw prices:`, { - input: model.price_per_input_token, - output: model.price_per_output_token, - }) - - // MakeHub API returns prices already in cost per million tokens, - // so we can use them directly without further conversion - const inputPrice = model.price_per_input_token - const outputPrice = model.price_per_output_token - - console.log(`Model ${fullModelId} stored prices:`, { - input: inputPrice, - output: outputPrice, - }) + // Validate pricing data + if (typeof model.price_per_input_token !== "number" || typeof model.price_per_output_token !== "number") { + console.warn(`MakeHub: Invalid pricing for model ${fullModelId}`, { + input: model.price_per_input_token, + output: model.price_per_output_token, + }) + continue + } modelRecord[fullModelId] = { maxTokens: model.max_tokens ?? undefined, @@ -104,8 +98,8 @@ export const getMakehubModels = async (apiKey?: string): Promise => supportsImages: model.capabilities?.image_input ?? false, supportsComputerUse: model.capabilities?.tool_calling ?? false, supportsPromptCache: model.supports_prompt_cache ?? false, - inputPrice: inputPrice, - outputPrice: outputPrice, + inputPrice: model.price_per_input_token, + outputPrice: model.price_per_output_token, cacheWritesPrice: model.cache_writes_price, cacheReadsPrice: model.cache_reads_price, description: model.display_name, @@ -121,11 +115,26 @@ export const getMakehubModels = async (apiKey?: string): Promise => return modelRecord } catch (error) { - console.error("Error fetching MakeHub models:", error) + console.error("MakeHub: Error fetching models:", error) if (axios.isAxiosError(error)) { - console.error("Response:", error.response?.data) - console.error("Status:", error.response?.status) + console.error("MakeHub: HTTP Error Details:", { + status: error.response?.status, + statusText: error.response?.statusText, + data: error.response?.data, + hasApiKey: !!apiKey, + }) + + if (error.response?.status === 401) { + throw new Error("MakeHub: Invalid API key. Please check your API key configuration.") + } else if (error.response?.status === 403) { + throw new Error("MakeHub: Access forbidden. Please check your API key permissions.") + } else if (error.response?.status >= 500) { + throw new Error("MakeHub: Server error. Please try again later.") + } else if (error.code === "ECONNABORTED") { + throw new Error("MakeHub: Request timeout. 
Please check your internet connection.") + } } - return {} + + throw new Error(`MakeHub: Failed to fetch models - ${error.message || "Unknown error"}`) } } diff --git a/src/api/providers/makehub.ts b/src/api/providers/makehub.ts index 07dee76634..cd371b08f6 100644 --- a/src/api/providers/makehub.ts +++ b/src/api/providers/makehub.ts @@ -34,8 +34,7 @@ export class MakeHubHandler extends RouterProvider { override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream { this.lastGenerationId = undefined - await this.fetchModel() - const { id: modelId, info: modelInfo } = this.getModel() + const { id: modelId, info: modelInfo } = await this.fetchModel() // Convert messages to OpenAI format let openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [ @@ -95,8 +94,6 @@ export class MakeHubHandler extends RouterProvider { // Handle usage statistics if present if (!didOutputUsage && chunk.usage) { - console.log("MakeHub usage data received:", chunk.usage) - // Validate token counts to prevent unreasonable values const promptTokens = chunk.usage.prompt_tokens || 0 const completionTokens = chunk.usage.completion_tokens || 0 @@ -129,49 +126,40 @@ export class MakeHubHandler extends RouterProvider { * Calculate cost based on input and output tokens */ private calculateCost(inputTokens: number, outputTokens: number, modelInfo: any): number { - // Log the input values for debugging - console.log("MakeHub cost calculation inputs:", { - inputTokens, - outputTokens, - modelInfoPrices: { - inputPrice: modelInfo.inputPrice, - outputPrice: modelInfo.outputPrice, - }, - }) + // Validate inputs + if (!modelInfo || typeof modelInfo.inputPrice !== "number" || typeof modelInfo.outputPrice !== "number") { + console.warn("MakeHub: Invalid model pricing information", { modelInfo }) + return 0 + } - // MakeHub API returns prices already in dollars per million tokens, - // so we just need to divide tokens by 1,000,000 to get the correct cost - const inputCost = (inputTokens / 1_000_000) * (modelInfo.inputPrice || 0) - const outputCost = (outputTokens / 1_000_000) * (modelInfo.outputPrice || 0) - - let totalCost = inputCost + outputCost - - // Safety check: If the cost is unreasonably high (over $100), - // it's likely there's a calculation error, so apply a scaling factor - // This is a temporary fix until we can determine the exact cause - if (totalCost > 100) { - console.warn("MakeHub cost exceeds $100, applying safety scaling factor") - // Apply a scaling factor to bring it to a reasonable range - // Assuming a typical conversation shouldn't cost more than a few dollars - totalCost = totalCost / 10000 + if (inputTokens < 0 || outputTokens < 0) { + console.warn("MakeHub: Invalid token counts", { inputTokens, outputTokens }) + return 0 } - // Log the calculated costs for debugging - console.log("MakeHub cost calculation result:", { - inputTokens: inputTokens, - outputTokens: outputTokens, - inputPrice: modelInfo.inputPrice, - outputPrice: modelInfo.outputPrice, - inputCost, - outputCost, - totalCost, - }) + // MakeHub API returns prices in dollars per million tokens + const inputCost = (inputTokens / 1_000_000) * modelInfo.inputPrice + const outputCost = (outputTokens / 1_000_000) * modelInfo.outputPrice + const totalCost = inputCost + outputCost + + // Log for debugging only if cost seems unusual + if (totalCost > 10) { + console.log("MakeHub high cost calculation:", { + inputTokens, + outputTokens, + inputPrice: modelInfo.inputPrice, + outputPrice: 
modelInfo.outputPrice, + inputCost, + outputCost, + totalCost, + }) + } - return totalCost + return Math.max(0, totalCost) } protected override supportsTemperature(modelId: string): boolean { - const model = this.models[modelId] - return model?.supportsImages ?? false + // Most models support temperature, but exclude o3-mini variants like OpenAI + return !modelId.toLowerCase().includes("o3-mini") } } diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts index 392f94e919..da49f36b35 100644 --- a/src/core/webview/webviewMessageHandler.ts +++ b/src/core/webview/webviewMessageHandler.ts @@ -317,9 +317,22 @@ export const webviewMessageHandler = async (provider: ClineProvider, message: We { key: "requesty", options: { provider: "requesty", apiKey: apiConfiguration.requestyApiKey } }, { key: "glama", options: { provider: "glama" } }, { key: "unbound", options: { provider: "unbound", apiKey: apiConfiguration.unboundApiKey } }, - { key: "makehub", options: { provider: "makehub", apiKey: apiConfiguration.makehubApiKey } }, ] + // Add MakeHub with proper API key handling + if (apiConfiguration.makehubApiKey) { + modelFetchPromises.push({ + key: "makehub", + options: { provider: "makehub", apiKey: apiConfiguration.makehubApiKey }, + }) + } else { + // MakeHub can work without API key, but with limited access + modelFetchPromises.push({ + key: "makehub", + options: { provider: "makehub" }, + }) + } + const litellmApiKey = apiConfiguration.litellmApiKey || message?.values?.litellmApiKey const litellmBaseUrl = apiConfiguration.litellmBaseUrl || message?.values?.litellmBaseUrl if (litellmApiKey && litellmBaseUrl) { diff --git a/webview-ui/src/components/settings/ApiOptions.tsx b/webview-ui/src/components/settings/ApiOptions.tsx index 87f0f91b4b..fa59f413db 100644 --- a/webview-ui/src/components/settings/ApiOptions.tsx +++ b/webview-ui/src/components/settings/ApiOptions.tsx @@ -402,7 +402,12 @@ const ApiOptions = ({ )} {selectedProvider === "makehub" && ( - + )} {selectedProvider === "human-relay" && ( diff --git a/webview-ui/src/components/settings/providers/MakeHub.tsx b/webview-ui/src/components/settings/providers/MakeHub.tsx index a24ae74790..1c0b2b5e74 100644 --- a/webview-ui/src/components/settings/providers/MakeHub.tsx +++ b/webview-ui/src/components/settings/providers/MakeHub.tsx @@ -39,10 +39,6 @@ export const MakeHub = ({ const perfRatio = apiConfiguration?.makehubPerfRatio ?? 0.5 - // Get all available models - const models = routerModels?.makehub || {} - const modelEntries = Object.entries(models).sort(([idA], [idB]) => idA.localeCompare(idB)) - return ( <> - {modelEntries.length > 0 ? ( - modelEntries.map(([id, info]) => ( + {Object.entries(routerModels?.makehub ?? {}).length > 0 ? ( + Object.entries(routerModels?.makehub ?? {}).map(([id, info]) => ( - {modelEntries.length === 0 && ( + {Object.entries(routerModels?.makehub ?? {}).length === 0 && (
 					{t("settings:providers.noModelsFound", { provider: "MakeHub" })}
 				</div>
 			)}
 
-			{apiConfiguration?.makehubModelId && models[apiConfiguration.makehubModelId] && (
+			{apiConfiguration?.makehubModelId && routerModels?.makehub?.[apiConfiguration.makehubModelId] && (
 				<div className="text-sm text-vscode-descriptionForeground">
 					<div>
 						{t("settings:modelInfo.contextWindow")}:{" "}
-						{models[apiConfiguration.makehubModelId].contextWindow.toLocaleString()} tokens
+						{routerModels.makehub[apiConfiguration.makehubModelId].contextWindow.toLocaleString()}{" "}
+						tokens
 					</div>
 					<div>
 						{t("settings:modelInfo.inputPrice")}:{" "}
-						${models[apiConfiguration.makehubModelId].inputPrice}/M tokens
+						${routerModels.makehub[apiConfiguration.makehubModelId].inputPrice}/M tokens
 					</div>
 					<div>
 						{t("settings:modelInfo.outputPrice")}:{" "}
-						${models[apiConfiguration.makehubModelId].outputPrice}/M tokens
+						${routerModels.makehub[apiConfiguration.makehubModelId].outputPrice}/M tokens
 					</div>
-					{models[apiConfiguration.makehubModelId].supportsImages && (
+					{routerModels.makehub[apiConfiguration.makehubModelId].supportsImages && (
{t("settings:modelInfo.supportsImages")} {t("settings:common.yes")} From 16e25195b0dc985a84d5c0941fd796c742210de3 Mon Sep 17 00:00:00 2001 From: ffp5 Date: Tue, 27 May 2025 23:24:50 +0200 Subject: [PATCH 3/3] Minor correction --- src/api/providers/fetchers/makehub.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/providers/fetchers/makehub.ts b/src/api/providers/fetchers/makehub.ts index c053d805dd..62db0647b1 100644 --- a/src/api/providers/fetchers/makehub.ts +++ b/src/api/providers/fetchers/makehub.ts @@ -128,7 +128,7 @@ export const getMakehubModels = async (apiKey?: string): Promise => throw new Error("MakeHub: Invalid API key. Please check your API key configuration.") } else if (error.response?.status === 403) { throw new Error("MakeHub: Access forbidden. Please check your API key permissions.") - } else if (error.response?.status >= 500) { + } else if (error.response && error.response.status >= 500) { throw new Error("MakeHub: Server error. Please try again later.") } else if (error.code === "ECONNABORTED") { throw new Error("MakeHub: Request timeout. Please check your internet connection.")
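A closing note on the one-line fix in PATCH 3 (an illustrative sketch, not part of the patch): error.response?.status has type number | undefined when the axios response is optional, and strict TypeScript rejects a >= comparison whose left-hand side may be undefined; the explicit error.response && ... guard narrows the type before comparing.

// Sketch of the narrowing behind the PATCH 3 change, assuming an axios-like error shape.
interface HttpErrorLike {
	code?: string
	response?: { status: number }
}

function isServerError(error: HttpErrorLike): boolean {
	// `error.response?.status >= 500` fails to compile under strictNullChecks,
	// since the left-hand side may be undefined; the guard narrows it to number.
	return error.response !== undefined && error.response.status >= 500
}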