From 63a34197302a05335028dcf1f232e7dfaf2985d8 Mon Sep 17 00:00:00 2001
From: Roo Code
Date: Tue, 14 Oct 2025 16:30:51 +0000
Subject: [PATCH] feat: add n1n.ai as a model provider

- Added n1n provider type definitions
- Created N1nHandler extending OpenAiHandler for API compatibility
- Integrated n1n into provider settings, the API factory, and ProfileValidator
- Added an n1n models fetcher for dynamic model discovery
- Updated the webview message handler and shared API configuration
- Supports 400+ models through an OpenAI-compatible API at https://n1n.ai/v1/

Addresses #8657
---
 packages/types/src/provider-settings.ts   | 12 ++++
 packages/types/src/providers/index.ts     |  1 +
 packages/types/src/providers/n1n.ts       | 20 ++++++
 src/api/index.ts                          |  3 +
 src/api/providers/fetchers/modelCache.ts  |  5 ++
 src/api/providers/fetchers/n1n.ts         | 74 +++++++++++++++++++++++
 src/api/providers/index.ts                |  1 +
 src/api/providers/n1n.ts                  | 28 +++++++++
 src/core/webview/webviewMessageHandler.ts | 11 ++++
 src/shared/ProfileValidator.ts            |  2 +
 src/shared/api.ts                         |  1 +
 11 files changed, 158 insertions(+)
 create mode 100644 packages/types/src/providers/n1n.ts
 create mode 100644 src/api/providers/fetchers/n1n.ts
 create mode 100644 src/api/providers/n1n.ts

diff --git a/packages/types/src/provider-settings.ts b/packages/types/src/provider-settings.ts
index a66aae08a243..d9c212c15b68 100644
--- a/packages/types/src/provider-settings.ts
+++ b/packages/types/src/provider-settings.ts
@@ -49,6 +49,7 @@ export const dynamicProviders = [
 	"requesty",
 	"unbound",
 	"glama",
+	"n1n",
 ] as const
 
 export type DynamicProvider = (typeof dynamicProviders)[number]
@@ -131,6 +132,7 @@ export const providerNames = [
 	"groq",
 	"mistral",
 	"moonshot",
+	"n1n",
 	"openai-native",
 	"qwen-code",
 	"roo",
@@ -399,6 +401,11 @@ const ioIntelligenceSchema = apiModelIdProviderModelSchema.extend({
 	ioIntelligenceApiKey: z.string().optional(),
 })
 
+const n1nSchema = baseProviderSettingsSchema.extend({
+	n1nApiKey: z.string().optional(),
+	n1nModelId: z.string().optional(),
+})
+
 const qwenCodeSchema = apiModelIdProviderModelSchema.extend({
 	qwenCodeOauthPath: z.string().optional(),
 })
@@ -450,6 +457,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProvider", [
 	fireworksSchema.merge(z.object({ apiProvider: z.literal("fireworks") })),
 	featherlessSchema.merge(z.object({ apiProvider: z.literal("featherless") })),
 	ioIntelligenceSchema.merge(z.object({ apiProvider: z.literal("io-intelligence") })),
+	n1nSchema.merge(z.object({ apiProvider: z.literal("n1n") })),
 	qwenCodeSchema.merge(z.object({ apiProvider: z.literal("qwen-code") })),
 	rooSchema.merge(z.object({ apiProvider: z.literal("roo") })),
 	vercelAiGatewaySchema.merge(z.object({ apiProvider: z.literal("vercel-ai-gateway") })),
@@ -491,6 +499,7 @@ export const providerSettingsSchema = z.object({
 	...fireworksSchema.shape,
 	...featherlessSchema.shape,
 	...ioIntelligenceSchema.shape,
+	...n1nSchema.shape,
 	...qwenCodeSchema.shape,
 	...rooSchema.shape,
 	...vercelAiGatewaySchema.shape,
@@ -526,6 +535,7 @@ export const modelIdKeys = [
 	"litellmModelId",
 	"huggingFaceModelId",
 	"ioIntelligenceModelId",
+	"n1nModelId",
 	"vercelAiGatewayModelId",
 	"deepInfraModelId",
 ] as const satisfies readonly (keyof ProviderSettings)[]
@@ -577,6 +587,7 @@ export const modelIdKeysByProvider: Record<ProviderName, ModelIdKey> = {
 	fireworks: "apiModelId",
 	featherless: "apiModelId",
 	"io-intelligence": "ioIntelligenceModelId",
+	n1n: "n1nModelId",
 	roo: "apiModelId",
 	"vercel-ai-gateway": "vercelAiGatewayModelId",
 }
@@ -704,6 +715,7 @@ export const MODELS_BY_PROVIDER: Record<
 	requesty: { id: "requesty", label: "Requesty", models: [] },
 	unbound: { id: "unbound", label: "Unbound", models: [] },
 	deepinfra: { id: "deepinfra", label: "DeepInfra", models: [] },
+	n1n: { id: "n1n", label: "n1n AI", models: [] },
 	"vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] },
 
 	// Local providers; models discovered from localhost endpoints.
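For reviewers, a minimal sketch of the settings object the new n1nSchema accepts, assuming providerSettingsSchema is re-exported from the @roo-code/types package root, as imports elsewhere in this patch suggest (the key value is a placeholder, not a real credential):

import { providerSettingsSchema } from "@roo-code/types"

// Parses cleanly: "n1n" is now a member of providerNames, and the two
// optional fields come from n1nSchema spread into the flat schema.
const settings = providerSettingsSchema.parse({
	apiProvider: "n1n",
	n1nApiKey: "placeholder-key",
	n1nModelId: "gpt-4o-mini",
})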
diff --git a/packages/types/src/providers/index.ts b/packages/types/src/providers/index.ts
index 21e43aaa99a6..d6258c1f6bda 100644
--- a/packages/types/src/providers/index.ts
+++ b/packages/types/src/providers/index.ts
@@ -16,6 +16,7 @@ export * from "./lite-llm.js"
 export * from "./lm-studio.js"
 export * from "./mistral.js"
 export * from "./moonshot.js"
+export * from "./n1n.js"
 export * from "./ollama.js"
 export * from "./openai.js"
 export * from "./openrouter.js"
diff --git a/packages/types/src/providers/n1n.ts b/packages/types/src/providers/n1n.ts
new file mode 100644
index 000000000000..38b8d0a9c848
--- /dev/null
+++ b/packages/types/src/providers/n1n.ts
@@ -0,0 +1,20 @@
+import type { ModelInfo } from "../model.js"
+
+// n1n.ai is an OpenAI-compatible API that provides access to 400+ models.
+// The model list is large and changes frequently, so models are fetched dynamically.
+export type N1nModelId = string
+
+export const n1nDefaultModelId = "gpt-4o-mini"
+
+// Default model info for when dynamic fetching isn't available.
+export const n1nDefaultModelInfo: ModelInfo = {
+	maxTokens: 16_384,
+	contextWindow: 128_000,
+	supportsImages: true,
+	supportsPromptCache: false,
+	inputPrice: 0.15,
+	outputPrice: 0.6,
+}
+
+// Base URL for the n1n.ai API.
+export const N1N_BASE_URL = "https://n1n.ai/v1"
diff --git a/src/api/index.ts b/src/api/index.ts
index ac0096767624..7f9f7c7ed91e 100644
--- a/src/api/index.ts
+++ b/src/api/index.ts
@@ -19,6 +19,7 @@ import {
 	DeepSeekHandler,
 	MoonshotHandler,
 	MistralHandler,
+	N1nHandler,
 	VsCodeLmHandler,
 	UnboundHandler,
 	RequestyHandler,
@@ -123,6 +124,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
 			return new QwenCodeHandler(options)
 		case "moonshot":
 			return new MoonshotHandler(options)
+		case "n1n":
+			return new N1nHandler(options)
 		case "vscode-lm":
 			return new VsCodeLmHandler(options)
 		case "mistral":
diff --git a/src/api/providers/fetchers/modelCache.ts b/src/api/providers/fetchers/modelCache.ts
index 2ccb73a45514..d1c3f200e0ee 100644
--- a/src/api/providers/fetchers/modelCache.ts
+++ b/src/api/providers/fetchers/modelCache.ts
@@ -24,6 +24,7 @@ import { getLMStudioModels } from "./lmstudio"
 import { getIOIntelligenceModels } from "./io-intelligence"
 import { getDeepInfraModels } from "./deepinfra"
 import { getHuggingFaceModels } from "./huggingface"
+import { getN1nModels } from "./n1n"
 
 const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 })
 
@@ -99,6 +100,10 @@ export const getModels = async (options: GetModelsOptions): Promise<ModelRecord> => {
 		case "huggingface":
 			models = await getHuggingFaceModels()
 			break
+		case "n1n":
+			// The n1n models endpoint requires an API key.
+			models = await getN1nModels(options.apiKey)
+			break
 		default: {
 			// Ensures router is exhaustively checked if RouterName is a strict union.
 			const exhaustiveCheck: never = provider
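A hypothetical call site for the new router branch, assuming GetModelsOptions carries the provider discriminant plus the apiKey extra declared for n1n in src/shared/api.ts below; the environment variable name and relative import path are illustrative:

import { getModels } from "./modelCache"

// First call hits https://n1n.ai/v1/models; repeat calls inside the
// 5-minute stdTTL window are served from the NodeCache memory cache.
const models = await getModels({ provider: "n1n", apiKey: process.env.N1N_API_KEY ?? "" })
console.log(`n1n exposes ${Object.keys(models).length} models`)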
diff --git a/src/api/providers/fetchers/n1n.ts b/src/api/providers/fetchers/n1n.ts
new file mode 100644
index 000000000000..f28e8479465c
--- /dev/null
+++ b/src/api/providers/fetchers/n1n.ts
@@ -0,0 +1,74 @@
+import axios from "axios"
+import { z } from "zod"
+
+import { type ModelInfo, N1N_BASE_URL } from "@roo-code/types"
+
+import { DEFAULT_HEADERS } from "../constants"
+
+// The n1n models endpoint follows the OpenAI /models shape.
+const N1nModelSchema = z.object({
+	id: z.string(),
+	object: z.literal("model").optional(),
+	owned_by: z.string().optional(),
+	created: z.number().optional(),
+})
+
+const N1nModelsResponseSchema = z.object({
+	data: z.array(N1nModelSchema).optional(),
+	object: z.string().optional(),
+})
+
+export async function getN1nModels(apiKey: string): Promise<Record<string, ModelInfo>> {
+	const headers: Record<string, string> = {
+		...DEFAULT_HEADERS,
+		Authorization: `Bearer ${apiKey}`,
+	}
+
+	const url = `${N1N_BASE_URL}/models`
+	const models: Record<string, ModelInfo> = {}
+
+	try {
+		const response = await axios.get(url, { headers })
+		const parsed = N1nModelsResponseSchema.safeParse(response.data)
+		const data = parsed.success ? parsed.data.data || [] : response.data?.data || []
+
+		for (const m of data as Array<Record<string, any>>) {
+			// Default model info: n1n doesn't provide detailed metadata in its
+			// /models endpoint, so these are conservative defaults for most models.
+			const info: ModelInfo = {
+				maxTokens: 4096,
+				contextWindow: 16384,
+				supportsImages: false, // Flipped below for known vision models like gpt-4-vision.
+				supportsPromptCache: false,
+				// n1n doesn't expose pricing via the API; it would need to be hardcoded or fetched separately.
+				inputPrice: undefined,
+				outputPrice: undefined,
+			}
+
+			// Check for known vision model patterns.
+			if (m.id.includes("vision") || m.id.includes("gpt-4o") || m.id.includes("claude-3")) {
+				info.supportsImages = true
+			}
+
+			// Check for known models with larger context windows.
+			if (m.id.includes("gpt-4-turbo") || m.id.includes("claude-3") || m.id.includes("gpt-4o")) {
+				info.contextWindow = 128000
+				info.maxTokens = 4096
+			} else if (m.id.includes("claude-2")) {
+				info.contextWindow = 100000
+				info.maxTokens = 4096
+			} else if (m.id.includes("gpt-3.5-turbo-16k")) {
+				info.contextWindow = 16384
+				info.maxTokens = 4096
+			}
+
+			models[m.id] = info
+		}
+
+		return models
+	} catch (error) {
+		console.error("Error fetching n1n models:", error)
+		// Return an empty object on error; the handler falls back to the default model.
+		return {}
+	}
+}
diff --git a/src/api/providers/index.ts b/src/api/providers/index.ts
index 85d877b6bc78..d1bdb88f3b40 100644
--- a/src/api/providers/index.ts
+++ b/src/api/providers/index.ts
@@ -17,6 +17,7 @@ export { IOIntelligenceHandler } from "./io-intelligence"
 export { LiteLLMHandler } from "./lite-llm"
 export { LmStudioHandler } from "./lm-studio"
 export { MistralHandler } from "./mistral"
+export { N1nHandler } from "./n1n"
 export { OllamaHandler } from "./ollama"
 export { OpenAiNativeHandler } from "./openai-native"
 export { OpenAiHandler } from "./openai"
diff --git a/src/api/providers/n1n.ts b/src/api/providers/n1n.ts
new file mode 100644
index 000000000000..22c795ecd96f
--- /dev/null
+++ b/src/api/providers/n1n.ts
@@ -0,0 +1,28 @@
+import { n1nDefaultModelId, n1nDefaultModelInfo, N1N_BASE_URL } from "@roo-code/types"
+
+import type { ApiHandlerOptions } from "../../shared/api"
+
+import { getModelParams } from "../transform/model-params"
+
+import { OpenAiHandler } from "./openai"
+
+export class N1nHandler extends OpenAiHandler {
+	constructor(options: ApiHandlerOptions) {
+		super({
+			...options,
+			openAiApiKey: options.n1nApiKey ?? "",
+			openAiModelId: options.n1nModelId ?? n1nDefaultModelId,
+			openAiBaseUrl: N1N_BASE_URL,
+			openAiStreamingEnabled: true,
+		})
+	}
+
+	override getModel() {
+		const id = this.options.n1nModelId ?? n1nDefaultModelId
+		// n1n.ai serves 400+ models dynamically, so default model info is used
+		// here until dynamic model metadata is wired into the handler.
+		const info = n1nDefaultModelInfo
+		const params = getModelParams({ format: "openai", modelId: id, model: info, settings: this.options })
+		return { id, info, ...params }
+	}
+}
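Illustrative wiring only, not part of the patch: constructing the handler directly, the way the buildApiHandler factory earlier in this patch does, to show the fallback behavior of getModel(). This assumes the ApiHandlerOptions fields are optional, as the ?? fallbacks in the constructor suggest; the key is a placeholder:

import { N1nHandler } from "./n1n"

const handler = new N1nHandler({
	n1nApiKey: "placeholder-key",
	// n1nModelId omitted on purpose: getModel() then falls back to
	// n1nDefaultModelId ("gpt-4o-mini") with n1nDefaultModelInfo.
})

const { id, info } = handler.getModel()
console.log(id, info.contextWindow) // "gpt-4o-mini", 128000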
diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts
index af5f9925c353..46043ca557f9 100644
--- a/src/core/webview/webviewMessageHandler.ts
+++ b/src/core/webview/webviewMessageHandler.ts
@@ -768,6 +768,7 @@ export const webviewMessageHandler = async (
 		glama: {},
 		ollama: {},
 		lmstudio: {},
+		n1n: {},
 	}
 
 	const safeGetModels = async (options: GetModelsOptions): Promise<ModelRecord> => {
@@ -829,6 +830,16 @@
 		})
 	}
 
+	// Add n1n if an API key is provided.
+	const n1nApiKey = apiConfiguration.n1nApiKey
+
+	if (n1nApiKey) {
+		modelFetchPromises.push({
+			key: "n1n",
+			options: { provider: "n1n", apiKey: n1nApiKey },
+		})
+	}
+
 	const results = await Promise.allSettled(
 		modelFetchPromises.map(async ({ key, options }) => {
 			const models = await safeGetModels(options)
diff --git a/src/shared/ProfileValidator.ts b/src/shared/ProfileValidator.ts
index 78ff6ed9fe1f..5f7b69ef2749 100644
--- a/src/shared/ProfileValidator.ts
+++ b/src/shared/ProfileValidator.ts
@@ -92,6 +92,8 @@ export class ProfileValidator {
 				return profile.ioIntelligenceModelId
 			case "deepinfra":
 				return profile.deepInfraModelId
+			case "n1n":
+				return profile.n1nModelId
 			case "human-relay":
 			case "fake-ai":
 			default:
diff --git a/src/shared/api.ts b/src/shared/api.ts
index 79001cb0ad09..f81348e71892 100644
--- a/src/shared/api.ts
+++ b/src/shared/api.ts
@@ -163,6 +163,7 @@ const dynamicProviderExtras = {
 	glama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
 	ollama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
 	lmstudio: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
+	n1n: {} as { apiKey: string },
 } as const satisfies Record<RouterName, object>
 
 // Build the dynamic options union from the map, intersected with CommonFetchParams
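For context on the fetcher's zod schemas, a sample of the OpenAI-style payload that GET https://n1n.ai/v1/models is expected to return; the values are illustrative, not taken from the live API:

// Sample response body for GET https://n1n.ai/v1/models (illustrative values).
const sample = {
	object: "list",
	data: [
		{ id: "gpt-4o-mini", object: "model", owned_by: "openai", created: 1715367049 },
		{ id: "claude-2.1", object: "model", owned_by: "anthropic", created: 1700000000 },
	],
}
// Under the fetcher's id-pattern heuristics, "gpt-4o-mini" matches "gpt-4o"
// (supportsImages: true, contextWindow: 128000) and "claude-2.1" matches
// "claude-2" (contextWindow: 100000); anything unmatched keeps the
// conservative 16384-token context default.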