12 changes: 12 additions & 0 deletions packages/types/src/provider-settings.ts
@@ -49,6 +49,7 @@ export const dynamicProviders = [
"requesty",
"unbound",
"glama",
"n1n",
] as const

export type DynamicProvider = (typeof dynamicProviders)[number]
@@ -131,6 +132,7 @@ export const providerNames = [
"groq",
"mistral",
"moonshot",
"n1n",
"openai-native",
"qwen-code",
"roo",
@@ -399,6 +401,11 @@ const ioIntelligenceSchema = apiModelIdProviderModelSchema.extend({
ioIntelligenceApiKey: z.string().optional(),
})

const n1nSchema = baseProviderSettingsSchema.extend({
n1nApiKey: z.string().optional(),
n1nModelId: z.string().optional(),
})

const qwenCodeSchema = apiModelIdProviderModelSchema.extend({
qwenCodeOauthPath: z.string().optional(),
})
@@ -450,6 +457,7 @@ export const providerSettingsSchemaDiscriminated = z.discriminatedUnion("apiProv
fireworksSchema.merge(z.object({ apiProvider: z.literal("fireworks") })),
featherlessSchema.merge(z.object({ apiProvider: z.literal("featherless") })),
ioIntelligenceSchema.merge(z.object({ apiProvider: z.literal("io-intelligence") })),
n1nSchema.merge(z.object({ apiProvider: z.literal("n1n") })),
qwenCodeSchema.merge(z.object({ apiProvider: z.literal("qwen-code") })),
rooSchema.merge(z.object({ apiProvider: z.literal("roo") })),
vercelAiGatewaySchema.merge(z.object({ apiProvider: z.literal("vercel-ai-gateway") })),
@@ -491,6 +499,7 @@ export const providerSettingsSchema = z.object({
...fireworksSchema.shape,
...featherlessSchema.shape,
...ioIntelligenceSchema.shape,
...n1nSchema.shape,
...qwenCodeSchema.shape,
...rooSchema.shape,
...vercelAiGatewaySchema.shape,
@@ -526,6 +535,7 @@ export const modelIdKeys = [
"litellmModelId",
"huggingFaceModelId",
"ioIntelligenceModelId",
"n1nModelId",
"vercelAiGatewayModelId",
"deepInfraModelId",
] as const satisfies readonly (keyof ProviderSettings)[]
@@ -577,6 +587,7 @@ export const modelIdKeysByProvider: Record<TypicalProvider, ModelIdKey> = {
fireworks: "apiModelId",
featherless: "apiModelId",
"io-intelligence": "ioIntelligenceModelId",
n1n: "n1nModelId",
roo: "apiModelId",
"vercel-ai-gateway": "vercelAiGatewayModelId",
}
@@ -704,6 +715,7 @@ export const MODELS_BY_PROVIDER: Record<
requesty: { id: "requesty", label: "Requesty", models: [] },
unbound: { id: "unbound", label: "Unbound", models: [] },
deepinfra: { id: "deepinfra", label: "DeepInfra", models: [] },
n1n: { id: "n1n", label: "n1n AI", models: [] },
"vercel-ai-gateway": { id: "vercel-ai-gateway", label: "Vercel AI Gateway", models: [] },

// Local providers; models discovered from localhost endpoints.
1 change: 1 addition & 0 deletions packages/types/src/providers/index.ts
@@ -16,6 +16,7 @@ export * from "./lite-llm.js"
export * from "./lm-studio.js"
export * from "./mistral.js"
export * from "./moonshot.js"
export * from "./n1n.js"
export * from "./ollama.js"
export * from "./openai.js"
export * from "./openrouter.js"
20 changes: 20 additions & 0 deletions packages/types/src/providers/n1n.ts
@@ -0,0 +1,20 @@
import type { ModelInfo } from "../model.js"

// n1n.ai is an OpenAI-compatible API that provides access to 400+ models
// Since they have a large and dynamic model list, we'll fetch models dynamically
export type N1nModelId = string

export const n1nDefaultModelId = "gpt-4o-mini"

// Default model info for when dynamic fetching isn't available
export const n1nDefaultModelInfo: ModelInfo = {
maxTokens: 16_384,
contextWindow: 128_000,
supportsImages: true,
supportsPromptCache: false,
inputPrice: 0.15,
outputPrice: 0.6,
}

// Base URL for n1n.ai API
export const N1N_BASE_URL = "https://n1n.ai/v1"
3 changes: 3 additions & 0 deletions src/api/index.ts
@@ -19,6 +19,7 @@ import {
DeepSeekHandler,
MoonshotHandler,
MistralHandler,
N1nHandler,
VsCodeLmHandler,
UnboundHandler,
RequestyHandler,
@@ -123,6 +124,8 @@ export function buildApiHandler(configuration: ProviderSettings): ApiHandler {
return new QwenCodeHandler(options)
case "moonshot":
return new MoonshotHandler(options)
case "n1n":
return new N1nHandler(options)
case "vscode-lm":
return new VsCodeLmHandler(options)
case "mistral":
5 changes: 5 additions & 0 deletions src/api/providers/fetchers/modelCache.ts
@@ -24,6 +24,7 @@ import { getLMStudioModels } from "./lmstudio"
import { getIOIntelligenceModels } from "./io-intelligence"
import { getDeepInfraModels } from "./deepinfra"
import { getHuggingFaceModels } from "./huggingface"
import { getN1nModels } from "./n1n"

const memoryCache = new NodeCache({ stdTTL: 5 * 60, checkperiod: 5 * 60 })

@@ -99,6 +100,10 @@ export const getModels = async (options: GetModelsOptions): Promise<ModelRecord>
case "huggingface":
models = await getHuggingFaceModels()
break
case "n1n":
// n1n models endpoint requires an API key
models = await getN1nModels(options.apiKey)
break
default: {
// Ensures router is exhaustively checked if RouterName is a strict union.
const exhaustiveCheck: never = provider
74 changes: 74 additions & 0 deletions src/api/providers/fetchers/n1n.ts
@@ -0,0 +1,74 @@
import axios from "axios"
import { z } from "zod"

import { type ModelInfo, N1N_BASE_URL } from "@roo-code/types"

import { DEFAULT_HEADERS } from "../constants"

// n1n models endpoint follows OpenAI /models shape
const N1nModelSchema = z.object({
id: z.string(),
object: z.literal("model").optional(),
owned_by: z.string().optional(),
created: z.number().optional(),
})

const N1nModelsResponseSchema = z.object({
data: z.array(N1nModelSchema).optional(),
object: z.string().optional(),
})

export async function getN1nModels(apiKey: string): Promise<Record<string, ModelInfo>> {
const headers: Record<string, string> = {
...DEFAULT_HEADERS,
Authorization: `Bearer ${apiKey}`,
}

const url = `${N1N_BASE_URL}/models`
const models: Record<string, ModelInfo> = {}

try {
const response = await axios.get(url, { headers })
const parsed = N1nModelsResponseSchema.safeParse(response.data)
Automated review comment: Consider logging additional details from the Zod safeParse result when parsing fails (e.g. logging parsed.error) to aid in debugging schema mismatches.
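
A minimal sketch of the suggested change, assuming zod v3 semantics (a failed safeParse exposes a ZodError as parsed.error, and .format() serializes its issues):

const parsed = N1nModelsResponseSchema.safeParse(response.data)

if (!parsed.success) {
	// Hedged sketch: log the Zod issues so schema mismatches are easy to diagnose.
	console.error("n1n /models response failed schema validation:", parsed.error.format())
}

const data = parsed.success ? parsed.data.data || [] : response.data?.data || []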

const data = parsed.success ? parsed.data.data || [] : response.data?.data || []

for (const m of data as Array<z.infer<typeof N1nModelSchema>>) {
// Default model info - n1n doesn't provide detailed metadata in /models endpoint
// These are conservative defaults that should work for most models
const info: ModelInfo = {
maxTokens: 4096,
contextWindow: 16384,
supportsImages: false, // Will be true for vision models like gpt-4-vision
supportsPromptCache: false,
// n1n doesn't expose pricing via API, would need to be hardcoded or fetched separately
inputPrice: undefined,
outputPrice: undefined,
}

// Check for known vision model patterns
if (m.id.includes("vision") || m.id.includes("gpt-4o") || m.id.includes("claude-3")) {
info.supportsImages = true
}

// Check for known models with larger contexts
if (m.id.includes("gpt-4-turbo") || m.id.includes("claude-3") || m.id.includes("gpt-4o")) {
info.contextWindow = 128000
info.maxTokens = 4096
} else if (m.id.includes("claude-2")) {
info.contextWindow = 100000
info.maxTokens = 4096
} else if (m.id.includes("gpt-3.5-turbo-16k")) {
info.contextWindow = 16384
info.maxTokens = 4096
}

models[m.id] = info
}

return models
} catch (error) {
console.error("Error fetching n1n models:", error)
// Return empty object on error - the handler will use default model
return {}
}
}
1 change: 1 addition & 0 deletions src/api/providers/index.ts
@@ -17,6 +17,7 @@ export { IOIntelligenceHandler } from "./io-intelligence"
export { LiteLLMHandler } from "./lite-llm"
export { LmStudioHandler } from "./lm-studio"
export { MistralHandler } from "./mistral"
export { N1nHandler } from "./n1n"
export { OllamaHandler } from "./ollama"
export { OpenAiNativeHandler } from "./openai-native"
export { OpenAiHandler } from "./openai"
28 changes: 28 additions & 0 deletions src/api/providers/n1n.ts
@@ -0,0 +1,28 @@
import { n1nDefaultModelId, n1nDefaultModelInfo, N1N_BASE_URL } from "@roo-code/types"

import type { ApiHandlerOptions } from "../../shared/api"

import { getModelParams } from "../transform/model-params"

import { OpenAiHandler } from "./openai"

export class N1nHandler extends OpenAiHandler {
constructor(options: ApiHandlerOptions) {
super({
...options,
openAiApiKey: options.n1nApiKey ?? "",
openAiModelId: options.n1nModelId ?? n1nDefaultModelId,
openAiBaseUrl: N1N_BASE_URL,
openAiStreamingEnabled: true,
})
}

override getModel() {
const id = this.options.n1nModelId ?? n1nDefaultModelId
// Since n1n.ai supports 400+ models dynamically, we use default model info
// unless we implement dynamic model fetching
const info = n1nDefaultModelInfo
const params = getModelParams({ format: "openai", modelId: id, model: info, settings: this.options })
return { id, info, ...params }
Comment on lines +20 to +26 (Author):

The getModel() method always returns n1nDefaultModelInfo instead of using dynamically fetched models from the cache. This means the 400+ models from n1n.ai won't have accurate metadata (context windows, pricing, vision support, etc.).

The fetcher in src/api/providers/fetchers/n1n.ts is implemented and integrated, but N1nHandler never uses it. Compare with DeepInfraHandler.getModel() which properly uses cached models:

const info = this.models[id] ?? deepInfraDefaultModelInfo

N1nHandler should follow the same pattern: store fetched models and use them in getModel().
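
One possible shape for that, as a hedged sketch: fetchModel() and the this.models map below mirror the DeepInfra pattern and are assumptions, not part of this diff (they would also need ModelInfo from @roo-code/types and getModels from the fetchers/modelCache module):

private models: Record<string, ModelInfo> = {}

public async fetchModel() {
	// Populate the cache from the dynamic fetcher before resolving model info.
	this.models = await getModels({ provider: "n1n", apiKey: this.options.n1nApiKey ?? "" })
	return this.getModel()
}

override getModel() {
	const id = this.options.n1nModelId ?? n1nDefaultModelId
	// Prefer dynamically fetched metadata, falling back to the static default.
	const info = this.models[id] ?? n1nDefaultModelInfo
	const params = getModelParams({ format: "openai", modelId: id, model: info, settings: this.options })
	return { id, info, ...params }
}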

}
}
11 changes: 11 additions & 0 deletions src/core/webview/webviewMessageHandler.ts
@@ -768,6 +768,7 @@ export const webviewMessageHandler = async (
glama: {},
ollama: {},
lmstudio: {},
n1n: {},
}

const safeGetModels = async (options: GetModelsOptions): Promise<ModelRecord> => {
@@ -829,6 +830,16 @@
})
}

// Add n1n if API key is provided.
const n1nApiKey = apiConfiguration.n1nApiKey

if (n1nApiKey) {
modelFetchPromises.push({
key: "n1n",
options: { provider: "n1n", apiKey: n1nApiKey },
})
}

const results = await Promise.allSettled(
modelFetchPromises.map(async ({ key, options }) => {
const models = await safeGetModels(options)
2 changes: 2 additions & 0 deletions src/shared/ProfileValidator.ts
@@ -92,6 +92,8 @@ export class ProfileValidator {
return profile.ioIntelligenceModelId
case "deepinfra":
return profile.deepInfraModelId
case "n1n":
return profile.n1nModelId
case "human-relay":
case "fake-ai":
default:
1 change: 1 addition & 0 deletions src/shared/api.ts
@@ -163,6 +163,7 @@ const dynamicProviderExtras = {
glama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
ollama: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
lmstudio: {} as {}, // eslint-disable-line @typescript-eslint/no-empty-object-type
n1n: {} as { apiKey: string },
} as const satisfies Record<RouterName, object>

// Build the dynamic options union from the map, intersected with CommonFetchParams