diff --git a/.vscode/settings.json b/.vscode/settings.json
index e6b76c9e9de..7bb6acd469d 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -57,6 +57,7 @@
"Weaviate",
"XAILLM",
"Zilliz"
+ "SubModel",
],
"eslint.experimental.useFlatConfig": true,
"docker.languageserver.formatter.ignoreMultilineInstructions": true
diff --git a/README.md b/README.md
index 88922e65912..f13a22cb26a 100644
--- a/README.md
+++ b/README.md
@@ -103,6 +103,8 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
- [Moonshot AI](https://www.moonshot.ai/)
- [CometAPI (chat models)](https://api.cometapi.com/)
+- [SubModel](https://submodel.ai)
+
**Embedder models:**
- [AnythingLLM Native Embedder](/server/storage/models/README.md) (default)
diff --git a/docker/.env.example b/docker/.env.example
index 421d05368c5..ea5f01c23e8 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -143,6 +143,10 @@ GID='1000'
# MOONSHOT_AI_API_KEY='your-moonshot-api-key-here'
# MOONSHOT_AI_MODEL_PREF='moonshot-v1-32k'
+# LLM_PROVIDER='submodel'
+# SUBMODEL_INSTAGEN_ACCESS_KEY='your-submodel-instagen-access-key-here'
+# SUBMODEL_MODEL_PREF='deepseek-ai/DeepSeek-V3-0324'
+
###########################################
######## Embedding API SElECTION ##########
###########################################
diff --git a/frontend/src/components/LLMSelection/SubModelLLMOptions/index.jsx b/frontend/src/components/LLMSelection/SubModelLLMOptions/index.jsx
new file mode 100644
index 00000000000..8c6afec26d1
--- /dev/null
+++ b/frontend/src/components/LLMSelection/SubModelLLMOptions/index.jsx
@@ -0,0 +1,154 @@
+import System from "@/models/system";
+import { useState, useEffect } from "react";
+
+export default function SubModelLLMOptions({ settings }) {
+  return (
+    <div className="flex gap-[36px] mt-1.5 flex-wrap">
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          SubModel InstaGen Access Key
+        </label>
+        <input
+          type="password"
+          name="SubModelLLMAccessKey"
+          placeholder="SubModel InstaGen Access Key"
+          defaultValue={settings?.SubModelLLMAccessKey ? "*".repeat(20) : ""}
+          required={true}
+          autoComplete="off"
+          spellCheck={false}
+        />
+      </div>
+      {!settings?.credentialsOnly && (
+        <SubModelModelSelection settings={settings} />
+      )}
+    </div>
+  );
+}
+
+function SubModelModelSelection({ settings }) {
+ const [groupedModels, setGroupedModels] = useState({});
+ const [loading, setLoading] = useState(true);
+ const [selectedModelId, setSelectedModelId] = useState(settings?.SubModelModelPref);
+
+ useEffect(() => {
+ async function fetchModels() {
+ setLoading(true);
+ const { models } = await System.customModels("submodel");
+ if (models?.length > 0) {
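+        // Group models by organization so the dropdown can render one
+        // <optgroup> per provider organization.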
+ const modelsByOrganization = models.reduce((acc, model) => {
+ acc[model.organization] = acc[model.organization] || [];
+ acc[model.organization].push(model);
+ return acc;
+ }, {});
+ setGroupedModels(modelsByOrganization);
+ }
+ setLoading(false);
+ }
+ fetchModels();
+ }, []);
+
+ // Update selected model when settings change
+ useEffect(() => {
+ setSelectedModelId(settings?.SubModelModelPref);
+ }, [settings?.SubModelModelPref]);
+
+  if (loading || Object.keys(groupedModels).length === 0) {
+    return (
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-3">
+          Chat Model Selection
+        </label>
+        <select name="SubModelModelPref" disabled={true}>
+          <option disabled={true} selected={true}>
+            -- loading available models --
+          </option>
+        </select>
+      </div>
+    );
+  }
+
+  return (
+    <div className="flex flex-col w-60">
+      <label className="text-white text-sm font-semibold block mb-3">
+        Chat Model Selection
+      </label>
+      <select name="SubModelModelPref" required={true} value={selectedModelId} onChange={(e) => setSelectedModelId(e.target.value)}>
+        {Object.keys(groupedModels).map((organization) => (
+          <optgroup key={organization} label={organization}>
+            {groupedModels[organization].map((model) => (
+              <option key={model.id} value={model.id}>
+                {model.name}
+              </option>
+            ))}
+          </optgroup>
+        ))}
+      </select>
+      <FreeQuotaInfo groupedModels={groupedModels} selectedModelId={selectedModelId} />
+    </div>
+  );
+}
+
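+// Shows the selected model's daily free-tier allowance when the SubModel
+// models API reports a free_quota for it.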
+function FreeQuotaInfo({ groupedModels, selectedModelId }) {
+ // Find the currently selected model
+ const selectedModel = Object.values(groupedModels)
+ .flat()
+ .find(model => model.id === selectedModelId);
+
+  // Only render the quota panel when the selected model reports a free_quota
+ if (!selectedModel?.free_quota) {
+ return null;
+ }
+
+  return (
+    <div className="flex flex-col gap-y-1 mt-2">
+      <span className="text-theme-text-secondary text-xs font-semibold">
+        Free Quota Available
+      </span>
+      <div className="flex items-center justify-between">
+        <span className="text-theme-text-secondary text-xs">
+          Daily Tokens:
+        </span>
+        <span className="text-theme-text-secondary text-xs">
+          {selectedModel.free_quota.day_token?.toLocaleString() || "N/A"}
+        </span>
+      </div>
+      <div className="flex items-center justify-between">
+        <span className="text-theme-text-secondary text-xs">
+          Daily Requests:
+        </span>
+        <span className="text-theme-text-secondary text-xs">
+          {selectedModel.free_quota.day_request?.toLocaleString() || "N/A"}
+        </span>
+      </div>
+    </div>
+  );
+}
diff --git a/frontend/src/hooks/useGetProvidersModels.js b/frontend/src/hooks/useGetProvidersModels.js
index 82ef427cfd5..caa08bdafca 100644
--- a/frontend/src/hooks/useGetProvidersModels.js
+++ b/frontend/src/hooks/useGetProvidersModels.js
@@ -52,6 +52,7 @@ const groupedProviders = [
"novita",
"openrouter",
"ppio",
+ "submodel",
];
export default function useGetProviderModels(provider = null) {
const [defaultModels, setDefaultModels] = useState([]);
diff --git a/frontend/src/media/llmprovider/submodel.png b/frontend/src/media/llmprovider/submodel.png
new file mode 100644
index 00000000000..e54e601b9c8
Binary files /dev/null and b/frontend/src/media/llmprovider/submodel.png differ
diff --git a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
index afadca6ac09..200ed77ba5d 100644
--- a/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
+++ b/frontend/src/pages/GeneralSettings/LLMPreference/index.jsx
@@ -31,6 +31,7 @@ import APIPieLogo from "@/media/llmprovider/apipie.png";
import XAILogo from "@/media/llmprovider/xai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
+import SubModelLogo from "@/media/llmprovider/submodel.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
@@ -63,6 +64,7 @@ import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
+import SubModelLLMOptions from "@/components/LLMSelection/SubModelLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
@@ -335,6 +337,14 @@ export const AVAILABLE_LLM_PROVIDERS = [
"GenericOpenAiKey",
],
},
+ {
+ name: "SubModel",
+ value: "submodel",
+ logo: SubModelLogo,
+    options: (settings) => <SubModelLLMOptions settings={settings} />,
+ description: "Powerful AI Cloud for Startups.",
+ requiredConfig: ["SubModelLLMAccessKey"],
+ },
];
export default function GeneralLLMPreference() {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
index 442a443d949..56ef241a274 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx
@@ -36,6 +36,7 @@ import QDrantLogo from "@/media/vectordbs/qdrant.png";
import MilvusLogo from "@/media/vectordbs/milvus.png";
import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
+import SubModelLogo from "@/media/llmprovider/submodel.png";
import PGVectorLogo from "@/media/vectordbs/pgvector.png";
import DPAISLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
@@ -261,6 +262,14 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: CometApiLogo,
},
+ submodel: {
+ name: "SubModel",
+ description: [
+ "Your chats will not be used for training",
+ "Your prompts and document text used in response creation are visible to SubModel",
+ ],
+ logo: SubModelLogo,
+ },
};
export const VECTOR_DB_PRIVACY = {
diff --git a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
index 7a16985fe11..c837462a061 100644
--- a/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
+++ b/frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx
@@ -26,6 +26,7 @@ import XAILogo from "@/media/llmprovider/xai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
+import SubModelLogo from "@/media/llmprovider/submodel.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
@@ -56,6 +57,7 @@ import NovitaLLMOptions from "@/components/LLMSelection/NovitaLLMOptions";
import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
+import SubModelLLMOptions from "@/components/LLMSelection/SubModelLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
import CometApiLLMOptions from "@/components/LLMSelection/CometApiLLMOptions";
@@ -281,6 +283,13 @@ const LLMS = [
    options: (settings) => <CometApiLLMOptions settings={settings} />,
description: "500+ AI Models all in one API.",
},
+ {
+ name: "SubModel",
+ value: "submodel",
+ logo: SubModelLogo,
+    options: (settings) => <SubModelLLMOptions settings={settings} />,
+ description: "Powerful AI Cloud for Startups.",
+ },
];
export default function LLMPreference({
diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
index 9710243dacb..1d2eb989643 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx
@@ -33,6 +33,7 @@ const ENABLED_PROVIDERS = [
"gemini",
"moonshotai",
"cometapi",
+ "submodel",
// TODO: More agent support.
// "cohere", // Has tool calling and will need to build explicit support
// "huggingface" // Can be done but already has issues with no-chat templated. Needs to be tested.
diff --git a/locales/README.fa-IR.md b/locales/README.fa-IR.md
index c28abf648ae..c5bb4a3fd4b 100644
--- a/locales/README.fa-IR.md
+++ b/locales/README.fa-IR.md
@@ -104,6 +104,7 @@ AnythingLLM اسناد شما را به اشیایی به نام `workspaces` ت
- [xAI](https://x.ai/)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
+- [SubModel](https://submodel.ai)
diff --git a/locales/README.ja-JP.md b/locales/README.ja-JP.md
index d6fef0fa5b7..f065b0e29ae 100644
--- a/locales/README.ja-JP.md
+++ b/locales/README.ja-JP.md
@@ -92,6 +92,7 @@ AnythingLLMは、ドキュメントを`ワークスペース`と呼ばれるオ
- [KoboldCPP](https://github.com/LostRuins/koboldcpp)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
- [CometAPI (チャットモデル)](https://api.cometapi.com/)
+- [SubModel](https://submodel.ai)
**埋め込みモデル:**
diff --git a/locales/README.tr-TR.md b/locales/README.tr-TR.md
index 9f539779503..df2577d10e1 100644
--- a/locales/README.tr-TR.md
+++ b/locales/README.tr-TR.md
@@ -101,6 +101,7 @@ AnythingLLM, belgelerinizi **"çalışma alanları" (workspaces)** adı verilen
- [xAI](https://x.ai/)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
+- [SubModel](https://submodel.ai)
**Embedder modelleri:**
diff --git a/locales/README.zh-CN.md b/locales/README.zh-CN.md
index aa328351449..07d88e64e62 100644
--- a/locales/README.zh-CN.md
+++ b/locales/README.zh-CN.md
@@ -101,6 +101,7 @@ AnythingLLM将您的文档划分为称为`workspaces` (工作区)的对象。工
- [Novita AI (聊天模型)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO (聊天模型)](https://ppinfra.com?utm_source=github_anything-llm)
- [CometAPI (聊天模型)](https://api.cometapi.com/)
+- [SubModel](https://submodel.ai)
**支持的嵌入模型:**
diff --git a/server/.env.example b/server/.env.example
index c60319ab6ab..3c9e373aec8 100644
--- a/server/.env.example
+++ b/server/.env.example
@@ -142,6 +142,10 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
# MOONSHOT_AI_API_KEY='your-moonshot-api-key-here'
# MOONSHOT_AI_MODEL_PREF='moonshot-v1-32k'
+# LLM_PROVIDER='submodel'
+# SUBMODEL_INSTAGEN_ACCESS_KEY='your-submodel-instagen-access-key-here'
+# SUBMODEL_MODEL_PREF='deepseek-ai/DeepSeek-V3-0324'
+
###########################################
######## Embedding API SElECTION ##########
###########################################
diff --git a/server/endpoints/utils.js b/server/endpoints/utils.js
index 0bb51b83052..0e5eee3333d 100644
--- a/server/endpoints/utils.js
+++ b/server/endpoints/utils.js
@@ -145,6 +145,9 @@ function getModelTag() {
case "moonshotai":
model = process.env.MOONSHOT_AI_MODEL_PREF;
break;
+ case "submodel":
+ model = process.env.SUBMODEL_MODEL_PREF;
+ break;
default:
model = "--";
break;
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index d11684640fe..3f01a5e7414 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -616,6 +616,10 @@ const SystemSettings = {
CometApiLLMApiKey: !!process.env.COMETAPI_LLM_API_KEY,
CometApiLLMModelPref: process.env.COMETAPI_LLM_MODEL_PREF,
CometApiLLMTimeout: process.env.COMETAPI_LLM_TIMEOUT_MS,
+
+ // SubModel InstaGen Access keys
+ SubModelLLMAccessKey: !!process.env.SUBMODEL_INSTAGEN_ACCESS_KEY,
+ SubModelModelPref: process.env.SUBMODEL_MODEL_PREF,
};
},
diff --git a/server/utils/AiProviders/submodel/index.js b/server/utils/AiProviders/submodel/index.js
new file mode 100644
index 00000000000..80c4a10b61b
--- /dev/null
+++ b/server/utils/AiProviders/submodel/index.js
@@ -0,0 +1,270 @@
+const { NativeEmbedder } = require("../../EmbeddingEngines/native");
+const {
+ handleDefaultStreamResponseV2,
+} = require("../../helpers/chat/responses");
+const fs = require("fs");
+const path = require("path");
+const { safeJsonParse } = require("../../http");
+const {
+ LLMPerformanceMonitor,
+} = require("../../helpers/chat/LLMPerformanceMonitor");
+const cacheFolder = path.resolve(
+ process.env.STORAGE_DIR
+ ? path.resolve(process.env.STORAGE_DIR, "models", "submodel")
+ : path.resolve(__dirname, `../../../storage/models/submodel`)
+);
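+// Fetched model metadata is cached on disk so we don't hit the SubModel
+// /models endpoint on every request; see #cacheIsStale for the expiry window.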
+
+class SubModelLLM {
+ constructor(embedder = null, modelPreference = null) {
+ if (!process.env.SUBMODEL_INSTAGEN_ACCESS_KEY)
+ throw new Error("No SubModel InstaGen Access key was set.");
+
+ const { OpenAI: OpenAIApi } = require("openai");
+ this.basePath = "https://llm.submodel.ai/v1/";
+ this.openai = new OpenAIApi({
+ baseURL: this.basePath,
+ apiKey: process.env.SUBMODEL_INSTAGEN_ACCESS_KEY ?? null,
+ defaultHeaders: {
+ "HTTP-Referer": "https://anythingllm.com",
+ "X-API-Source": "anythingllm",
+ },
+ });
+ this.model =
+ modelPreference ||
+ process.env.SUBMODEL_MODEL_PREF ||
+ "deepseek-ai/DeepSeek-V3-0324";
+ this.limits = {
+ history: this.promptWindowLimit() * 0.15,
+ system: this.promptWindowLimit() * 0.15,
+ user: this.promptWindowLimit() * 0.7,
+ };
+
+ this.embedder = embedder ?? new NativeEmbedder();
+ this.defaultTemp = 0.7;
+
+ if (!fs.existsSync(cacheFolder))
+ fs.mkdirSync(cacheFolder, { recursive: true });
+ this.cacheModelPath = path.resolve(cacheFolder, "models.json");
+ this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
+
+ this.log(`Loaded with model: ${this.model}`);
+ }
+
+ log(text, ...args) {
+ console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
+ }
+
+ async #syncModels() {
+ if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
+ return false;
+
+ this.log(
+ "Model cache is not present or stale. Fetching from SubModel API."
+ );
+ await fetchSubModelModels();
+ return;
+ }
+
+ #cacheIsStale() {
+ const MAX_STALE = 6.048e8; // 1 Week in MS
+ if (!fs.existsSync(this.cacheAtPath)) return true;
+ const now = Number(new Date());
+ const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
+ return now - timestampMs > MAX_STALE;
+ }
+
+ #appendContext(contextTexts = []) {
+ if (!contextTexts || !contextTexts.length) return "";
+ return (
+ "\nContext:\n" +
+ contextTexts
+ .map((text, i) => {
+ return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
+ })
+ .join("")
+ );
+ }
+
+ models() {
+ if (!fs.existsSync(this.cacheModelPath)) return {};
+ return safeJsonParse(
+ fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
+ {}
+ );
+ }
+
+ streamingEnabled() {
+ return "streamGetChatCompletion" in this;
+ }
+
+ promptWindowLimit() {
+ const model = this.models()[this.model];
+ if (!model) return 4096; // Default to 4096 if we cannot find the model
+ return model?.maxLength || 4096;
+ }
+
+ async isValidChatCompletionModel(model = "") {
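+    // Make sure the on-disk model cache exists and is fresh before validating.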
+ await this.#syncModels();
+ const availableModels = this.models();
+ return Object.prototype.hasOwnProperty.call(availableModels, model);
+ }
+
+ /**
+ * Generates appropriate content array for a message + attachments.
+ * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
+ * @returns {string|object[]}
+ */
+ #generateContent({ userPrompt, attachments = [] }) {
+ if (!attachments.length) {
+ return userPrompt;
+ }
+
+ const content = [{ type: "text", text: userPrompt }];
+ for (let attachment of attachments) {
+ content.push({
+ type: "image_url",
+ image_url: {
+ url: attachment.contentString,
+ detail: "auto",
+ },
+ });
+ }
+ return content.flat();
+ }
+
+ constructPrompt({
+ systemPrompt = "",
+ contextTexts = [],
+ chatHistory = [],
+ userPrompt = "",
+ // attachments = [], - not supported
+ }) {
+ const prompt = {
+ role: "system",
+ content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
+ };
+ return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
+ }
+
+ async getChatCompletion(messages = null, { temperature = 0.7 }) {
+ if (!(await this.isValidChatCompletionModel(this.model)))
+ throw new Error(
+ `SubModel chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const result = await LLMPerformanceMonitor.measureAsyncFunction(
+ this.openai.chat.completions
+ .create({
+ model: this.model,
+ messages,
+ temperature,
+ })
+ .catch((e) => {
+ throw new Error(e.message);
+ })
+ );
+
+ if (
+ !Object.prototype.hasOwnProperty.call(result.output, "choices") ||
+ result.output.choices.length === 0
+ )
+ return null;
+
+ return {
+ textResponse: result.output.choices[0].message.content,
+ metrics: {
+ prompt_tokens: result.output.usage.prompt_tokens || 0,
+ completion_tokens: result.output.usage.completion_tokens || 0,
+ total_tokens: result.output.usage.total_tokens || 0,
+ outputTps: result.output.usage.completion_tokens / result.duration,
+ duration: result.duration,
+ },
+ };
+ }
+
+ async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
+ if (!(await this.isValidChatCompletionModel(this.model)))
+ throw new Error(
+ `SubModel chat: ${this.model} is not valid for chat completion!`
+ );
+
+ const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
+ this.openai.chat.completions.create({
+ model: this.model,
+ stream: true,
+ messages,
+ temperature,
+ }),
+ messages
+ );
+ return measuredStreamRequest;
+ }
+
+ handleStream(response, stream, responseProps) {
+ return handleDefaultStreamResponseV2(response, stream, responseProps);
+ }
+
+ async embedTextInput(textInput) {
+ return await this.embedder.embedTextInput(textInput);
+ }
+ async embedChunks(textChunks = []) {
+ return await this.embedder.embedChunks(textChunks);
+ }
+
+ async compressMessages(promptArgs = {}, rawHistory = []) {
+ const { messageArrayCompressor } = require("../../helpers/chat");
+ const messageArray = this.constructPrompt(promptArgs);
+ return await messageArrayCompressor(this, messageArray, rawHistory);
+ }
+}
+
+async function fetchSubModelModels() {
+ return await fetch(`https://llm.submodel.ai/v1/models`, {
+ method: "GET",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ })
+ .then((res) => res.json())
+ .then(({ data = [] }) => {
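+      // Normalize the OpenAI-style model list into a map keyed by model id,
+      // keeping organization, quota, pricing, and context-length metadata.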
+ const models = {};
+ data.forEach((model) => {
+ const organization = model.id?.split("/")?.[0] || "SubModel";
+ models[model.id] = {
+ id: model.id,
+ name: model.display_name || model.title || model.id,
+ organization,
+ free_quota: model?.free_quota || null,
+ pricing: model?.pricing || null,
+ maxLength: model.context_length || 4096,
+ };
+ });
+
+ if (!fs.existsSync(cacheFolder))
+ fs.mkdirSync(cacheFolder, { recursive: true });
+ fs.writeFileSync(
+ path.resolve(cacheFolder, "models.json"),
+ JSON.stringify(models),
+ {
+ encoding: "utf-8",
+ }
+ );
+ fs.writeFileSync(
+ path.resolve(cacheFolder, ".cached_at"),
+ String(Number(new Date())),
+ {
+ encoding: "utf-8",
+ }
+ );
+ return models;
+ })
+ .catch((e) => {
+ console.error(e);
+ return {};
+ });
+}
+
+module.exports = {
+ SubModelLLM,
+ fetchSubModelModels,
+};
diff --git a/server/utils/agents/aibitat/index.js b/server/utils/agents/aibitat/index.js
index 683850dfcb9..40ac4b786d6 100644
--- a/server/utils/agents/aibitat/index.js
+++ b/server/utils/agents/aibitat/index.js
@@ -832,6 +832,8 @@ ${this.getHistory({ to: route.to })
return new Providers.DellProAiStudioProvider({ model: config.model });
case "cometapi":
return new Providers.CometApiProvider({ model: config.model });
+ case "submodel":
+ return new Providers.SubModelProvider({ model: config.model });
default:
throw new Error(
`Unknown provider: ${config.provider}. Please use a valid provider.`
diff --git a/server/utils/agents/aibitat/providers/ai-provider.js b/server/utils/agents/aibitat/providers/ai-provider.js
index c2528acd948..bfca950ecbb 100644
--- a/server/utils/agents/aibitat/providers/ai-provider.js
+++ b/server/utils/agents/aibitat/providers/ai-provider.js
@@ -192,6 +192,14 @@ class Provider {
apiKey: process.env.MOONSHOT_AI_API_KEY ?? null,
...config,
});
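+      // SubModel's API is OpenAI-compatible, so the shared ChatOpenAI client
+      // works when pointed at its baseURL.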
+ case "submodel":
+ return new ChatOpenAI({
+ configuration: {
+ baseURL: "https://llm.submodela.ai/v1",
+ },
+ apiKey: process.env.SUBMODEL_INSTAGEN_ACCESS_KEY ?? null,
+ ...config,
+ });
// OSS Model Runners
// case "anythingllm_ollama":
// return new ChatOllama({
diff --git a/server/utils/agents/aibitat/providers/index.js b/server/utils/agents/aibitat/providers/index.js
index 2146269bb48..4a89e433302 100644
--- a/server/utils/agents/aibitat/providers/index.js
+++ b/server/utils/agents/aibitat/providers/index.js
@@ -21,6 +21,7 @@ const XAIProvider = require("./xai.js");
const NovitaProvider = require("./novita.js");
const NvidiaNimProvider = require("./nvidiaNim.js");
const PPIOProvider = require("./ppio.js");
+const SubModelProvider = require("./submodel.js");
const GeminiProvider = require("./gemini.js");
const DellProAiStudioProvider = require("./dellProAiStudio.js");
const MoonshotAiProvider = require("./moonshotAi.js");
@@ -51,6 +52,7 @@ module.exports = {
CometApiProvider,
NvidiaNimProvider,
PPIOProvider,
+ SubModelProvider,
GeminiProvider,
DellProAiStudioProvider,
MoonshotAiProvider,
diff --git a/server/utils/agents/aibitat/providers/submodel.js b/server/utils/agents/aibitat/providers/submodel.js
new file mode 100644
index 00000000000..f19e0795a68
--- /dev/null
+++ b/server/utils/agents/aibitat/providers/submodel.js
@@ -0,0 +1,115 @@
+const OpenAI = require("openai");
+const Provider = require("./ai-provider.js");
+const InheritMultiple = require("./helpers/classes.js");
+const UnTooled = require("./helpers/untooled.js");
+
+/**
+ * The agent provider for the SubModel AI provider.
+ */
+class SubModelProvider extends InheritMultiple([Provider, UnTooled]) {
+ model;
+
+ constructor(config = {}) {
+ const { model = "deepseek-ai/DeepSeek-V3-0324" } = config;
+ super();
+ const client = new OpenAI({
+ baseURL: "https://llm.submodel.ai/v1/",
+ apiKey: process.env.SUBMODEL_INSTAGEN_ACCESS_KEY,
+ maxRetries: 3,
+ defaultHeaders: {
+ "HTTP-Referer": "https://anythingllm.com",
+ "X-API-Source": "anythingllm",
+ },
+ });
+
+ this._client = client;
+ this.model = model;
+ this.verbose = true;
+ }
+
+ get client() {
+ return this._client;
+ }
+
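+  // Non-streamed completion at temperature 0, used by UnTooled's functionCall
+  // flow to get a deterministic, parseable tool-call response.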
+ async #handleFunctionCallChat({ messages = [] }) {
+ return await this.client.chat.completions
+ .create({
+ model: this.model,
+ temperature: 0,
+ messages,
+ })
+ .then((result) => {
+ if (!Object.prototype.hasOwnProperty.call(result, "choices"))
+ throw new Error("SubModel chat: No results!");
+ if (result.choices.length === 0)
+ throw new Error("SubModel chat: No results length!");
+ return result.choices[0].message.content;
+ })
+ .catch((_) => {
+ return null;
+ });
+ }
+
+ /**
+ * Create a completion based on the received messages.
+ *
+ * @param messages A list of messages to send to the API.
+ * @param functions
+ * @returns The completion.
+ */
+  async complete(messages, functions = []) {
+ let completion;
+ if (functions.length > 0) {
+ const { toolCall, text } = await this.functionCall(
+ messages,
+ functions,
+ this.#handleFunctionCallChat.bind(this)
+ );
+
+ if (toolCall !== null) {
+ this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
+ this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
+ return {
+ result: null,
+ functionCall: {
+ name: toolCall.name,
+ arguments: toolCall.arguments,
+ },
+ cost: 0,
+ };
+ }
+ completion = { content: text };
+ }
+
+ if (!completion?.content) {
+ this.providerLog("Will assume chat completion without tool call inputs.");
+ const response = await this.client.chat.completions.create({
+ model: this.model,
+ messages: this.cleanMsgs(messages),
+ });
+ completion = response.choices[0].message;
+ }
+
+ // The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
+ // from calling the exact same function over and over in a loop within a single chat exchange
+ // _but_ we should enable it to call previously used tools in a new chat interaction.
+ this.deduplicator.reset("runs");
+ return {
+ result: completion.content,
+ cost: 0,
+ };
+ }
+
+ /**
+ * Get the cost of the completion.
+ *
+ * @param _usage The completion to get the cost for.
+ * @returns The cost of the completion.
+ * Stubbed since SubModel has no cost basis.
+ */
+ getCost() {
+ return 0;
+ }
+}
+
+module.exports = SubModelProvider;
diff --git a/server/utils/agents/index.js b/server/utils/agents/index.js
index 46581d3c5ce..1736251d290 100644
--- a/server/utils/agents/index.js
+++ b/server/utils/agents/index.js
@@ -203,6 +203,12 @@ class AgentHandler {
if (!process.env.MOONSHOT_AI_MODEL_PREF)
throw new Error("Moonshot AI model must be set to use agents.");
break;
+ case "submodel":
+ if (!process.env.SUBMODEL_INSTAGEN_ACCESS_KEY)
+ throw new Error(
+ "SubModel InstaGen Access Key must be provided to use agents."
+ );
+ break;
case "cometapi":
if (!process.env.COMETAPI_LLM_API_KEY)
@@ -281,6 +287,8 @@ class AgentHandler {
return process.env.DPAIS_LLM_MODEL_PREF;
case "cometapi":
return process.env.COMETAPI_LLM_MODEL_PREF ?? "gpt-5-mini";
+ case "submodel":
+ return process.env.SUBMODEL_MODEL_PREF ?? "deepseek-ai/DeepSeek-V3-0324";
default:
return null;
}
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index ea5e738cdfa..21b001a8e5f 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -9,6 +9,7 @@ const { parseNvidiaNimBasePath } = require("../AiProviders/nvidiaNim");
const { fetchPPIOModels } = require("../AiProviders/ppio");
const { GeminiLLM } = require("../AiProviders/gemini");
const { fetchCometApiModels } = require("../AiProviders/cometapi");
+const { fetchSubModelModels } = require("../AiProviders/submodel");
const SUPPORT_CUSTOM_MODELS = [
"openai",
@@ -35,6 +36,7 @@ const SUPPORT_CUSTOM_MODELS = [
"ppio",
"dpais",
"moonshotai",
+ "submodel",
// Embedding Engines
"native-embedder",
];
@@ -92,6 +94,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getDellProAiStudioModels(basePath);
case "moonshotai":
return await getMoonshotAiModels(apiKey);
+ case "submodel":
+ return await getSubModelModels(apiKey);
case "native-embedder":
return await getNativeEmbedderModels();
default:
@@ -728,6 +732,22 @@ async function getMoonshotAiModels(_apiKey = null) {
return { models, error: null };
}
+async function getSubModelModels() {
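+  // Flatten the cached model map from fetchSubModelModels into the array
+  // shape the settings UI expects.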
+ const submodelModels = await fetchSubModelModels();
+  if (Object.keys(submodelModels).length === 0)
+    return { models: [], error: null };
+ const models = Object.values(submodelModels).map((model) => {
+ return {
+ id: model.id,
+ organization: model.organization,
+ name: model.name,
+ free_quota: model.free_quota,
+ pricing: model.pricing,
+ };
+ });
+ return { models, error: null };
+}
+
module.exports = {
getCustomModels,
SUPPORT_CUSTOM_MODELS,
diff --git a/server/utils/helpers/index.js b/server/utils/helpers/index.js
index 12327698954..fb7343c5f7c 100644
--- a/server/utils/helpers/index.js
+++ b/server/utils/helpers/index.js
@@ -215,6 +215,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "cometapi":
const { CometApiLLM } = require("../AiProviders/cometapi");
return new CometApiLLM(embedder, model);
+ case "submodel":
+ const { SubModelLLM } = require("../AiProviders/submodel");
+ return new SubModelLLM(embedder, model);
default:
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -365,6 +368,9 @@ function getLLMProviderClass({ provider = null } = {}) {
case "moonshotai":
const { MoonshotAiLLM } = require("../AiProviders/moonshotAi");
return MoonshotAiLLM;
+ case "submodel":
+ const { SubModelLLM } = require("../AiProviders/submodel");
+ return SubModelLLM;
case "cometapi":
const { CometApiLLM } = require("../AiProviders/cometapi");
return CometApiLLM;
@@ -438,6 +444,8 @@ function getBaseLLMProviderModel({ provider = null } = {}) {
return process.env.MOONSHOT_AI_MODEL_PREF;
case "cometapi":
return process.env.COMETAPI_LLM_MODEL_PREF;
+ case "submodel":
+ return process.env.SUBMODEL_MODEL_PREF;
default:
return null;
}
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 9032237833e..2b0a368b122 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -704,7 +704,7 @@ const KEY_MAPPING = {
envKey: "MOONSHOT_AI_MODEL_PREF",
checks: [isNotEmpty],
},
 
// CometAPI Options
CometApiLLMApiKey: {
envKey: "COMETAPI_LLM_API_KEY",
@@ -718,6 +718,16 @@ const KEY_MAPPING = {
envKey: "COMETAPI_LLM_TIMEOUT_MS",
checks: [],
},
+
+ // SubModel Options
+  SubModelLLMAccessKey: {
+ envKey: "SUBMODEL_INSTAGEN_ACCESS_KEY",
+ checks: [isNotEmpty],
+ },
+ SubModelModelPref: {
+ envKey: "SUBMODEL_MODEL_PREF",
+ checks: [isNotEmpty],
+ },
};
function isNotEmpty(input = "") {
@@ -828,6 +838,7 @@ function supportedLLM(input = "") {
"dpais",
"moonshotai",
"cometapi",
+ "submodel",
].includes(input);
return validSelection ? null : `${input} is not a valid LLM provider.`;
}