Commit 31f3fb4

feat: lm studio support
1 parent acdf1f2 commit 31f3fb4

File tree

12 files changed: +250 −20 lines changed

app/assets/icons/lmstudio.svg

Lines changed: 1 addition & 0 deletions

app/components/Settings/SettingsAdminModels.vue

Lines changed: 76 additions & 15 deletions
```diff
@@ -5,12 +5,14 @@ interface Models {
   openai: Model[];
   gemini: Model[];
   anthropic: Model[];
+  lmstudio: Model[];
   ollama: Record<string, Model[]>;
 }
 const models = ref<Models>({
   openai: [],
   gemini: [],
   anthropic: [],
+  lmstudio: [],
   ollama: {},
 });
 async function fetchModels(provider: keyof Models, url?: string) {
@@ -22,7 +24,8 @@ async function fetchModels(provider: keyof Models, url?: string) {
   } else if (
     provider === "openai" ||
     provider === "gemini" ||
-    provider === "anthropic"
+    provider === "anthropic" ||
+    provider === "lmstudio"
   ) {
     const response = await $fetch<{ models: Model[] }>(
       `/api/models/${provider}`,
@@ -204,29 +207,48 @@ async function pullOllamaModel(url: string) {
 const openaiAvailable = ref(false);
 const geminiAvailable = ref(false);
 const anthropicAvailable = ref(false);
+const lmStudioEnabled = computed(
+  () => globalSettingsStore.settings.lmStudioEnabled,
+);
+
 // Check the ollama endpoint on mount
 onMounted(async () => {
   // Check if the ollama url is set
   if (globalSettingsStore.settings.ollamaUrls) {
     for (const url of globalSettingsStore.settings.ollamaUrls) {
       testOllamaUrl(url);
     }
-    // Fetch the models
-    const response = await $fetch<{ providers: string[] }>("/api/providers");
-    if (response.providers.includes("openai")) {
-      openaiAvailable.value = true;
-      fetchModels("openai");
-    }
-    if (response.providers.includes("gemini")) {
-      geminiAvailable.value = true;
-      fetchModels("gemini");
-    }
-    if (response.providers.includes("anthropic")) {
-      anthropicAvailable.value = true;
-      fetchModels("anthropic");
-    }
+  }
+
+  // Check if the lm studio is available
+  if (lmStudioEnabled.value) {
+    fetchModels("lmstudio");
+  }
+  // Fetch the models
+  const response = await $fetch<{ providers: string[] }>("/api/providers");
+  if (response.providers.includes("openai")) {
+    openaiAvailable.value = true;
+    fetchModels("openai");
+  }
+  if (response.providers.includes("gemini")) {
+    geminiAvailable.value = true;
+    fetchModels("gemini");
+  }
+  if (response.providers.includes("anthropic")) {
+    anthropicAvailable.value = true;
+    fetchModels("anthropic");
   }
 });
+
+// Watch for lm studio enabled
+watch(
+  () => globalSettingsStore.settings.lmStudioEnabled,
+  (newValue) => {
+    if (newValue) {
+      fetchModels("lmstudio");
+    }
+  },
+);
 </script>
 
 <template>
@@ -312,6 +334,43 @@ onMounted(async () => {
     </div>
   </SettingsGroup>
 
+  <!-- LMStudio -->
+  <SettingsGroup title="lm studio" icon="local:lmstudio">
+    <div>
+      <SettingsToggleItem
+        title="enable LM Studio"
+        description="only connects to local lm studio instance for now"
+        :value="lmStudioEnabled"
+        @toggle="
+          () =>
+            globalSettingsStore.updateSettings({
+              lmStudioEnabled: !lmStudioEnabled,
+            })
+        "
+      />
+      <div
+        v-if="lmStudioEnabled"
+        class="w-full grid grid-cols-2 md:grid-cols-3 gap-2 text-nowrap mb-3"
+      >
+        <div
+          v-for="model in models?.lmstudio.sort((a, b) =>
+            a.name.localeCompare(b.name),
+          )"
+          :key="model.name"
+          :class="[
+            'flex border border-(--main-color) rounded-full px-3 cursor-pointer ',
+            checkAvailableModel(model)
+              ? 'bg-(--main-color) text-(--bg-color)'
+              : 'text-(--text-color)',
+          ]"
+          @click="updateAvailableModels(model)"
+        >
+          <HoverScrollText>{{ model.name }}</HoverScrollText>
+        </div>
+      </div>
+    </div>
+  </SettingsGroup>
+  <!-- Ollama -->
   <SettingsGroup title="ollama" icon="simple-icons:ollama">
     <div class="flex items-center gap-2 mb-4">
       <div class="text-(--main-color)">add url</div>
@@ -422,6 +481,8 @@ onMounted(async () => {
       </div>
     </div>
   </SettingsGroup>
+
+  <!-- Leftover Models -->
   <SettingsGroup
     v-if="availableModels.filter((m) => !checkModelAgainstEndpoint(m)).length"
     title="leftover models"
```

app/components/Settings/SettingsGroup.vue

Lines changed: 1 addition & 1 deletion
```diff
@@ -10,7 +10,7 @@ defineProps<{
   <div
     class="flex w-full items-center justify-between gap-3 text-(--main-color) text-xl settings-group-header"
   >
-    <div class="flex items-center gap-1 settings-group-title">
+    <div class="flex items-center gap-1 settings-group-title text-nowrap">
       <Icon
         v-if="typeof icon === 'string'"
         :name="icon"
```

app/stores/globalSettings.ts

Lines changed: 2 additions & 0 deletions
```diff
@@ -7,6 +7,7 @@ export interface GlobalSettings {
   allowRegistration: boolean;
   allowFileUpload: boolean;
   ollamaUrls: string[];
+  lmStudioEnabled: boolean;
 }
 
 function getDefaultSettings(): GlobalSettings {
@@ -15,6 +16,7 @@ function getDefaultSettings(): GlobalSettings {
     allowRegistration: false,
     allowFileUpload: false,
     ollamaUrls: [],
+    lmStudioEnabled: false,
   };
 }
 
```

app/utils/icon.ts

Lines changed: 1 addition & 0 deletions
```diff
@@ -6,6 +6,7 @@ export function getModelProviderIcon(provider?: string) {
     gemini: "simple-icons:googlegemini",
     openai: "simple-icons:openai",
     anthropic: "simple-icons:anthropic",
+    lmstudio: "local:lmstudio",
   };
   return modelIcons[provider] || "";
 }
```

nuxt.config.ts

Lines changed: 1 addition & 2 deletions
```diff
@@ -63,11 +63,10 @@ export default defineNuxtConfig({
     },
   },
   icon: {
-    provider: "server",
     customCollections: [
       {
         prefix: "local",
-        dir: "./assets/icons",
+        dir: "./app/assets/icons",
       },
     ],
     clientBundle: {
```

package.json

Lines changed: 1 addition & 0 deletions
```diff
@@ -25,6 +25,7 @@
     "@libsql/client": "^0.15.4",
     "@libsql/darwin-arm64": "^0.5.8",
     "@libsql/linux-arm64-gnu": "^0.5.8",
+    "@lmstudio/sdk": "^1.1.1",
     "@nuxt/eslint": "1.3.0",
     "@nuxt/fonts": "0.11.2",
     "@nuxt/icon": "1.12.0",
```

pnpm-lock.yaml

Lines changed: 32 additions & 0 deletions
Some generated files are not rendered by default.

server/api/llm.post.ts

Lines changed: 7 additions & 0 deletions
```diff
@@ -72,6 +72,13 @@ export default defineEventHandler(async (event) => {
         systemPrompt,
       });
       break;
+    case "lmstudio":
+      stream = await streamLMStudio({
+        history,
+        model: model.name,
+        systemPrompt,
+      });
+      break;
     case "ollama":
       if (!model.url) {
         logger.error("POST /api/llm: Invalid request: No URL specified");
```

server/api/models/anthropic/index.get.ts

Lines changed: 2 additions & 2 deletions
```diff
@@ -11,7 +11,7 @@ export default defineEventHandler(async (event) => {
   });
 
   if (!session) {
-    logger.error("GET /api/models/openai: Unauthorized access attempt");
+    logger.error("GET /api/models/anthropic: Unauthorized access attempt");
     setResponseStatus(event, 401);
     return {
       message: "Unauthorized",
@@ -32,7 +32,7 @@ export default defineEventHandler(async (event) => {
       models,
     };
   } catch (error) {
-    logger.error(error, "GET /api/models/openai: Error fetching models");
+    logger.error(error, "GET /api/models/anthropic: Error fetching models");
     setResponseStatus(event, 500);
     return {
       message: "Failed to fetch models",
```
