Skip to content

Commit a7a0bb6

Browse files
committed
feat: add LM Studio completion endpoint
1 parent 6621e7f commit a7a0bb6

File tree

3 files changed

+46
-1
lines changed

3 files changed

+46
-1
lines changed

server/api/generate-title.post.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ import { auth } from "~/utils/auth";
33
import type { LocalMessage, Model } from "~/utils/db/local";
44
import { completionOpenAI } from "../utils/llm/completionOpenAi";
55
import { completionOllama } from "../utils/llm/completionOllama";
6+
import { completionLMStudio } from "../utils/llm/completionLMStudio";
67
import { completionGemini } from "../utils/llm/completionGemini";
78
import { completionAnthropic } from "../utils/llm/completionAnthropic";
89

@@ -69,6 +70,13 @@ export default defineEventHandler(async (event) => {
6970
systemPrompt: systemPrompt,
7071
});
7172
break;
73+
case "lmstudio":
74+
title = await completionLMStudio({
75+
history,
76+
model: model.name,
77+
systemPrompt: systemPrompt,
78+
});
79+
break;
7280
default:
7381
logger.error("POST /api/generate-title: Invalid provider");
7482
setResponseStatus(event, 400);

server/utils/llm/completionAnthropic.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ export async function completionAnthropic({
2828
throw new Error("Invalid response from Anthropic");
2929
}
3030
} catch (error) {
31-
logger.error(error, "Error getiting completion from OpenAI");
31+
logger.error(error, "Error getiting completion from Anthropic");
3232
throw new Error("Internal server error");
3333
}
3434
}
Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
import { logger } from "~/utils/logger";
2+
import type { LocalMessage } from "~/utils/db/local";
3+
import { Chat } from "@lmstudio/sdk";
4+
5+
export async function completionLMStudio({
6+
history,
7+
model,
8+
systemPrompt,
9+
}: {
10+
history: LocalMessage[];
11+
model: string;
12+
systemPrompt: string;
13+
}) {
14+
const lmStudio = getLMStudioClient();
15+
16+
try {
17+
const messages: {
18+
role: "user" | "assistant" | "system";
19+
content: string;
20+
}[] = history.map((msg) => ({
21+
role: msg.role,
22+
content: msg.content,
23+
}));
24+
messages.unshift({ role: "system", content: systemPrompt });
25+
const chat = Chat.from(messages);
26+
27+
const lmStudioModel = await lmStudio.llm.model(model);
28+
const completion = await lmStudioModel.respond(chat, {
29+
maxTokens: 200,
30+
});
31+
32+
return completion.content;
33+
} catch (error) {
34+
logger.error(error, "Error getiting completion from LM Studio");
35+
throw new Error("Internal server error");
36+
}
37+
}

0 commit comments

Comments
 (0)