
Commit 1472547

Merge pull request #25 from jakobhoeg/develop
feature: handle fetching, chatting & pulling models clientside when run in production env.
2 parents: a8367cc + 78ef863

10 files changed: +357 −142 lines

next.config.mjs

Lines changed: 25 additions & 1 deletion
@@ -1,4 +1,28 @@
 /** @type {import('next').NextConfig} */
-const nextConfig = {};
+const nextConfig = {
+  webpack: (config, { isServer }) => {
+    // Fixes npm packages that depend on `fs` module
+    if (!isServer) {
+      config.resolve.fallback = {
+        ...config.resolve.fallback, // if you miss it, all the other options in fallback, specified
+        // by next.js will be dropped. Doesn't make much sense, but how it is
+        fs: false, // the solution
+        module: false,
+        perf_hooks: false,
+      };
+    }
+
+    return config
+  },
+  typescript: {
+    // !! WARN !!
+    // Dangerously allow production builds to successfully complete even if
+    // your project has type errors.
+    // !! WARN !!
+    ignoreBuildErrors: true,
+  },
+};
+
+
 
 export default nextConfig;
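The fallback matters because this PR instantiates ChatOllama in the browser: @langchain/community transitively references Node-only modules (fs, module, perf_hooks), and mapping them to false tells webpack to substitute empty modules in the client bundle instead of failing the build. A minimal sketch of the kind of client-side import this unblocks (the module and function names here are hypothetical, not part of the commit):

// use-client-ollama.ts — hypothetical client module; without the fallback above,
// webpack would fail to resolve fs/module/perf_hooks pulled in transitively.
"use client";

import { ChatOllama } from "@langchain/community/chat_models/ollama";

// Creating the model in the browser is what the production (client-side)
// chat path in src/app/[id]/page.tsx relies on.
export function createClientOllama(model: string) {
  return new ChatOllama({
    baseUrl: "http://localhost:11434", // assumed default; the app reads OLLAMA_URL
    model,
  });
}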

package-lock.json

Lines changed: 26 additions & 10 deletions
Some generated files are not rendered by default.

src/app/[id]/page.tsx

Lines changed: 74 additions & 5 deletions
@@ -1,9 +1,13 @@
 "use client";
 
 import { ChatLayout } from "@/components/chat/chat-layout";
+import { getSelectedModel } from "@/lib/model-helper";
+import { ChatOllama } from "@langchain/community/chat_models/ollama";
+import { AIMessage, HumanMessage } from "@langchain/core/messages";
+import { BytesOutputParser } from "@langchain/core/output_parsers";
 import { ChatRequestOptions } from "ai";
-import { useChat } from "ai/react";
-import React from "react";
+import { Message, useChat } from "ai/react";
+import React, { useEffect } from "react";
 import { v4 as uuidv4 } from "uuid";
 
 export default function Page({ params }: { params: { id: string } }) {
@@ -16,9 +20,24 @@ export default function Page({ params }: { params: { id: string } }) {
     error,
     stop,
     setMessages,
+    setInput
   } = useChat();
   const [chatId, setChatId] = React.useState<string>("");
-  const [selectedModel, setSelectedModel] = React.useState<string>("mistral");
+  const [selectedModel, setSelectedModel] = React.useState<string>(
+    getSelectedModel()
+  );
+  const [ollama, setOllama] = React.useState<ChatOllama>();
+  const env = process.env.NODE_ENV;
+
+  useEffect(() => {
+    if (env === "production") {
+      const newOllama = new ChatOllama({
+        baseUrl: process.env.OLLAMA_URL || "http://localhost:11434",
+        model: selectedModel,
+      });
+      setOllama(newOllama);
+    }
+  }, [selectedModel]);
 
   React.useEffect(() => {
     if (params.id) {
@@ -29,6 +48,51 @@ export default function Page({ params }: { params: { id: string } }) {
     }
   }, [setMessages]);
 
+  const addMessage = (Message: any) => {
+    console.log("addMessage:", Message);
+    messages.push(Message);
+    window.dispatchEvent(new Event("storage"));
+    setMessages([...messages]);
+  };
+
+
+  // Function to handle chatting with Ollama in production (client side)
+  const handleSubmitProduction = async (
+    e: React.FormEvent<HTMLFormElement>
+  ) => {
+    e.preventDefault();
+
+    addMessage({ role: "user", content: input, id: chatId });
+    setInput("");
+
+    if (ollama) {
+      const parser = new BytesOutputParser();
+
+      console.log(messages);
+      const stream = await ollama
+        .pipe(parser)
+        .stream(
+          (messages as Message[]).map((m) =>
+            m.role == "user"
+              ? new HumanMessage(m.content)
+              : new AIMessage(m.content)
+          )
+        );
+
+      const decoder = new TextDecoder();
+
+      let responseMessage = "";
+      for await (const chunk of stream) {
+        const decodedChunk = decoder.decode(chunk);
+        responseMessage += decodedChunk;
+      }
+      setMessages([
+        ...messages,
+        { role: "assistant", content: responseMessage, id: chatId },
+      ]);
+    }
+  };
+
   const onSubmit = (e: React.FormEvent<HTMLFormElement>) => {
     e.preventDefault();
 
@@ -43,8 +107,13 @@ export default function Page({ params }: { params: { id: string } }) {
       },
     };
 
-    // Call the handleSubmit function with the options
-    handleSubmit(e, requestOptions);
+    if (env === "production" && selectedModel !== "REST API") {
+      handleSubmitProduction(e);
+    } else {
+      // use the /api/chat route
+      // Call the handleSubmit function with the options
+      handleSubmit(e, requestOptions);
+    }
   };
 
   // When starting a new chat, append the messages to the local storage
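getSelectedModel() from @/lib/model-helper is not shown in this diff; given that the previous default was the hard-coded string "mistral" and the model choice now survives navigation, it plausibly reads the selection from localStorage with a fallback. A sketch under those assumptions, not the actual helper:

// lib/model-helper.ts — sketch only; the real implementation is not part of
// the lines shown in this commit view.
export function getSelectedModel(): string {
  if (typeof window !== "undefined") {
    // Assumed storage key; the previous hard-coded default was "mistral".
    return localStorage.getItem("selectedModel") || "mistral";
  }
  return "mistral";
}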

src/app/api/chat/route.ts

Lines changed: 2 additions & 0 deletions
@@ -3,6 +3,8 @@ import { ChatOllama } from "@langchain/community/chat_models/ollama";
 import { AIMessage, HumanMessage } from "@langchain/core/messages";
 import { BytesOutputParser } from "@langchain/core/output_parsers";
 
+export const runtime = "edge";
+export const dynamic = "force-dynamic";
 
 export async function POST(req: Request) {
   const { messages, selectedModel } = await req.json();
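Only the two route-segment exports are new here: runtime = "edge" moves the handler to the Edge runtime, and dynamic = "force-dynamic" opts the route out of static caching. For context, a sketch of how the rest of the handler presumably streams the reply; the body below is inferred from the imports and the client-side handleSubmitProduction, and is not part of this diff:

// Sketch of the surrounding handler, assuming it mirrors the client-side
// production path shown in src/app/[id]/page.tsx.
import { Message, StreamingTextResponse } from "ai";
import { ChatOllama } from "@langchain/community/chat_models/ollama";
import { AIMessage, HumanMessage } from "@langchain/core/messages";
import { BytesOutputParser } from "@langchain/core/output_parsers";

export const runtime = "edge";
export const dynamic = "force-dynamic";

export async function POST(req: Request) {
  const { messages, selectedModel } = await req.json();

  const model = new ChatOllama({
    baseUrl: process.env.OLLAMA_URL || "http://localhost:11434",
    model: selectedModel,
  });

  // Map the chat history to LangChain messages and stream raw bytes
  // back to the useChat() hook on the client.
  const parser = new BytesOutputParser();
  const stream = await model.pipe(parser).stream(
    (messages as Message[]).map((m) =>
      m.role === "user" ? new HumanMessage(m.content) : new AIMessage(m.content)
    )
  );

  return new StreamingTextResponse(stream);
}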

src/app/api/tags/route.ts

Lines changed: 4 additions & 1 deletion
@@ -1,6 +1,9 @@
+export const dynamic = "force-dynamic";
+
 export async function GET(req: Request) {
+  const OLLAMA_URL = process.env.OLLAMA_URL || "http://localhost:11434";
   const res = await fetch(
-    process.env.OLLAMA_URL + "/api/tags"
+    OLLAMA_URL + "/api/tags"
   );
   return new Response(res.body, res);
 }
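This route proxies Ollama's /api/tags endpoint, now with a localhost fallback when OLLAMA_URL is unset, and force-dynamic ensures the model list is fetched on every request rather than cached at build time. A hedged sketch of how a client might consume it (this consumer is not part of the diff shown):

// Hypothetical consumer of the /api/tags proxy route.
async function fetchInstalledModels(): Promise<string[]> {
  const res = await fetch("/api/tags");
  if (!res.ok) {
    throw new Error(`Failed to fetch models: ${res.status}`);
  }
  // Ollama's /api/tags responds with { models: [{ name, ... }] }.
  const data: { models: { name: string }[] } = await res.json();
  return data.models.map((m) => m.name);
}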
