import { TavilySearch } from "@langchain/tavily";
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import { ToolNode } from "@langchain/langgraph/prebuilt";
import { StateGraph, MessagesAnnotation } from "@langchain/langgraph";

// Define the tools for the agent to use
const tools = [new TavilySearch({ maxResults: 3 })];
const toolNode = new ToolNode(tools);

const baseUrl = process.env.LLM_URL || "https://api.openai.com/v1/";
console.log("Using LLM base URL:", baseUrl);
const baseModel = process.env.LLM_MODEL || "gpt-4o-mini";
console.log("Using LLM model:", baseModel);

// Create a model and give it access to the tools
const model = new ChatOpenAI({
  model: baseModel,
  temperature: 0.7,
  configuration: {
    baseURL: baseUrl,
  },
}).bindTools(tools);
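
// The env overrides above make it possible to point this agent at any
// OpenAI-compatible server. For example (a sketch; the file name and local
// model name are assumptions, adjust to your setup):
//
//   LLM_URL=http://localhost:11434/v1/ LLM_MODEL=llama3.1 node agent.js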

// Define the function that determines whether to continue or not
function shouldContinue({ messages }: typeof MessagesAnnotation.State) {
  const lastMessage = messages[messages.length - 1] as AIMessage;

  // If the LLM makes a tool call, then we route to the "tools" node
  if (lastMessage.tool_calls?.length) {
    return "tools";
  }
  // Otherwise, we stop (reply to the user) using the special "__end__" node
  return "__end__";
}

// Define the function that calls the model
async function callModel(state: typeof MessagesAnnotation.State) {
  const response = await model.invoke(state.messages);

  // We return a list, because this will get added to the existing list
  return { messages: [response] };
}

// Define a new graph
const workflow = new StateGraph(MessagesAnnotation)
  .addNode("agent", callModel)
  .addEdge("__start__", "agent") // __start__ is a special name for the entrypoint
  .addNode("tools", toolNode)
  .addEdge("tools", "agent")
  .addConditionalEdges("agent", shouldContinue);

// Finally, we compile it into a LangChain Runnable.
const app = workflow.compile();
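
// A minimal sketch of streaming intermediate steps instead of waiting for the
// final state, using LangGraph's "values" stream mode (uncomment to try):
//
// const stream = await app.stream(
//   { messages: [new HumanMessage("What is the weather in Tokyo?")] },
//   { streamMode: "values" },
// );
// for await (const chunk of stream) {
//   console.log(chunk.messages[chunk.messages.length - 1].content);
// }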

// Helper function to get agent output for a given input and optional previous messages
const getAgentOutput = async (input: string, previousMessages: (HumanMessage | AIMessage)[] = []) => {
  const initialState = {
    messages: [...previousMessages, new HumanMessage(input)],
  };

  const finalState = await app.invoke(initialState);
  return {
    content: finalState.messages[finalState.messages.length - 1].content,
    messages: finalState.messages,
  };
};

// Helper function to get agent output as a readable string
export const getAgentOutputAsString = async (input: string, previousMessages: (HumanMessage | AIMessage)[] = []) => {
  const { content } = await getAgentOutput(input, previousMessages);
  return content;
};
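
// Example usage (a sketch, not part of the exported API; assumes
// OPENAI_API_KEY and TAVILY_API_KEY are set in the environment):
//
// const first = await getAgentOutput("What is the weather in SF?");
// console.log(first.content);
//
// // Follow-up turn, threading the prior conversation back in:
// console.log(await getAgentOutputAsString("What about NY?", first.messages));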