import { TavilySearch } from "@langchain/tavily";
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import { ToolNode } from "@langchain/langgraph/prebuilt";
import { StateGraph, MessagesAnnotation } from "@langchain/langgraph";

// Define the tools for the agent to use
const tools = [new TavilySearch({ maxResults: 3 })];
const toolNode = new ToolNode(tools);

const baseUrl = process.env.LLM_URL || "https://api.openai.com/v1/";
const baseModel = process.env.LLM_MODEL || "gpt-4o-mini";
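// Note: with these defaults, ChatOpenAI reads its key from OPENAI_API_KEY and
// TavilySearch reads its key from TAVILY_API_KEY; both must be set.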

// Create a model and give it access to the tools
const model = new ChatOpenAI({
  model: baseModel,
  temperature: 0.7,
  configuration: {
    baseURL: baseUrl,
  },
}).bindTools(tools);
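// bindTools() returns a new runnable that attaches the tools' JSON schemas to
// each request, so the model can respond with structured tool calls.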

// Define the function that determines whether to continue or not
function shouldContinue({ messages }: typeof MessagesAnnotation.State) {
  const lastMessage = messages[messages.length - 1] as AIMessage;

  // If the LLM makes a tool call, then we route to the "tools" node
  if (lastMessage.tool_calls?.length) {
    return "tools";
  }
  // Otherwise, we stop (reply to the user) using the special "__end__" node
  return "__end__";
}

// Define the function that calls the model
async function callModel(state: typeof MessagesAnnotation.State) {
  const response = await model.invoke(state.messages);

  // We return a list, because this will get added to the existing list
  return { messages: [response] };
}

// Define a new graph
const workflow = new StateGraph(MessagesAnnotation)
  .addNode("agent", callModel)
  .addNode("tools", toolNode)
  .addEdge("__start__", "agent") // __start__ is a special name for the entrypoint
  .addEdge("tools", "agent")
  .addConditionalEdges("agent", shouldContinue);
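// (The strings returned by shouldContinue name the target nodes directly;
// addConditionalEdges also accepts an explicit value-to-node mapping as an
// optional third argument.)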

// Finally, we compile it into a LangChain Runnable.
const app = workflow.compile();
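
// Because the compiled graph is a Runnable, it can also be streamed rather
// than invoked. A minimal sketch, assuming the standard LangGraph streaming
// API with streamMode "values":
//
//   const stream = await app.stream(
//     { messages: [new HumanMessage("hello")] },
//     { streamMode: "values" },
//   );
//   for await (const state of stream) {
//     console.log(state.messages[state.messages.length - 1].content);
//   }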

// Helper function to get agent output for a given input and optional previous messages
const getAgentOutput = async (input: string, previousMessages: (HumanMessage | AIMessage)[] = []) => {
  const initialState = {
    messages: [...previousMessages, new HumanMessage(input)],
  };
  const finalState = await app.invoke(initialState);
  return {
    content: finalState.messages[finalState.messages.length - 1].content,
    messages: finalState.messages,
  };
};

// Helper function to get agent output as a readable string
export const getAgentOutputAsString = async (input: string, previousMessages: (HumanMessage | AIMessage)[] = []) => {
  const { content } = await getAgentOutput(input, previousMessages);
  return content;
};

// // Example usage:
// (async () => {
//   // First query
//   const firstResult = await getAgentOutput("what is the weather in sf");
//   console.log(firstResult.content);

//   // Follow-up query with context
//   const secondResult = await getAgentOutput("what about ny", firstResult.messages);
//   console.log(secondResult.content);
// })();
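
// A sketch of an alternative to threading previousMessages by hand: compile
// the graph with a checkpointer so LangGraph persists conversation state per
// thread. Assumes the MemorySaver checkpointer from @langchain/langgraph;
// appWithMemory and the thread_id value are illustrative names:
//
//   import { MemorySaver } from "@langchain/langgraph";
//   const appWithMemory = workflow.compile({ checkpointer: new MemorySaver() });
//   await appWithMemory.invoke(
//     { messages: [new HumanMessage("what is the weather in sf")] },
//     { configurable: { thread_id: "conversation-1" } },
//   );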