@@ -4,15 +4,15 @@ import { ChatOpenAI } from "@langchain/openai";
4
4
import { HumanMessage , AIMessage } from "@langchain/core/messages" ;
5
5
import { ToolNode } from "@langchain/langgraph/prebuilt" ;
6
6
import { StateGraph , MessagesAnnotation } from "@langchain/langgraph" ;
7
- import { get } from "http" ;
8
7
9
8
// Tools available to the agent: a Tavily web-search tool capped at 3 results.
const searchTool = new TavilySearch({ maxResults: 3 });
const tools = [searchTool];
// Graph node that executes any tool calls the model emits.
const toolNode = new ToolNode(tools);
12
11
13
12
// Resolve LLM connection settings from the environment, falling back to
// the public OpenAI endpoint and a default model when unset.
const baseUrl = process.env.LLM_URL || "https://api.openai.com/v1/";
const baseModel = process.env.LLM_MODEL || "gpt-4o-mini";

// Log the effective configuration so misconfigured deployments are easy to spot.
console.log("Using LLM base URL:", baseUrl);
console.log("Using LLM model:", baseModel);
16
16
// Create a model and give it access to the tools
17
17
const model = new ChatOpenAI ( {
18
18
model : baseModel ,
@@ -56,9 +56,13 @@ const app = workflow.compile();
56
56
57
57
// Helper function to get agent output for a given input and optional previous messages
58
58
const getAgentOutput = async ( input : string , previousMessages : ( HumanMessage | AIMessage ) [ ] = [ ] ) => {
59
+ console . log ( "Getting agent output for input:" , input ) ;
60
+
59
61
const initialState = {
60
62
messages : [ ...previousMessages , new HumanMessage ( input ) ] ,
61
63
} ;
64
+ console . log ( "Initial state messages (JSON):" , JSON . stringify ( initialState . messages , null , 2 ) ) ;
65
+
62
66
const finalState = await app . invoke ( initialState ) ;
63
67
return {
64
68
content : finalState . messages [ finalState . messages . length - 1 ] . content ,
0 commit comments