@@ -7,7 +7,6 @@
 from typing import Dict, List, Literal, cast
 
 from langchain_core.messages import AIMessage
-from langchain_core.prompts import ChatPromptTemplate
 from langchain_core.runnables import RunnableConfig
 from langgraph.graph import StateGraph
 from langgraph.prebuilt import ToolNode
@@ -36,25 +35,21 @@ async def call_model(
     """
     configuration = Configuration.from_runnable_config(config)
 
-    # Create a prompt template. Customize this to change the agent's behavior.
-    prompt = ChatPromptTemplate.from_messages(
-        [("system", configuration.system_prompt), ("placeholder", "{messages}")]
-    )
-
     # Initialize the model with tool binding. Change the model or add more tools here.
     model = load_chat_model(configuration.model).bind_tools(TOOLS)
 
-    # Prepare the input for the model, including the current system time
-    message_value = await prompt.ainvoke(
-        {
-            "messages": state.messages,
-            "system_time": datetime.now(tz=timezone.utc).isoformat(),
-        },
-        config,
+    # Format the system prompt. Customize this to change the agent's behavior.
+    system_message = configuration.system_prompt.format(
+        system_time=datetime.now(tz=timezone.utc).isoformat()
     )
 
     # Get the model's response
-    response = cast(AIMessage, await model.ainvoke(message_value, config))
+    response = cast(
+        AIMessage,
+        await model.ainvoke(
+            [{"role": "system", "content": system_message}, *state.messages], config
+        ),
+    )
 
     # Handle the case when it's the last step and the model still wants to use a tool
     if state.is_last_step and response.tool_calls:
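Note: this hunk drops the `ChatPromptTemplate` indirection in favor of plain `str.format` on the configured system prompt, then prepends the result as a system message. A minimal sketch of the new flow, assuming a `system_prompt` that contains a `{system_time}` placeholder (the prompt text below is hypothetical; the real default lives in the template's `Configuration`):

```python
from datetime import datetime, timezone

# Assumed prompt text, for illustration only.
system_prompt = "You are a helpful AI assistant.\nSystem time: {system_time}"

system_message = system_prompt.format(
    system_time=datetime.now(tz=timezone.utc).isoformat()
)
# The formatted string is prepended as a plain system message dict,
# followed by the conversation history, before calling the model.
messages = [{"role": "system", "content": system_message}]
```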
@@ -73,15 +68,15 @@ async def call_model(
 
 # Define a new graph
 
-workflow = StateGraph(State, input=InputState, config_schema=Configuration)
+builder = StateGraph(State, input=InputState, config_schema=Configuration)
 
 # Define the two nodes we will cycle between
-workflow.add_node(call_model)
-workflow.add_node("tools", ToolNode(TOOLS))
+builder.add_node(call_model)
+builder.add_node("tools", ToolNode(TOOLS))
 
 # Set the entrypoint as `call_model`
 # This means that this node is the first one called
-workflow.add_edge("__start__", "call_model")
+builder.add_edge("__start__", "call_model")
 
 
 def route_model_output(state: State) -> Literal["__end__", "tools"]:
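For reference, `route_model_output` (unchanged by this diff, so its body is not shown) decides the next edge by checking the last message for pending tool calls. A sketch of that logic, reconstructed from the signature above rather than copied from the file:

```python
def route_model_output(state: State) -> Literal["__end__", "tools"]:
    """Pick the next node: end the run, or execute the requested tools."""
    last_message = state.messages[-1]
    if not isinstance(last_message, AIMessage):
        raise ValueError(
            f"Expected AIMessage, got {type(last_message).__name__}"
        )
    # No tool calls means the model produced a final answer.
    if not last_message.tool_calls:
        return "__end__"
    # Otherwise run the requested tools, then loop back to the model.
    return "tools"
```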
@@ -108,7 +103,7 @@ def route_model_output(state: State) -> Literal["__end__", "tools"]:
 
 
 # Add a conditional edge to determine the next step after `call_model`
-workflow.add_conditional_edges(
+builder.add_conditional_edges(
     "call_model",
     # After call_model finishes running, the next node(s) are scheduled
     # based on the output from route_model_output
@@ -117,11 +112,11 @@ def route_model_output(state: State) -> Literal["__end__", "tools"]:
 
 # Add a normal edge from `tools` to `call_model`
 # This creates a cycle: after using tools, we always return to the model
-workflow.add_edge("tools", "call_model")
+builder.add_edge("tools", "call_model")
 
-# Compile the workflow into an executable graph
+# Compile the builder into an executable graph
 # You can customize this by adding interrupt points for state updates
-graph = workflow.compile(
+graph = builder.compile(
     interrupt_before=[],  # Add node names here to update state before they're called
     interrupt_after=[],  # Add node names here to update state after they're called
 )
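Once compiled, `graph` behaves like any LangGraph runnable. A minimal usage sketch (an assumed invocation for illustration, not part of this diff):

```python
import asyncio


async def main() -> None:
    # Kick off the agent with a single user message; the call_model/tools
    # cycle runs until route_model_output returns "__end__".
    result = await graph.ainvoke({"messages": [("user", "What is 2 + 2?")]})
    print(result["messages"][-1].content)


asyncio.run(main())
```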