@@ -1,9 +1,9 @@
 import json
 
-from langchain_core.messages import AIMessage, ToolMessage
-from langgraph.graph import END, START, StateGraph
+from langchain_core.messages import ToolMessage
+from langgraph.graph import END, StateGraph
 
-from template_langgraph.agents.basic_workflow_agent.models import AgentInput, AgentOutput, AgentState, Profile
+from template_langgraph.agents.basic_workflow_agent.models import AgentState
 from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
 from template_langgraph.loggers import get_logger
 from template_langgraph.tools.elasticsearch_tool import search_elasticsearch
@@ -47,8 +47,6 @@ def create_graph(self):
 
         # Create nodes
         workflow.add_node("initialize", self.initialize)
-        workflow.add_node("do_something", self.do_something)
-        workflow.add_node("extract_profile", self.extract_profile)
         workflow.add_node("chat_with_tools", self.chat_with_tools)
         workflow.add_node(
             "tools",
@@ -62,11 +60,8 @@ def create_graph(self):
         workflow.add_node("finalize", self.finalize)
 
         # Create edges
-        # workflow.add_edge(START, "initialize")
-        workflow.add_edge(START, "chat_with_tools")
-        workflow.add_edge("initialize", "do_something")
-        workflow.add_edge("do_something", "extract_profile")
-        workflow.add_edge("extract_profile", "chat_with_tools")
+        workflow.set_entry_point("initialize")
+        workflow.add_edge("initialize", "chat_with_tools")
         workflow.add_conditional_edges(
             "chat_with_tools",
             self.route_tools,
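
Note on this hunk: in LangGraph, set_entry_point("initialize") is shorthand for add_edge(START, "initialize"), which is why START is also dropped from the imports in the first hunk. A minimal standalone sketch of the pattern, using a hypothetical placeholder state and node rather than the project's own classes:

    from typing import TypedDict

    from langgraph.graph import StateGraph

    class DemoState(TypedDict):  # hypothetical stand-in for AgentState
        messages: list

    graph = StateGraph(DemoState)
    graph.add_node("initialize", lambda state: state)  # placeholder node for illustration
    graph.set_entry_point("initialize")  # equivalent to graph.add_edge(START, "initialize")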
@@ -89,33 +84,6 @@ def initialize(self, state: AgentState) -> AgentState:
         # Here you can add any initialization logic if needed
         return state
 
-    def do_something(self, state: AgentState) -> AgentState:
-        """Perform some action with the given state."""
-        logger.info(f"Doing something with state: {state}")
-
-        # Here you can add the logic for the action
-        response: AIMessage = self.llm.invoke(
-            input=state["messages"],
-        )
-        logger.info(f"Response after doing something: {response}")
-        state["messages"].append(
-            {
-                "role": "assistant",
-                "content": response.content,
-            }
-        )
-
-        return state
-
-    def extract_profile(self, state: AgentState) -> AgentState:
-        """Extract profile information from the state."""
-        logger.info(f"Extracting profile from state: {state}")
-        profile = self.llm.with_structured_output(Profile).invoke(
-            input=state["messages"],
-        )
-        state["profile"] = profile
-        return state
-
     def chat_with_tools(self, state: AgentState) -> AgentState:
         """Chat with tools using the state."""
         logger.info(f"Chatting with tools using state: {state}")
@@ -155,25 +123,6 @@ def finalize(self, state: AgentState) -> AgentState:
         # Here you can add any finalization logic if needed
         return state
 
-    def run_agent(self, input: AgentInput) -> AgentOutput:
-        """Run the agent with the given input."""
-        logger.info(f"Running BasicWorkflowAgent with question: {input.model_dump_json(indent=2)}")
-        app = self.create_graph()
-        initial_state: AgentState = {
-            "messages": [
-                {
-                    "role": "user",
-                    "content": input.request,
-                }
-            ],
-        }
-        final_state = app.invoke(initial_state)
-        logger.info(f"Final state after running agent: {final_state}")
-        return AgentOutput(
-            response=final_state["messages"][-1].content,
-            profile=final_state["profile"],
-        )
-
     def draw_mermaid_png(self) -> bytes:
         """Draw the graph in Mermaid format."""
         return self.create_graph().get_graph().draw_mermaid_png()
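
With run_agent() and the AgentInput/AgentOutput wrapper removed, callers now drive the compiled graph directly. A minimal sketch of what that looks like, assuming the class is named BasicWorkflowAgent, constructs without required arguments, and is importable from template_langgraph.agents.basic_workflow_agent.agent (all inferred from this diff, not verified here):

    # Hypothetical import path, inferred from the models module seen in the first hunk.
    from template_langgraph.agents.basic_workflow_agent.agent import BasicWorkflowAgent

    # create_graph() returns the compiled graph, as the removed run_agent() did internally.
    app = BasicWorkflowAgent().create_graph()

    final_state = app.invoke(
        {
            "messages": [
                {"role": "user", "content": "Search the knowledge base for similar incidents."},
            ],
        }
    )

    # The last message holds the assistant's answer after any tool round-trips.
    print(final_state["messages"][-1].content)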