"""
An example demonstrating agentic generative UI using LangGraph.
"""
import asyncio
from typing import Annotated, Any, List, Literal, Optional

from langchain_core.callbacks.manager import adispatch_custom_event
from langchain_core.messages import SystemMessage
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, MessagesState, StateGraph
from langgraph.types import Command
from pydantic import BaseModel, Field

class Step(BaseModel):
    """A single step of a simulated task, rendered by the frontend UI."""

    # Gerund-form text, e.g. "Digging hole".
    description: str = Field(description="The text of the step in gerund form")
    # Literal restores the `enum: ["pending"]` constraint from the JSON-schema
    # tool this model replaced, so the LLM cannot emit any other initial status.
    status: Literal["pending"] = Field(
        description="The status of the step, always 'pending'"
    )
1424
15- # OpenAI imports
16- from langchain_openai import ChatOpenAI
17- from langchain_core .messages import SystemMessage
1825
1926# This tool simulates performing a task on the server.
2027# The tool call will be streamed to the frontend as it is being generated.
21- PERFORM_TASK_TOOL = {
22- "type" : "function" ,
23- "function" : {
24- "name" : "generate_task_steps_generative_ui" ,
25- "description" : "Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (i.e. Digging hole, opening door, ...)" ,
26- "parameters" : {
27- "type" : "object" ,
28- "properties" : {
29- "steps" : {
30- "type" : "array" ,
31- "items" : {
32- "type" : "object" ,
33- "properties" : {
34- "description" : {
35- "type" : "string" ,
36- "description" : "The text of the step in gerund form"
37- },
38- "status" : {
39- "type" : "string" ,
40- "enum" : ["pending" ],
41- "description" : "The status of the step, always 'pending'"
42- }
43- },
44- "required" : ["description" , "status" ]
45- },
46- "description" : "An array of 10 step objects, each containing text and status"
47- }
48- },
49- "required" : ["steps" ]
50- }
51- }
52- }
# This tool simulates performing a task on the server.
# The tool call will be streamed to the frontend as it is being generated.
@tool
def generate_task_steps_generative_ui(
    steps: Annotated[  # pylint: disable=unused-argument
        List[Step],
        "An array of 10 step objects, each containing text and status"
    ]
):
    """
    Make up 10 steps (only a couple of words per step) that are required for a task.
    The step should be in gerund form (i.e. Digging hole, opening door, ...).
    """
    # Intentionally a no-op: the tool call (its arguments) is what matters —
    # it is streamed to the frontend for rendering, and "execution" of the
    # steps is simulated elsewhere in the graph. Returns None.
5339
5440
class AgentState(MessagesState):
    """
    State of the agent: chat messages (from MessagesState) plus the
    task-step list and the tools supplied by the frontend.
    """
    # Steps produced by the generate_task_steps_generative_ui tool call;
    # each dict carries "description" and "status" keys.
    steps: List[dict] = []
    # Frontend-provided tool definitions, bound to the model alongside
    # the server-side tool.
    tools: List[Any]
5847
5948
60- async def start_flow (state : AgentState , config : RunnableConfig ):
49+ async def start_node (state : AgentState , config : RunnableConfig ): # pylint: disable=unused-argument
6150 """
6251 This is the entry point for the flow.
6352 """
@@ -74,7 +63,7 @@ async def start_flow(state: AgentState, config: RunnableConfig):
7463 )
7564
7665
77- async def chat_node (state : AgentState , config : RunnableConfig ):
66+ async def chat_node (state : AgentState , config : Optional [ RunnableConfig ] = None ):
7867 """
7968 Standard chat node.
8069 """
@@ -89,7 +78,7 @@ async def chat_node(state: AgentState, config: RunnableConfig):
8978
9079 # Define the model
9180 model = ChatOpenAI (model = "gpt-4o" )
92-
81+
9382 # Define config for the model with emit_intermediate_state to stream tool calls to frontend
9483 if config is None :
9584 config = RunnableConfig (recursion_limit = 25 )
@@ -105,7 +94,7 @@ async def chat_node(state: AgentState, config: RunnableConfig):
10594 model_with_tools = model .bind_tools (
10695 [
10796 * state ["tools" ],
108- PERFORM_TASK_TOOL
97+ generate_task_steps_generative_ui
10998 ],
11099 # Disable parallel tool calls to avoid race conditions
111100 parallel_tool_calls = False ,
@@ -121,46 +110,41 @@ async def chat_node(state: AgentState, config: RunnableConfig):
121110
122111 # Extract any tool calls from the response
123112 if hasattr (response , "tool_calls" ) and response .tool_calls and len (response .tool_calls ) > 0 :
124- tool_call = response .tool_calls [0 ]
125-
126- # Handle tool_call as a dictionary rather than an object
127- if isinstance (tool_call , dict ):
128- tool_call_id = tool_call ["id" ]
129- tool_call_name = tool_call ["name" ]
130- tool_call_args = tool_call ["args" ]
131- else :
132- # Handle as an object (backward compatibility)
133- tool_call_id = tool_call .id
134- tool_call_name = tool_call .name
135- tool_call_args = tool_call .args
136-
137- if tool_call_name == "generate_task_steps_generative_ui" :
138- steps = [{"description" : step ["description" ], "status" : step ["status" ]} for step in tool_call_args ["steps" ]]
139-
113+ # Handle dicts or object (backward compatibility)
114+ tool_call = (response .tool_calls [0 ]
115+ if isinstance (response .tool_calls [0 ], dict )
116+ else vars (response .tool_calls [0 ]))
117+
118+ if tool_call ["name" ] == "generate_task_steps_generative_ui" :
119+ steps = [
120+ {"description" : step ["description" ], "status" : step ["status" ]}
121+ for step in tool_call ["args" ]["steps" ]
122+ ]
123+
140124 # Add the tool response to messages
141125 tool_response = {
142126 "role" : "tool" ,
143127 "content" : "Steps executed." ,
144- "tool_call_id" : tool_call_id
128+ "tool_call_id" : tool_call [ "id" ]
145129 }
146130
147131 messages = messages + [tool_response ]
132+ state ["steps" ] = steps
148133
149134 # Return Command to route to simulate_task_node
150- for i , step in enumerate (steps ):
135+ for i , _ in enumerate (steps ):
151136 # simulate executing the step
152137 await asyncio .sleep (1 )
153138 steps [i ]["status" ] = "completed"
154- # Update the state with the completed step - using config as first parameter
155- state ["steps" ] = steps
139+ # Update the state with the completed step using config
156140 await adispatch_custom_event (
157141 "manually_emit_state" ,
158142 state ,
159143 config = config ,
160144 )
161-
145+
162146 return Command (
163- goto = 'start_flow ' ,
147+ goto = 'start_node ' ,
164148 update = {
165149 "messages" : messages ,
166150 "steps" : state ["steps" ]
@@ -180,13 +164,13 @@ async def chat_node(state: AgentState, config: RunnableConfig):
# Build the agent graph over AgentState.
workflow = StateGraph(AgentState)

# Register nodes
workflow.add_node("start_node", start_node)
workflow.add_node("chat_node", chat_node)

# Wire edges: START -> start_node -> chat_node -> END.
# NOTE: set_entry_point("start_node") is equivalent to
# add_edge(START, "start_node"); the original did both, which is redundant —
# a single START edge is sufficient.
workflow.add_edge(START, "start_node")
workflow.add_edge("start_node", "chat_node")
workflow.add_edge("chat_node", END)

# Compile the graph