
Commit ebbd955

feat(langgraph-py): add example setup
1 parent 5ebde92 commit ebbd955

22 files changed: +9996 −0 lines
Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
from .agent.agent import LangGraphAgent
from .agent.types import (
    LangGraphEventTypes,
    CustomEventNames,
    State,
    SchemaKeys,
    MessageInProgress,
    RunMetadata,
    MessagesInProgressRecord,
    ToolCall,
    BaseLangGraphPlatformMessage,
    LangGraphPlatformResultMessage,
    LangGraphPlatformActionExecutionMessage,
    LangGraphPlatformMessage,
    PredictStateTool
)

__all__ = [
    "LangGraphAgent",
    "LangGraphEventTypes",
    "CustomEventNames",
    "State",
    "SchemaKeys",
    "MessageInProgress",
    "RunMetadata",
    "MessagesInProgressRecord",
    "ToolCall",
    "BaseLangGraphPlatformMessage",
    "LangGraphPlatformResultMessage",
    "LangGraphPlatformActionExecutionMessage",
    "LangGraphPlatformMessage",
    "PredictStateTool"
]
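For context, a consumer would pull these re-exports from the package root. A minimal sketch, assuming the package resolves to `ag_ui.integrations.langgraph` (inferred from the file paths later in this diff; the path is not stated for this file):

```python
# Sketch only: the import path is an assumption based on this diff's file tree.
from ag_ui.integrations.langgraph import LangGraphAgent, PredictStateTool

print(LangGraphAgent.__name__, PredictStateTool.__name__)
```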
Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
OPENAI_API_KEY=
LANGSMITH_API_KEY=
Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
# LangGraph examples

## How to run

First, create a new `.env` file from `.env.example` and include the required keys.

For Python, run:

`pnpx @langchain/langgraph-cli@latest dev`
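A minimal sketch of loading those keys in application code, assuming python-dotenv is installed (an assumption; the LangGraph dev server may also load `.env` on its own):

```python
# Sketch only: python-dotenv is an assumed dependency, not part of this commit.
import os
from dotenv import load_dotenv

load_dotenv()  # reads OPENAI_API_KEY and LANGSMITH_API_KEY from .env
assert os.environ.get("OPENAI_API_KEY"), "OPENAI_API_KEY is missing from .env"
```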
File renamed without changes.
Lines changed: 81 additions & 0 deletions
@@ -0,0 +1,81 @@
"""
A simple agentic chat flow using LangGraph instead of CrewAI.
"""

from typing import List, Any

# LangGraph imports
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph, END, START, MessagesState
from langgraph.types import Command
from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage
from langgraph.checkpoint.memory import MemorySaver


class AgentState(MessagesState):
    tools: List[Any]


async def chat_node(state: AgentState, config: RunnableConfig):
    """
    Standard chat node based on the ReAct design pattern. It handles:
    - The model to use (and binds in CopilotKit actions and the tools defined above)
    - The system prompt
    - Getting a response from the model
    - Handling tool calls

    For more about the ReAct design pattern, see:
    https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg
    """

    # 1. Define the model
    model = ChatOpenAI(model="gpt-4o")

    # Provide a default config for the model if none was given
    if config is None:
        config = RunnableConfig(recursion_limit=25)

    # 2. Bind the tools to the model
    model_with_tools = model.bind_tools(
        [
            *state["tools"],
            # your_tool_here
        ],
        # 2.1 Disable parallel tool calls to avoid race conditions.
        # Enable this for faster performance if you want to manage
        # the complexity of running tool calls in parallel.
        parallel_tool_calls=False,
    )

    # 3. Define the system message with which the chat model will be run
    system_message = SystemMessage(
        content="You are a helpful assistant."
    )

    # 4. Run the model to generate a response
    response = await model_with_tools.ainvoke([
        system_message,
        *state["messages"],
    ], config)

    # 5. We've handled all tool calls, so we can end the graph.
    return Command(
        goto=END,
        update={
            "messages": response
        }
    )


# Define a new graph
workflow = StateGraph(AgentState)
workflow.add_node("chat_node", chat_node)

# Add explicit edges, matching the pattern in other examples
# (add_edge(START, ...) also sets the entry point, so set_entry_point is not needed)
workflow.add_edge(START, "chat_node")
workflow.add_edge("chat_node", END)

# Compile the graph
agentic_chat_graph = workflow.compile(checkpointer=MemorySaver())
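A minimal driver sketch for the compiled graph above; the `thread_id` value is arbitrary but required by the `MemorySaver` checkpointer, and an `OPENAI_API_KEY` must be set:

```python
import asyncio
from langchain_core.messages import HumanMessage

async def main():
    # "demo-thread" is an arbitrary id; MemorySaver needs one to track state.
    config = {"configurable": {"thread_id": "demo-thread"}}
    result = await agentic_chat_graph.ainvoke(
        {"messages": [HumanMessage(content="Hello!")], "tools": []},
        config,
    )
    print(result["messages"][-1].content)

asyncio.run(main())
```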

python-sdk/ag_ui/integrations/langgraph/examples/agents/agentic_generative_ui/__init__.py

Whitespace-only changes.
Lines changed: 193 additions & 0 deletions
@@ -0,0 +1,193 @@
"""
An example demonstrating agentic generative UI using LangGraph.
"""

import asyncio
from typing import List, Any

# LangGraph imports
from langchain_core.runnables import RunnableConfig
from langgraph.graph import StateGraph, END, START, MessagesState
from langgraph.types import Command
from langchain_core.callbacks.manager import adispatch_custom_event
from langgraph.checkpoint.memory import MemorySaver

# OpenAI imports
from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage

# This tool simulates performing a task on the server.
# The tool call will be streamed to the frontend as it is being generated.
PERFORM_TASK_TOOL = {
    "type": "function",
    "function": {
        "name": "generate_task_steps_generative_ui",
        "description": "Make up 10 steps (only a couple of words per step) that are required for a task. The step should be in gerund form (e.g. Digging hole, opening door, ...)",
        "parameters": {
            "type": "object",
            "properties": {
                "steps": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "description": {
                                "type": "string",
                                "description": "The text of the step in gerund form"
                            },
                            "status": {
                                "type": "string",
                                "enum": ["pending"],
                                "description": "The status of the step, always 'pending'"
                            }
                        },
                        "required": ["description", "status"]
                    },
                    "description": "An array of 10 step objects, each containing text and status"
                }
            },
            "required": ["steps"]
        }
    }
}
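# For illustration, a tool-call arguments payload conforming to the schema
# above looks like this (two of the ten steps shown; a sketch only, this
# payload is generated by the model, not by this file):
#
#   {
#     "steps": [
#       {"description": "Digging hole", "status": "pending"},
#       {"description": "Opening door", "status": "pending"}
#     ]
#   }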

class AgentState(MessagesState):
    steps: List[dict]
    tools: List[Any]


async def start_flow(state: AgentState, config: RunnableConfig):
    """
    This is the entry point for the flow.
    """

    if "steps" not in state:
        state["steps"] = []

    return Command(
        goto="chat_node",
        update={
            "messages": state["messages"],
            "steps": state["steps"]
        }
    )


async def chat_node(state: AgentState, config: RunnableConfig):
    """
    Standard chat node.
    """
    system_prompt = """
    You are a helpful assistant assisting with any task.
    When asked to do something, you MUST call the function `generate_task_steps_generative_ui`
    that was provided to you.
    If you called the function, you MUST NOT repeat the steps in your next response to the user.
    Just give a very brief summary (one sentence) of what you did with some emojis.
    Always say you actually did the steps, not merely generated them.
    """

    # Define the model
    model = ChatOpenAI(model="gpt-4o")

    # Provide a default config for the model if none was given
    if config is None:
        config = RunnableConfig(recursion_limit=25)

    # Use "predict_state" metadata to stream the generate_task_steps_generative_ui
    # tool call's "steps" argument into the "steps" state key on the frontend
    config.setdefault("metadata", {})["predict_state"] = [{
        "state_key": "steps",
        "tool": "generate_task_steps_generative_ui",
        "tool_argument": "steps",
    }]

    # Bind the tools to the model
    model_with_tools = model.bind_tools(
        [
            *state["tools"],
            PERFORM_TASK_TOOL
        ],
        # Disable parallel tool calls to avoid race conditions
        parallel_tool_calls=False,
    )

    # Run the model to generate a response
    response = await model_with_tools.ainvoke([
        SystemMessage(content=system_prompt),
        *state["messages"],
    ], config)

    messages = state["messages"] + [response]

    # Extract any tool calls from the response
    if hasattr(response, "tool_calls") and response.tool_calls:
        tool_call = response.tool_calls[0]

        # Handle tool_call as a dictionary rather than an object
        if isinstance(tool_call, dict):
            tool_call_id = tool_call["id"]
            tool_call_name = tool_call["name"]
            tool_call_args = tool_call["args"]
        else:
            # Handle as an object (backward compatibility)
            tool_call_id = tool_call.id
            tool_call_name = tool_call.name
            tool_call_args = tool_call.args

        if tool_call_name == "generate_task_steps_generative_ui":
            steps = [{"description": step["description"], "status": step["status"]} for step in tool_call_args["steps"]]

            # Add the tool response to messages
            tool_response = {
                "role": "tool",
                "content": "Steps executed.",
                "tool_call_id": tool_call_id
            }

            messages = messages + [tool_response]

            # Simulate executing each step, emitting the updated state as we go
            for i, step in enumerate(steps):
                # simulate executing the step
                await asyncio.sleep(1)
                steps[i]["status"] = "completed"
                # Manually emit the updated state to the frontend
                state["steps"] = steps
                await adispatch_custom_event(
                    "manually_emit_state",
                    state,
                    config=config,
                )

            # Route back to start_flow so the model can summarize the result
            return Command(
                goto="start_flow",
                update={
                    "messages": messages,
                    "steps": state["steps"]
                }
            )

    return Command(
        goto=END,
        update={
            "messages": messages,
            "steps": state["steps"]
        }
    )


# Define the graph
workflow = StateGraph(AgentState)

# Add nodes
workflow.add_node("start_flow", start_flow)
workflow.add_node("chat_node", chat_node)

# Add edges (equivalent to the routing in CrewAI);
# add_edge(START, ...) also sets the entry point
workflow.add_edge(START, "start_flow")
workflow.add_edge("start_flow", "chat_node")
workflow.add_edge("chat_node", END)

# Compile the graph
graph = workflow.compile(checkpointer=MemorySaver())
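A sketch of observing the manually emitted state from a client, assuming LangChain's `astream_events` v2 API, in which `adispatch_custom_event` surfaces as `on_custom_event` entries:

```python
import asyncio
from langchain_core.messages import HumanMessage

async def main():
    # "demo" is an arbitrary thread id required by the MemorySaver checkpointer.
    config = {"configurable": {"thread_id": "demo"}}
    async for event in graph.astream_events(
        {"messages": [HumanMessage(content="Plant a tree")], "tools": []},
        config,
        version="v2",
    ):
        # Print each state snapshot that chat_node dispatches while
        # simulating the steps.
        if event["event"] == "on_custom_event" and event["name"] == "manually_emit_state":
            print(event["data"]["steps"])

asyncio.run(main())
```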

python-sdk/ag_ui/integrations/langgraph/examples/agents/human_in_the_loop/__init__.py

Whitespace-only changes.
