Commit f9d3b33

fix checkpointer missing in fastapi langgraph toolbasedgenui
1 parent 215bebe commit f9d3b33

File tree

2 files changed (+19, -4 lines)
  • typescript-sdk
    • apps/dojo/src
    • integrations/langgraph/examples/python/agents/tool_based_generative_ui

typescript-sdk/apps/dojo/src/files.json

Lines changed: 3 additions & 3 deletions
@@ -570,7 +570,7 @@
 },
 {
 "name": "agent.py",
-"content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\n\ngraph = workflow.compile()",
+"content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n",
 "language": "python",
 "type": "file"
 },
@@ -759,7 +759,7 @@
 },
 {
 "name": "agent.py",
-"content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\n\ngraph = workflow.compile()",
+"content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n",
 "language": "python",
 "type": "file"
 }
@@ -971,7 +971,7 @@
 },
 {
 "name": "agent.py",
-"content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\n\ngraph = workflow.compile()",
+"content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n",
 "language": "python",
 "type": "file"
 },
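
All three hunks carry the identical change: files.json embeds the same agent.py source at three entries, and each copy is updated in lockstep with the file below. One detail the diff implies but does not state: the LANGGRAPH_FAST_API check runs at import time, when the module body compiles the graph, so the flag has to be set before the module is imported. A minimal sketch of that gating pattern (the import path is assumed from this commit's file layout, not confirmed elsewhere):

# Minimal sketch, assuming the package layout shown in this commit; the import
# path below is illustrative and may differ in the actual dojo setup.
import os

# Set the flag before importing: the graph is compiled when the module body
# runs, so changing the variable afterwards has no effect.
os.environ["LANGGRAPH_FAST_API"] = "true"

from agents.tool_based_generative_ui.agent import graph

# Compiled LangGraph graphs expose their checkpointer; with the flag set this
# should print MemorySaver rather than NoneType.
print(type(graph.checkpointer).__name__)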

typescript-sdk/integrations/langgraph/examples/python/agents/tool_based_generative_ui/agent.py

Lines changed: 16 additions & 1 deletion
@@ -2,6 +2,7 @@
 An example demonstrating tool-based generative UI using LangGraph.
 """
 
+import os
 from typing import Any, List
 from typing_extensions import Literal
 from langchain_openai import ChatOpenAI
@@ -61,5 +62,19 @@ async def chat_node(state: AgentState, config: RunnableConfig) -> Command[Litera
 # This is required even though we don't have any backend tools to pass in.
 workflow.add_node("tool_node", ToolNode(tools=[]))
 workflow.set_entry_point("chat_node")
+workflow.add_edge("chat_node", END)
 
-graph = workflow.compile()
+
+# Conditionally use a checkpointer based on the environment
+# Check for multiple indicators that we're running in LangGraph dev/API mode
+is_fast_api = os.environ.get("LANGGRAPH_FAST_API", "false").lower() == "true"
+
+# Compile the graph
+if is_fast_api:
+    # For CopilotKit and other contexts, use MemorySaver
+    from langgraph.checkpoint.memory import MemorySaver
+    memory = MemorySaver()
+    graph = workflow.compile(checkpointer=memory)
+else:
+    # When running in LangGraph API/dev, don't use a custom checkpointer
+    graph = workflow.compile()
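
A side effect of compiling with MemorySaver that the diff itself doesn't show: once a checkpointer is attached, every invocation must supply a thread_id under config["configurable"], because the checkpointer keys saved state by thread and LangGraph rejects calls without one. A hedged usage sketch (module import, message text, and thread id are illustrative):

# Usage sketch, assuming the compiled `graph` from the agent.py above and an
# OPENAI_API_KEY in the environment.
import asyncio
from langchain_core.messages import HumanMessage

from agent import graph  # the module patched in this commit

async def main():
    # Any stable identifier works; MemorySaver stores one state history per thread_id.
    config = {"configurable": {"thread_id": "haiku-demo"}}
    result = await graph.ainvoke(
        {"messages": [HumanMessage(content="Write a haiku about autumn")]},
        config,
    )
    print(result["messages"][-1].content)

asyncio.run(main())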
