570 | 570 | },
571 | 571 | {
572 | 572 | "name": "agent.py",
573 | | - "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n    \"\"\"\n    State of the agent.\n    \"\"\"\n    tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n    \"\"\"\n    Standard chat node based on the ReAct design pattern. It handles:\n    - The model to use (and binds in CopilotKit actions and the tools defined above)\n    - The system prompt\n    - Getting a response from the model\n    - Handling tool calls\n\n    For more about the ReAct design pattern, see:\n    https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n    \"\"\"\n\n    model = ChatOpenAI(model=\"gpt-4o\")\n\n    model_with_tools = model.bind_tools(\n        [\n            *state.get(\"tools\", []),  # bind tools defined by ag-ui\n        ],\n        parallel_tool_calls=False,\n    )\n\n    system_message = SystemMessage(\n        content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n    )\n\n    response = await model_with_tools.ainvoke([\n        system_message,\n        *state[\"messages\"],\n    ], config)\n\n    return Command(\n        goto=END,\n        update={\n            \"messages\": [response],\n        }\n    )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\n\ngraph = workflow.compile()",
| 573 | + "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", |
574 | 574 | "language": "python",
575 | 575 | "type": "file"
576 | 576 | },
759 | 759 | },
760 | 760 | {
761 | 761 | "name": "agent.py",
762 | | - "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n    \"\"\"\n    State of the agent.\n    \"\"\"\n    tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n    \"\"\"\n    Standard chat node based on the ReAct design pattern. It handles:\n    - The model to use (and binds in CopilotKit actions and the tools defined above)\n    - The system prompt\n    - Getting a response from the model\n    - Handling tool calls\n\n    For more about the ReAct design pattern, see:\n    https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n    \"\"\"\n\n    model = ChatOpenAI(model=\"gpt-4o\")\n\n    model_with_tools = model.bind_tools(\n        [\n            *state.get(\"tools\", []),  # bind tools defined by ag-ui\n        ],\n        parallel_tool_calls=False,\n    )\n\n    system_message = SystemMessage(\n        content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n    )\n\n    response = await model_with_tools.ainvoke([\n        system_message,\n        *state[\"messages\"],\n    ], config)\n\n    return Command(\n        goto=END,\n        update={\n            \"messages\": [response],\n        }\n    )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\n\ngraph = workflow.compile()",
| 762 | + "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", |
763 | 763 | "language": "python",
764 | 764 | "type": "file"
765 | 765 | }
971 | 971 | },
972 | 972 | {
973 | 973 | "name": "agent.py",
974 | | - "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n    \"\"\"\n    State of the agent.\n    \"\"\"\n    tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n    \"\"\"\n    Standard chat node based on the ReAct design pattern. It handles:\n    - The model to use (and binds in CopilotKit actions and the tools defined above)\n    - The system prompt\n    - Getting a response from the model\n    - Handling tool calls\n\n    For more about the ReAct design pattern, see:\n    https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n    \"\"\"\n\n    model = ChatOpenAI(model=\"gpt-4o\")\n\n    model_with_tools = model.bind_tools(\n        [\n            *state.get(\"tools\", []),  # bind tools defined by ag-ui\n        ],\n        parallel_tool_calls=False,\n    )\n\n    system_message = SystemMessage(\n        content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n    )\n\n    response = await model_with_tools.ainvoke([\n        system_message,\n        *state[\"messages\"],\n    ], config)\n\n    return Command(\n        goto=END,\n        update={\n            \"messages\": [response],\n        }\n    )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\n\ngraph = workflow.compile()",
| 974 | + "content": "\"\"\"\nAn example demonstrating tool-based generative UI using LangGraph.\n\"\"\"\n\nimport os\nfrom typing import Any, List\nfrom typing_extensions import Literal\nfrom langchain_openai import ChatOpenAI\nfrom langchain_core.messages import SystemMessage\nfrom langchain_core.runnables import RunnableConfig\nfrom langgraph.graph import StateGraph, END\nfrom langgraph.types import Command\nfrom langgraph.graph import MessagesState\nfrom langgraph.prebuilt import ToolNode\n\n\nclass AgentState(MessagesState):\n \"\"\"\n State of the agent.\n \"\"\"\n tools: List[Any]\n\nasync def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal[\"tool_node\", \"__end__\"]]:\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n model = ChatOpenAI(model=\"gpt-4o\")\n\n model_with_tools = model.bind_tools(\n [\n *state.get(\"tools\", []), # bind tools defined by ag-ui\n ],\n parallel_tool_calls=False,\n )\n\n system_message = SystemMessage(\n content=f\"Help the user with writing Haikus. If the user asks for a haiku, use the generate_haiku tool to display the haiku to the user.\"\n )\n\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n return Command(\n goto=END,\n update={\n \"messages\": [response],\n }\n )\n\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\n# This is required even though we don't have any backend tools to pass in.\nworkflow.add_node(\"tool_node\", ToolNode(tools=[]))\nworkflow.set_entry_point(\"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()\n", |
975 | 975 | "language": "python",
976 | 976 | "type": "file"
977 | 977 | },