
Commit 9f80eaf

regenerate files.json
1 parent: a32dc7f

1 file changed (+1, −1)


typescript-sdk/apps/dojo/src/files.json

Lines changed: 1 addition & 1 deletion
@@ -758,7 +758,7 @@
     },
     {
       "name": "agent.py",
-      "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()",
+      "content": "\"\"\"\nA simple agentic chat flow using LangGraph instead of CrewAI.\n\"\"\"\n\nfrom typing import List, Any, Optional\nimport os\n\nfrom langchain_core.runnables import RunnableConfig\nfrom langchain_core.messages import SystemMessage\nfrom langchain_openai import ChatOpenAI\nfrom langchain_anthropic import ChatAnthropic\nfrom langchain_google_genai import ChatGoogleGenerativeAI\nfrom langgraph.graph import StateGraph, END, START\nfrom langgraph.graph import MessagesState\nfrom langgraph.types import Command\nfrom langgraph.checkpoint.memory import MemorySaver\n\nclass AgentState(MessagesState):\n \"\"\"\n State of our graph.\n \"\"\"\n tools: List[Any]\n model: str\n\nasync def chat_node(state: AgentState, config: Optional[RunnableConfig] = None):\n \"\"\"\n Standard chat node based on the ReAct design pattern. It handles:\n - The model to use (and binds in CopilotKit actions and the tools defined above)\n - The system prompt\n - Getting a response from the model\n - Handling tool calls\n\n For more about the ReAct design pattern, see:\n https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg\n \"\"\"\n\n\n # 1. Define the model\n model = ChatOpenAI(model=\"o3\")\n if state[\"model\"] == \"Anthropic\":\n model = ChatAnthropic(\n model=\"claude-sonnet-4-20250514\",\n thinking={\"type\": \"enabled\", \"budget_tokens\": 2000}\n )\n elif state[\"model\"] == \"Gemini\":\n model = ChatGoogleGenerativeAI(model=\"gemini-2.5-pro\", thinking_budget=1024)\n\n # Define config for the model\n if config is None:\n config = RunnableConfig(recursion_limit=25)\n\n # 2. Bind the tools to the model\n model_with_tools = model.bind_tools(\n [\n *state[\"tools\"],\n # your_tool_here\n ],\n )\n\n # 3. Define the system message by which the chat model will be run\n system_message = SystemMessage(\n content=\"You are a helpful assistant.\"\n )\n\n # 4. Run the model to generate a response\n response = await model_with_tools.ainvoke([\n system_message,\n *state[\"messages\"],\n ], config)\n\n # 6. We've handled all tool calls, so we can end the graph.\n return Command(\n goto=END,\n update={\n \"messages\": response\n }\n )\n\n# Define a new graph\nworkflow = StateGraph(AgentState)\nworkflow.add_node(\"chat_node\", chat_node)\nworkflow.set_entry_point(\"chat_node\")\n\n# Add explicit edges, matching the pattern in other examples\nworkflow.add_edge(START, \"chat_node\")\nworkflow.add_edge(\"chat_node\", END)\n\n# Conditionally use a checkpointer based on the environment\n# Check for multiple indicators that we're running in LangGraph dev/API mode\nis_fast_api = os.environ.get(\"LANGGRAPH_FAST_API\", \"false\").lower() == \"true\"\n\n# Compile the graph\nif is_fast_api:\n # For CopilotKit and other contexts, use MemorySaver\n from langgraph.checkpoint.memory import MemorySaver\n memory = MemorySaver()\n graph = workflow.compile(checkpointer=memory)\nelse:\n # When running in LangGraph API/dev, don't use a custom checkpointer\n graph = workflow.compile()",
       "language": "python",
       "type": "file"
     }
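
The only change in the regenerated snapshot is inside the embedded agent.py string: the new version adds `import os`, which the module-level `os.environ.get("LANGGRAPH_FAST_API", ...)` check near the bottom of the file requires. Without the import, merely importing the module raises `NameError: name 'os' is not defined`. Decoded from the JSON string (with indentation reconstructed), the relevant portion of the new agent.py reads:

```python
from typing import List, Any, Optional
import os  # added by this commit; used by the environment check below

# ... graph definition unchanged ...

# Conditionally use a checkpointer based on the environment
is_fast_api = os.environ.get("LANGGRAPH_FAST_API", "false").lower() == "true"

# Compile the graph
if is_fast_api:
    # For CopilotKit and other contexts, use MemorySaver
    from langgraph.checkpoint.memory import MemorySaver
    memory = MemorySaver()
    graph = workflow.compile(checkpointer=memory)
else:
    # When running in LangGraph API/dev, don't use a custom checkpointer
    graph = workflow.compile()
```

Note that `MemorySaver` is already imported at the top of the file, so the re-import inside the `if` branch is redundant but harmless.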

0 commit comments
