"""
This is the main entry point for the agent.
It defines the workflow graph, state, tools, nodes, and edges.
"""

from typing import Any, List
from typing_extensions import Literal
from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage
from langchain_core.runnables import RunnableConfig
from langchain.tools import tool
from langgraph.graph import StateGraph, END, MessagesState
from langgraph.types import Command
from langgraph.prebuilt import ToolNode

class AgentState(MessagesState):
    """
    Here we define the state of the agent.

    We inherit from MessagesState, which provides the `messages` field,
    and add two custom fields: `proverbs`, a list of proverbs the agent
    accumulates, and `tools`, the frontend tools passed in by ag-ui at
    runtime.
    """
    proverbs: List[str] = []
    tools: List[Any]
    # your_custom_agent_state: str = ""

@tool
def get_weather(location: str):
    """
    Get the weather for a given location.
    """
    return f"The weather for {location} is 70 degrees."

# @tool
# def your_tool_here(your_arg: str):
#     """Your tool description here."""
#     print("Your tool logic here")
#     return "Your tool response here."

tools = [
    get_weather,
    # your_tool_here,
]
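
# Note: a @tool-decorated function is a LangChain Runnable rather than a
# plain function, so it is invoked with e.g.
# `get_weather.invoke({"location": "Paris"})` instead of being called
# directly.
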
async def chat_node(state: AgentState, config: RunnableConfig) -> Command[Literal["tool_node", "__end__"]]:
    """
    Standard chat node based on the ReAct design pattern. It handles:
    - The model to use (and binds in the frontend tools from state and the tools defined above)
    - The system prompt
    - Getting a response from the model
    - Routing tool calls to the tool node

    For more about the ReAct design pattern, see:
    https://www.perplexity.ai/search/react-agents-NcXLQhreS0WDzpVaS4m9Cg
    """
    # 1. Define the model
    model = ChatOpenAI(model="gpt-4o")

    # 2. Bind the tools to the model
    model_with_tools = model.bind_tools(
        [
            *state.get("tools", []),  # bind tools provided by the frontend via ag-ui
            get_weather,
            # your_tool_here
        ],
        # 2.1 Disable parallel tool calls to avoid race conditions.
        # Enable parallel tool calls for faster performance if you are
        # prepared to manage the complexity of running them concurrently.
        parallel_tool_calls=False,
    )

    # 3. Define the system message by which the chat model will be run
    system_message = SystemMessage(
        content=f"You are a helpful assistant. The current proverbs are {state.get('proverbs', [])}."
    )

    # 4. Run the model to generate a response
    response = await model_with_tools.ainvoke([
        system_message,
        *state["messages"],
    ], config)

    # 5. Route to the tool node if the model called one of our backend
    #    tools; frontend (ag-ui) tool calls are left for the client.
    if response.tool_calls and all(
        call["name"] in {t.name for t in tools} for call in response.tool_calls
    ):
        return Command(goto="tool_node", update={"messages": response})

    # 6. No backend tool calls to run, so we can end the graph.
    return Command(
        goto=END,
        update={"messages": response}
    )

# Define the workflow graph
workflow = StateGraph(AgentState)
workflow.add_node("chat_node", chat_node)
workflow.add_node("tool_node", ToolNode(tools=tools))
workflow.add_edge("tool_node", "chat_node")
workflow.set_entry_point("chat_node")

graph = workflow.compile()
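
# A minimal local smoke test, assuming OPENAI_API_KEY is set in the
# environment; the sample question and the empty "tools" list (normally
# populated by ag-ui) are illustrative assumptions, not part of the agent.
if __name__ == "__main__":
    import asyncio
    from langchain_core.messages import HumanMessage

    async def _demo():
        result = await graph.ainvoke({
            "messages": [HumanMessage(content="What is the weather in Paris?")],
            "proverbs": [],
            "tools": [],
        })
        print(result["messages"][-1].content)

    asyncio.run(_demo())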