diff --git a/docs/references.md b/docs/references.md
index b7abf74..9f31dd4 100644
--- a/docs/references.md
+++ b/docs/references.md
@@ -9,6 +9,9 @@
 - [Streamlit](https://python.langchain.com/docs/integrations/callbacks/streamlit/)
 - [LangChain MCP Adapters](https://github.com/langchain-ai/langchain-mcp-adapters)
 - [Research Agent with MCP Integration.](https://github.com/langchain-ai/deep_research_from_scratch/blob/main/src/deep_research_from_scratch/research_agent_mcp.py)
+- [Command: A new tool for building multi-agent architectures in LangGraph](https://blog.langchain.com/command-a-new-tool-for-multi-agent-architectures-in-langgraph/)
+- [Combine control flow and state updates with Command](https://langchain-ai.github.io/langgraph/how-tos/graph-api/#combine-control-flow-and-state-updates-with-command)
+- [Command: a new tool for building multi-agent architectures in LangGraph](https://www.youtube.com/watch?v=6BJDKf90L9A)
 
 ### Sample Codes
 
diff --git a/pyproject.toml b/pyproject.toml
index d4d1a2d..07a671e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -85,3 +85,4 @@ non-subscriptable = "ignore"
 possibly-unbound-attribute = "ignore"
 unresolved-attribute = "ignore"
 invalid-argument-type = "ignore"
+invalid-type-form = "ignore"
diff --git a/scripts/simple_multi_agent_operator.py b/scripts/simple_multi_agent_operator.py
new file mode 100644
index 0000000..5683ea1
--- /dev/null
+++ b/scripts/simple_multi_agent_operator.py
@@ -0,0 +1,80 @@
+import logging
+
+import typer
+from dotenv import load_dotenv
+
+from template_langgraph.agents.simple_multi_agent.multi_agent import app as multi_agent_app
+from template_langgraph.agents.simple_multi_agent.weather_agent import app as weather_agent_app
+from template_langgraph.loggers import get_logger
+
+app = typer.Typer(
+    add_completion=False,
+    help="SimpleMultiAgent CLI",
+)
+logger = get_logger(__name__)
+
+
+@app.command()
+def weather_agent(
+    query: str = typer.Option(
+        "What's the weather in Japan?",
+        "--query",
+        "-q",
+        help="The query to ask the model",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        "-v",
+        help="Enable verbose output",
+    ),
+):
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+    response = weather_agent_app.invoke(
+        {
+            "messages": [
+                {"role": "user", "content": query},
+            ],
+        },
+        debug=True,
+    )
+    logger.info(response["messages"][-1].content)
+
+
+@app.command()
+def multi_agent(
+    query: str = typer.Option(
+        "What's the weather in Japan?",
+        "--query",
+        "-q",
+        help="The query to ask the model",
+    ),
+    verbose: bool = typer.Option(
+        False,
+        "--verbose",
+        "-v",
+        help="Enable verbose output",
+    ),
+):
+    if verbose:
+        logger.setLevel(logging.DEBUG)
+
+    response = multi_agent_app.invoke(
+        {
+            "messages": [
+                {"role": "user", "content": query},
+            ],
+        },
+        debug=True,
+    )
+    logger.info(response["messages"][-1].content)
+
+
+if __name__ == "__main__":
+    load_dotenv(
+        override=True,
+        verbose=True,
+    )
+    app()
diff --git a/template_langgraph/agents/simple_multi_agent/__init__.py b/template_langgraph/agents/simple_multi_agent/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/template_langgraph/agents/simple_multi_agent/multi_agent.py b/template_langgraph/agents/simple_multi_agent/multi_agent.py
new file mode 100644
index 0000000..176f8ee
--- /dev/null
+++ b/template_langgraph/agents/simple_multi_agent/multi_agent.py
@@ -0,0 +1,42 @@
+from typing import Literal
+
+from langgraph.graph import END, START, MessagesState, StateGraph
+from langgraph.types import Command
+
+from template_langgraph.agents.simple_multi_agent.weather_agent import app
+from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
+from template_langgraph.loggers import get_logger
+
+logger = get_logger(__name__)
+
+
+def transfer_to_weather_agent():
+    """Call this to transfer to the weather agent"""
+
+
+tools = [transfer_to_weather_agent]
+llm = AzureOpenAiWrapper().chat_model.bind_tools(tools=tools)
+
+
+def call_model(state: MessagesState) -> Command[Literal["weather_agent", END]]:
+    messages = state["messages"]
+    response = llm.invoke(messages)
+    if len(response.tool_calls) > 0:
+        return Command(
+            goto="weather_agent",
+        )
+    else:
+        return Command(
+            goto=END,
+            update={
+                "messages": [response],
+            },
+        )
+
+
+workflow = StateGraph(MessagesState)
+
+workflow.add_node("agent", call_model)
+workflow.add_node("weather_agent", app)
+workflow.add_edge(START, "agent")
+app = workflow.compile()
diff --git a/template_langgraph/agents/simple_multi_agent/weather_agent.py b/template_langgraph/agents/simple_multi_agent/weather_agent.py
new file mode 100644
index 0000000..a87b0dc
--- /dev/null
+++ b/template_langgraph/agents/simple_multi_agent/weather_agent.py
@@ -0,0 +1,45 @@
+from typing import Literal
+
+from langchain_core.tools import tool
+from langgraph.graph import END, START, MessagesState, StateGraph
+from langgraph.prebuilt import ToolNode
+from langgraph.types import Command
+
+from template_langgraph.llms.azure_openais import AzureOpenAiWrapper
+
+
+@tool
+def search(query: str) -> str:
+    """Call to surf the web"""
+    if "japan" in query.lower():
+        return "It's 60 degrees and cloudy in Japan"
+    return "It's 90 degrees and sunny"
+
+
+tools = [search]
+tool_node = ToolNode(tools=tools)
+llm = AzureOpenAiWrapper().chat_model.bind_tools(tools=tools)
+
+
+def call_model(state: MessagesState) -> Command[Literal["tools", END]]:
+    messages = state["messages"]
+    response = llm.invoke(messages)
+    if len(response.tool_calls) > 0:
+        next_node = "tools"
+    else:
+        next_node = END
+    return Command(
+        goto=next_node,
+        update={
+            "messages": [response],
+        },
+    )
+
+
+workflow = StateGraph(MessagesState)
+
+workflow.add_node("agent", call_model)
+workflow.add_node("tools", tool_node)
+workflow.add_edge(START, "agent")
+workflow.add_edge("tools", "agent")
+app = workflow.compile()