You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Please find attached the full code for reproduction
import operator
import functools
from pydantic import BaseModel
from typing_extensions import TypedDict
from typing import Literal, Annotated, Sequence
from langchain.tools import tool
from langchain_core.tools import tool
from langchain_ollama import ChatOllama
from langchain_core.messages import BaseMessage
from langgraph.prebuilt import create_react_agent
from langgraph.graph import END, StateGraph, START
from langchain.pydantic_v1 import BaseModel # , Field
from langchain_core.messages import HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
# from langchain.tools import BaseTool, StructuredTool
# from langchain_ollama import OllamaLLM
class AgentState(TypedDict):
messages: Annotated[Sequence[BaseMessage], operator.add]
next: str
def supervisor_agent(state):
supervisor_chain = prompt | llm.with_structured_output(routeResponse)
return supervisor_chain.invoke(state)
def agent_node(state, agent, name):
result = agent.invoke(state)
return {
"messages": [HumanMessage(content=result["messages"][-1].content, name=name)]
}
@tool
def search(query: str) -> str:
"""Look up things online."""
return "LangChain"
@tool
def multiply(a: int, b: int) -> int:
"""Multiply two numbers."""
return a * b
members = ["Searcher", "Multiplier"]
system_prompt = (
f"You are a supervisor tasked with managing a conversation between the "
f"following workers: {members}. Given the following user request, "
f"respond with the worker to act next. Each worker will perform a "
f"task and respond with their results and status. When finished, "
f"respond with FINISH."
)
options = ["FINISH"] + members
class routeResponse(BaseModel):
next: Literal[*options]
# llm = OllamaLLM(model="llama3.1:8b")
llm = ChatOllama(
model="llama3.1:8b",
temperature=0,
)
search_llm = ChatOllama(
model="llama3.1:8b",
temperature=0,
).bind_tools([search])
mul_llm = ChatOllama(
model="llama3.1:8b",
temperature=0,
).bind_tools([multiply])
prompt = ChatPromptTemplate.from_messages(
[
("system", system_prompt),
MessagesPlaceholder(variable_name="messages"),
(
"system",
"Given the conversation above, who should act next?"
f"Or should we FINISH? Select one of: {options}",
),
]
).partial(options=str(options), members=", ".join(members))
search_agent = create_react_agent(search_llm, tools=[search])
search_node = functools.partial(agent_node, agent=search_agent, name="Searcher")
# NOTE: THIS PERFORMS ARBITRARY CODE EXECUTION. PROCEED WITH CAUTION
mul_agent = create_react_agent(mul_llm, tools=[multiply])
mul_node = functools.partial(agent_node, agent=mul_agent, name="Multiplier")
workflow = StateGraph(AgentState)
workflow.add_node("Searcher", search_node)
workflow.add_node("Multiplier", mul_node)
workflow.add_node("supervisor", supervisor_agent)
for member in members:
# We want our workers to ALWAYS "report back" to the supervisor when done
workflow.add_edge(member, "supervisor")
# The supervisor populates the "next" field in the graph state
# which routes to a node or finishes
conditional_map = {k: k for k in members}
conditional_map["FINISH"] = END
workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map)
# Finally, add entrypoint
workflow.add_edge(START, "supervisor")
graph = workflow.compile()
graph.get_graph().draw_mermaid_png(output_file_path='agentGraph.png')
for s in graph.stream(
{
"messages": [
HumanMessage(content="Hi")
]
}
):
if "__end__" not in s:
print(s)
print("----")
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
Uh oh!
There was an error while loading. Please reload this page.
-
Hi,
I'm trying to create a Multi-agent supervisor which uses Ollama framework, in order to use local model (most of my code is based on this example: https://github.com/langchain-ai/langgraph/blob/main/docs/docs/tutorials/multi_agent/agent_supervisor.ipynb).
Debugging the code, I found that model invoke returns nothing, which hurts the full process of tool routing:
Please find attached the full code for reproduction
Thank you in advance!
Beta Was this translation helpful? Give feedback.
All reactions