Unexpected Multiple Tool Execution Events in Human-in-the-Loop Workflow #5904
Unanswered
chenzewang asked this question in Q&A
Replies: 1 comment
Reproduction Code:

"""
Human-in-the-loop Tool Wrapper Demonstration

This demonstration shows how to use the add_human_in_the_loop wrapper
to add interruption capabilities to any tool.
"""
from langchain_core.messages import HumanMessage, AIMessage
from typing import Callable
from langchain_core.tools import BaseTool, tool as create_tool
from langchain_core.runnables import RunnableConfig
from langgraph.types import interrupt, Command
from langgraph.prebuilt.interrupt import HumanInterruptConfig, HumanInterrupt
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.prebuilt import create_react_agent
from dotenv import load_dotenv
from llm_factory import get_default_llm

load_dotenv()


def add_human_in_the_loop(
    tool: Callable | BaseTool,
    *,
    interrupt_config: HumanInterruptConfig = None,
) -> BaseTool:
    """Wrap a tool to support human-in-the-loop review."""
    if not isinstance(tool, BaseTool):
        tool = create_tool(tool)

    @create_tool(tool.name, description=tool.description, args_schema=tool.args_schema)
    def call_tool_with_interrupt(config: RunnableConfig, **tool_input):
        request: HumanInterrupt = {
            "action_request": {"action": tool.name, "args": tool_input},
            "config": interrupt_config,
            "description": "Please review the tool call",
        }
        response = interrupt(request)
        # Approve tool call
        if response["type"] == "accept":
            tool_response = tool.invoke(tool_input, config)
        # Update tool call parameters
        elif response["type"] == "edit":
            tool_input = response["args"]["args"]
            tool_response = tool.invoke(tool_input, config)
        # Respond to LLM with user feedback
        elif response["type"] == "response":
            user_feedback = response["args"]
            tool_response = user_feedback
        else:
            raise ValueError(f"Unsupported interrupt response type: {response['type']}")
        return tool_response

    return call_tool_with_interrupt


def book_hotel(hotel_name: str) -> str:
    """Book a hotel."""
    return f"Successfully booked accommodation at {hotel_name}."


checkpointer = InMemorySaver()


def create_agent():
    """Create a React agent with human-in-the-loop tools."""
    # Initialize LLM using factory pattern from existing codebase
    llm = get_default_llm()

    # Create tools with human-in-the-loop wrappers
    tools = [
        add_human_in_the_loop(book_hotel),
    ]

    # Create agent
    agent = create_react_agent(
        model=llm,
        tools=tools,
        checkpointer=checkpointer,
    )
    return agent


async def run_demo():
    """Run the human-in-the-loop demonstration."""
    print("🤖 Human-in-the-loop Tool Demonstration")
    print("=" * 50)

    agent = create_agent()
    config = {"configurable": {"thread_id": "human_demo_1"}}
    messages = [HumanMessage(content="I want to book accommodation at the Hilton hotel")]

    on_tool_start_chunks = []

    # First round: stream until the wrapped tool raises the interrupt
    async for chunk in agent.astream_events({"messages": messages}, config):
        event = chunk["event"]
        data = chunk.get("data", {})
        if event == "on_tool_start":
            on_tool_start_chunks.append(chunk)
            print("on_tool_start📦 :", chunk)
            print()
        if event == "on_tool_end":
            print("on_tool_end📦 :", chunk)
            print()
        elif type(data.get("chunk")) == dict and "__interrupt__" in data.get("chunk", {}):
            print("interrupt📦 :", chunk)
            print()

    print(f"Second Round===================================")

    # Second round: resume the interrupted run by accepting the tool call
    async for chunk in agent.astream_events(Command(resume={"type": "accept"}), config):
        event = chunk["event"]
        data = chunk.get("data", {})
        if event == "on_tool_start":
            on_tool_start_chunks.append(chunk)
            print("on_tool_start📦 :", chunk)
            print()
        if event == "on_tool_end":
            print("on_tool_end📦 :", chunk)
            print()
        elif type(data.get("chunk")) == dict and "__interrupt__" in data.get("chunk", {}):
            print("interrupt📦 :", chunk)
            print()

    print(
        f"on_tool_start_chunks =========📦 Expected len=1 Actual len={len(on_tool_start_chunks)} chunks=",
        on_tool_start_chunks,
    )


if __name__ == "__main__":
    import asyncio

    asyncio.run(run_demo())
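
For comparison, the same interrupt-and-resume round trip can also be driven with the agent's plain astream updates stream instead of astream_events. This is only a minimal sketch reusing create_agent and the message text from the reproduction above; the thread_id is a fresh, hypothetical value, and this does not change how often the wrapped tool body executes, it just shows the pause surfacing as an "__interrupt__" entry.

# Sketch only: drive the same flow with astream(..., stream_mode="updates"),
# reusing create_agent() from the reproduction above.
async def run_updates_demo():
    agent = create_agent()
    config = {"configurable": {"thread_id": "human_demo_updates"}}
    messages = [HumanMessage(content="I want to book accommodation at the Hilton hotel")]

    # First pass: the run pauses and the update carries an "__interrupt__" entry.
    async for update in agent.astream({"messages": messages}, config, stream_mode="updates"):
        print("update 📦:", update)

    # Resume: accept the pending tool call; the tools-node update carries the result.
    async for update in agent.astream(Command(resume={"type": "accept"}), config, stream_mode="updates"):
        print("update 📦:", update)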
I'm implementing a human-in-the-loop workflow with LangGraph, following the official examples. When I add human review for tool calls, I see unexpected behavior: after interrupting and then resuming tool execution, the on_tool_start and on_tool_end events fire 3 times, each with a different run_id. From the user's perspective, however, only a single tool call is being made, which is confusing.
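
One mechanism that is easy to miss here: when a LangGraph run is resumed with Command(resume=...), the node (or wrapped tool body) that raised the interrupt is re-executed from its start, so everything before the interrupt() call runs again. Below is a minimal, self-contained sketch of that resume behavior; the graph, node, and key names are illustrative assumptions, and it is not a full explanation of why exactly three events show up in the reproduction above.

# Minimal sketch of interrupt/resume re-execution semantics (illustrative names).
from typing_extensions import TypedDict

from langgraph.checkpoint.memory import InMemorySaver
from langgraph.graph import StateGraph, START, END
from langgraph.types import Command, interrupt


class State(TypedDict):
    answer: str


def ask_human(state: State) -> State:
    print("node body entered")             # printed on the first run AND again on resume
    decision = interrupt("please review")  # pauses the first run; returns the resume value later
    return {"answer": decision}


builder = StateGraph(State)
builder.add_node("ask_human", ask_human)
builder.add_edge(START, "ask_human")
builder.add_edge("ask_human", END)
graph = builder.compile(checkpointer=InMemorySaver())

cfg = {"configurable": {"thread_id": "resume-demo"}}
graph.invoke({"answer": ""}, cfg)            # prints once, then pauses at the interrupt
graph.invoke(Command(resume="accept"), cfg)  # re-enters the node, prints again, then finishes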