Skip to content

Commit 5ae461b

Browse files
authored
feat: migrate to LangChain v1 packages with enhanced HITL and context management
2 parents cfb9335 + f289187 commit 5ae461b

File tree

36 files changed

+1386
-738
lines changed

36 files changed

+1386
-738
lines changed
Lines changed: 29 additions & 12 deletions
Original file line numberDiff line numberDiff line change
"""Context schema for supervisor configuration."""

from __future__ import annotations

from dataclasses import asdict, dataclass, field
from typing import Any

from langgraph_up_devkits.context import BaseAgentContext


@dataclass(kw_only=True)
class SupervisorContext(BaseAgentContext):
    """Context schema for supervisor configuration.

    Extends BaseAgentContext with supervisor-specific defaults.
    Uses GLM-4.5-Air model by default for efficient coordination.

    Inherits from BaseAgentContext:
    - model: LLM identifier (overridden to siliconflow:zai-org/GLM-4.5-Air)
    - temperature: Sampling temperature (default 0.7)
    - max_tokens: Response token cap (default None)
    - recursion_limit: LangGraph recursion depth (default 100)
    - debug: Enable debug logging
    - user_id: Optional user identifier
    """

    # Override the inherited default so the supervisor coordinates with a
    # lightweight model unless the caller configures a different one.
    model: str = field(
        default="siliconflow:zai-org/GLM-4.5-Air",
        metadata={
            "description": "The name of the language model to use for the supervisor agent.",
        },
    )

    @classmethod
    def default(cls) -> SupervisorContext:
        """Create default supervisor context."""
        return cls()

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for RunnableConfig.

        Returns a ``{"configurable": {...}}`` mapping suitable for passing
        as a LangChain ``RunnableConfig``.

        Note: ``dataclasses.asdict`` returns ``dict[str, Any]`` and
        recursively converts nested dataclasses, so the inner value type
        cannot honestly be narrowed to scalars — the previous annotation
        (``dict[str, str | float | int | bool | None]``) was incorrect for
        any structured field inherited from BaseAgentContext.
        """
        return {"configurable": asdict(self)}

apps/sample-agent/src/sample_agent/graph.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,11 +24,14 @@ def make_graph(config: RunnableConfig | None = None) -> CompiledStateGraph[Any,
2424

2525
# Convert runnable config to context
2626
configurable = config.get("configurable", {})
27-
context_kwargs = {k: v for k, v in configurable.items() if k in SupervisorContext.model_fields}
27+
from dataclasses import fields
28+
29+
context_field_names = {f.name for f in fields(SupervisorContext)}
30+
context_kwargs = {k: v for k, v in configurable.items() if k in context_field_names}
2831
context = SupervisorContext(**context_kwargs)
2932

3033
# Load model based on configuration
31-
model = load_chat_model(context.model_name)
34+
model = load_chat_model(context.model)
3235

3336
# Create agents with the configured model via make_graph functions
3437
math_agent = make_math_graph(config)
Lines changed: 16 additions & 13 deletions
Original file line numberDiff line numberDiff line change
"""State definition for sample-agent extending AgentState."""

from typing import NotRequired

from langchain.agents import AgentState as BaseAgentState


class AgentState(BaseAgentState):  # type: ignore[type-arg]
    """Graph state for sample-agent.

    The ``langchain.agents.AgentState`` base already carries the message
    history (``messages: Annotated[list[BaseMessage], add_messages]``),
    the ``jump_to`` control channel, and an optional
    ``structured_response``; this subclass layers on the fields the
    supervisor pattern needs.
    """

    # Consumed by create_react_agent to bound the remaining loop budget.
    remaining_steps: int

    # Supervisor-pattern bookkeeping; NotRequired, so absent from the
    # state until a node explicitly writes them.
    task_description: NotRequired[str | None]
    active_agent: NotRequired[str | None]


__all__ = ["AgentState"]

apps/sample-agent/src/sample_agent/subagents/math.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,11 +27,14 @@ def make_graph(config: RunnableConfig | None = None) -> CompiledStateGraph[Any,
2727

2828
# Convert runnable config to context
2929
configurable = config.get("configurable", {})
30-
context_kwargs = {k: v for k, v in configurable.items() if k in SupervisorContext.model_fields}
30+
from dataclasses import fields
31+
32+
context_field_names = {f.name for f in fields(SupervisorContext)}
33+
context_kwargs = {k: v for k, v in configurable.items() if k in context_field_names}
3134
context = SupervisorContext(**context_kwargs)
3235

3336
# Load model based on configuration
34-
model = load_chat_model(context.model_name)
37+
model = load_chat_model(context.model)
3538

3639
# Create and return the math agent directly
3740
return create_agent(

apps/sample-agent/src/sample_agent/subagents/research.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -27,11 +27,14 @@ def make_graph(config: RunnableConfig | None = None) -> CompiledStateGraph[Any,
2727

2828
# Convert runnable config to context
2929
configurable = config.get("configurable", {})
30-
context_kwargs = {k: v for k, v in configurable.items() if k in SupervisorContext.model_fields}
30+
from dataclasses import fields
31+
32+
context_field_names = {f.name for f in fields(SupervisorContext)}
33+
context_kwargs = {k: v for k, v in configurable.items() if k in context_field_names}
3134
context = SupervisorContext(**context_kwargs)
3235

3336
# Load model based on configuration
34-
model = load_chat_model(context.model_name)
37+
model = load_chat_model(context.model)
3538

3639
# Create and return the research agent directly
3740
return create_agent(

apps/sample-agent/src/sample_agent/tools/handoff.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,8 @@
22

33
from typing import Annotated, Any
44

5-
from langchain_core.messages import ToolMessage
6-
from langchain_core.tools import BaseTool, InjectedToolCallId, tool
5+
from langchain.messages import ToolMessage
6+
from langchain.tools import BaseTool, InjectedToolCallId, tool
77
from langgraph.prebuilt import InjectedState
88
from langgraph.types import Command
99
from langgraph_supervisor.handoff import METADATA_KEY_HANDOFF_DESTINATION

apps/sample-agent/tests/conftest.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,8 @@
77
from unittest.mock import patch
88

99
import pytest
10-
from langchain_core.language_models.chat_models import BaseChatModel
11-
from langchain_core.messages import AIMessage
10+
from langchain.chat_models import BaseChatModel
11+
from langchain.messages import AIMessage
1212
from langchain_core.outputs import ChatGeneration, LLMResult
1313

1414

apps/sample-agent/tests/integration/test_handoff.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
"""Integration tests for handoff functionality with real models and workflows."""
22

33
import pytest
4-
from langchain_core.messages import HumanMessage
4+
from langchain.messages import HumanMessage
55
from sample_agent.graph import make_graph
66
from sample_agent.state import AgentState
77
from sample_agent.tools.handoff import create_custom_handoff_tool

apps/sample-agent/tests/unit/test_graph.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
from unittest.mock import Mock, patch
44

55
import pytest
6-
from langchain_core.messages import AIMessage, HumanMessage
6+
from langchain.messages import AIMessage, HumanMessage
77
from sample_agent.state import AgentState
88
from sample_agent.tools.basic import add, multiply, web_search
99
from sample_agent.tools.handoff import create_custom_handoff_tool
@@ -92,7 +92,7 @@ def test_math_agent_creation(self, mock_load_model, mock_create_agent):
9292
mock_compiled_graph = Mock()
9393
mock_create_agent.return_value = mock_compiled_graph
9494

95-
config = {"configurable": {"model_name": "test_model"}}
95+
config = {"configurable": {"model": "test_model"}}
9696
result = make_graph(config)
9797

9898
mock_load_model.assert_called_once_with("test_model")
@@ -115,7 +115,7 @@ def test_research_agent_creation(self, mock_load_model, mock_create_agent):
115115
mock_compiled_graph = Mock()
116116
mock_create_agent.return_value = mock_compiled_graph
117117

118-
config = {"configurable": {"model_name": "test_model"}}
118+
config = {"configurable": {"model": "test_model"}}
119119
result = make_graph(config)
120120

121121
mock_load_model.assert_called_once_with("test_model")

apps/sample-deep-agent/README.md

Lines changed: 134 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -174,16 +174,146 @@ Automatic storage of:
174174

175175
## Testing
176176

177-
Run the test suite:
177+
### Unit Tests
178+
179+
Run unit tests (no API keys required):
178180

179181
```bash
180-
# Unit tests
181182
make unit sample-deep-agent
183+
```
184+
185+
### Integration Tests
186+
187+
Integration tests require API keys and make real API calls:
188+
189+
```bash
190+
# Set up environment variables first
191+
export SILICONFLOW_API_KEY=your_key_here
192+
export TAVILY_API_KEY=your_key_here
182193

183-
# Integration tests (requires API keys)
194+
# Run all integration tests
184195
make integration sample-deep-agent
185196

186-
# All tests
197+
# Run specific HITL integration test
198+
cd apps/sample-deep-agent
199+
uv run pytest tests/integration/test_hitl.py::TestHITLWorkflow::test_comprehensive_hitl_workflow -v -s
200+
```
201+
202+
### Human-in-the-Loop (HITL) Testing
203+
204+
The agent includes comprehensive HITL integration tests that verify interrupt functionality with real LLM calls.
205+
206+
#### HITL Configuration
207+
208+
Configure interrupts by passing `interrupt_on` and `subagent_interrupts` to `make_graph()`:
209+
210+
```python
211+
from sample_deep_agent.graph import make_graph
212+
213+
# Define interrupt configuration
214+
interrupt_on = {
215+
"task": {"allowed_decisions": ["approve", "reject"]}, # Only approve/reject
216+
"write_todos": False, # Don't interrupt write_todos
217+
"think_tool": False, # Don't interrupt think_tool
218+
"deep_web_search": True, # Interrupt at top level
219+
}
220+
221+
subagent_interrupts = {
222+
"research-agent": {
223+
"deep_web_search": True, # Interrupt in subagent too
224+
"think_tool": False, # Don't interrupt think_tool in subagent
225+
}
226+
}
227+
228+
# Create agent with HITL configuration
229+
agent = make_graph(
230+
config={"configurable": {"max_todos": 1}},
231+
interrupt_on=interrupt_on,
232+
subagent_interrupts=subagent_interrupts
233+
)
234+
```
235+
236+
#### Interrupt Decision Types
237+
238+
Three types of decisions are supported:
239+
240+
1. **Approve**: Execute tool with original arguments
241+
```python
242+
{"type": "approve"}
243+
```
244+
245+
2. **Reject**: Skip tool execution (agent receives error message)
246+
```python
247+
{"type": "reject"}
248+
```
249+
250+
3. **Edit**: Modify arguments before execution
251+
```python
252+
{
253+
"type": "edit",
254+
"edited_action": {
255+
"name": "tool_name",
256+
"args": {"modified": "arguments"}
257+
}
258+
}
259+
```
260+
261+
#### HITL Workflow Example
262+
263+
```python
264+
import uuid
265+
from langchain.messages import HumanMessage
266+
from langgraph.types import Command
267+
268+
# Use thread_id for state persistence (required for HITL)
269+
thread_id = str(uuid.uuid4())
270+
thread_config = {"configurable": {"thread_id": thread_id}}
271+
272+
# Initial invocation
273+
result = await agent.ainvoke(
274+
{"messages": [HumanMessage(content="What are the core features of LangChain v1?")]},
275+
config=thread_config
276+
)
277+
278+
# Handle interrupts
279+
while result.get("__interrupt__"):
280+
interrupts = result["__interrupt__"][0].value
281+
action_requests = interrupts["action_requests"]
282+
283+
# Make decisions for each action
284+
decisions = []
285+
for action in action_requests:
286+
if action["name"] == "task":
287+
decisions.append({"type": "approve"})
288+
elif action["name"] == "deep_web_search":
289+
decisions.append({"type": "reject"})
290+
else:
291+
decisions.append({"type": "approve"})
292+
293+
# Resume with decisions (must use same thread_config)
294+
result = await agent.ainvoke(
295+
Command(resume={"decisions": decisions}),
296+
config=thread_config
297+
)
298+
299+
# Get final result
300+
final_message = result["messages"][-1]
301+
print(final_message.content)
302+
```
303+
304+
#### Key Features Tested
305+
306+
- ✅ Allowed decisions configuration (restrict to approve/reject only)
307+
- ✅ Top-level tool approval/rejection
308+
- ✅ Subagent-specific interrupt overrides
309+
- ✅ Multiple concurrent tool interrupts
310+
- ✅ Agent resilience when tools are rejected
311+
- ✅ Verification that rejected tools don't execute
312+
313+
### All Tests
314+
315+
```bash
316+
# Run all tests across the monorepo
187317
make test
188318
```
189319

0 commit comments

Comments
 (0)