Skip to content

Commit 4cb19fa

Browse files
majdyz authored
and claude committed
feat(platform): implement graph-level Safe Mode toggle for HITL blocks
## Summary

- Add graph-level `safe_mode` metadata field with auto-detection of HITL blocks
- Create floating Safe Mode toggle component with dual variants (builder/library)
- Update backend to accept `safe_mode` parameter in execution calls
- Fix hardcoded safe mode values throughout frontend codebase
- Enhance FloatingReviewsPanel with proper execution status detection

## Backend Changes

- **Database**: Add `metadata` JSON column to `AgentGraph` table
- **API**: Update `execute_graph` endpoint to accept `safe_mode` parameter
- **Execution**: Use graph metadata `safe_mode` as default, with API override capability
- **Auto-detection**: Populate `has_human_in_the_loop` for graphs containing HITL blocks
- **Utils**: Enhance execution context to prioritize API `safe_mode` over graph metadata

## Frontend Changes

- **Component**: New `FloatingSafeModeToggle` with white (library) and black (builder) variants
- **Integration**: Added toggles to new/old builders and library pages
- **API Integration**: Direct graph metadata updates via `usePutV1UpdateGraphVersion`
- **Query Management**: Optimistic updates with React Query cache invalidation
- **Styling**: Proper positioning and z-index handling for floating elements

## Technical Details

- Safe Mode ON (default): HITL blocks require manual review before proceeding
- Safe Mode OFF: HITL blocks execute automatically without intervention
- Toggle only appears when graph contains HITL blocks (`has_human_in_the_loop: true`)
- Backend API takes precedence over graph metadata for execution-time behavior
- React Query cache management ensures UI consistency across components

## Known Issues (WIP)

- Tooltip z-index still covered by some UI elements
- Toggle state not persisting correctly (always shows ON)
- Error when removing HITL blocks from graph

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
1 parent 29de134 commit 4cb19fa

File tree

30 files changed

+551
-110
lines changed

30 files changed

+551
-110
lines changed

autogpt_platform/backend/backend/blocks/human_in_the_loop.py

Lines changed: 16 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
BlockSchemaInput,
1111
BlockSchemaOutput,
1212
)
13-
from backend.data.execution import ExecutionStatus
13+
from backend.data.execution import ExecutionContext, ExecutionStatus
1414
from backend.data.human_review import ReviewResult
1515
from backend.data.model import SchemaField
1616
from backend.executor.manager import async_update_node_execution_status
@@ -92,17 +92,30 @@ async def run(
9292
graph_exec_id: str,
9393
graph_id: str,
9494
graph_version: int,
95+
execution_context: ExecutionContext,
9596
**kwargs,
9697
) -> BlockOutput:
9798
"""
9899
Execute the Human In The Loop block.
99100
100101
This method uses one function to handle the complete workflow - checking existing reviews
101102
and creating pending ones as needed.
103+
104+
If safe_mode is disabled in execution_context, this block will automatically approve
105+
the data without requiring human intervention.
102106
"""
103-
try:
104-
logger.debug(f"HITL block executing for node {node_exec_id}")
107+
# Check if safe mode is disabled
108+
if not execution_context.safe_mode:
109+
logger.info(
110+
f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
111+
)
112+
# Automatically approve the data
113+
yield "status", "approved"
114+
yield "reviewed_data", input_data.data
115+
yield "review_message", "Auto-approved (safe mode disabled)"
116+
return
105117

118+
try:
106119
# Use the data layer to handle the complete workflow
107120
db_client = get_database_manager_async_client()
108121
result = await db_client.get_or_create_human_review(

autogpt_platform/backend/backend/blocks/time_blocks.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
BlockSchemaInput,
1515
BlockSchemaOutput,
1616
)
17-
from backend.data.execution import UserContext
17+
from backend.data.execution import ExecutionContext
1818
from backend.data.model import SchemaField
1919

2020
# Shared timezone literal type for all time/date blocks
@@ -188,10 +188,10 @@ def __init__(self):
188188
)
189189

190190
async def run(
191-
self, input_data: Input, *, user_context: UserContext, **kwargs
191+
self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
192192
) -> BlockOutput:
193-
# Extract timezone from user_context (always present)
194-
effective_timezone = user_context.timezone
193+
# Extract timezone from execution_context (always present)
194+
effective_timezone = execution_context.timezone
195195

196196
# Get the appropriate timezone
197197
tz = _get_timezone(input_data.format_type, effective_timezone)
@@ -298,10 +298,10 @@ def __init__(self):
298298
],
299299
)
300300

301-
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
302-
# Extract timezone from user_context (required keyword argument)
303-
user_context: UserContext = kwargs["user_context"]
304-
effective_timezone = user_context.timezone
301+
async def run(
302+
self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
303+
) -> BlockOutput:
304+
effective_timezone = execution_context.timezone
305305

306306
try:
307307
offset = int(input_data.offset)
@@ -404,10 +404,10 @@ def __init__(self):
404404
],
405405
)
406406

407-
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
408-
# Extract timezone from user_context (required keyword argument)
409-
user_context: UserContext = kwargs["user_context"]
410-
effective_timezone = user_context.timezone
407+
async def run(
408+
self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
409+
) -> BlockOutput:
410+
effective_timezone = execution_context.timezone
411411

412412
# Get the appropriate timezone
413413
tz = _get_timezone(input_data.format_type, effective_timezone)

autogpt_platform/backend/backend/data/credit_test.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
from backend.blocks.llm import AITextGeneratorBlock
88
from backend.data.block import get_block
99
from backend.data.credit import BetaUserCredit, UsageTransactionMetadata
10-
from backend.data.execution import NodeExecutionEntry, UserContext
10+
from backend.data.execution import NodeExecutionEntry
1111
from backend.data.user import DEFAULT_USER_ID
1212
from backend.executor.utils import block_usage_cost
1313
from backend.integrations.credentials_store import openai_credentials
@@ -86,7 +86,6 @@ async def test_block_credit_usage(server: SpinTestServer):
8686
"type": openai_credentials.type,
8787
},
8888
},
89-
user_context=UserContext(timezone="UTC"),
9089
),
9190
)
9291
assert spending_amount_1 > 0
@@ -101,7 +100,6 @@ async def test_block_credit_usage(server: SpinTestServer):
101100
node_exec_id="test_node_exec",
102101
block_id=AITextGeneratorBlock().id,
103102
inputs={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
104-
user_context=UserContext(timezone="UTC"),
105103
),
106104
)
107105
assert spending_amount_2 == 0

autogpt_platform/backend/backend/data/execution.py

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -365,7 +365,7 @@ def from_db(_graph_exec: AgentGraphExecution):
365365

366366
def to_graph_execution_entry(
367367
self,
368-
user_context: "UserContext",
368+
execution_context: "ExecutionContext",
369369
compiled_nodes_input_masks: Optional[NodesInputMasks] = None,
370370
parent_graph_exec_id: Optional[str] = None,
371371
):
@@ -375,7 +375,7 @@ def to_graph_execution_entry(
375375
graph_version=self.graph_version or 0,
376376
graph_exec_id=self.id,
377377
nodes_input_masks=compiled_nodes_input_masks,
378-
user_context=user_context,
378+
execution_context=execution_context,
379379
parent_graph_exec_id=parent_graph_exec_id,
380380
)
381381

@@ -449,7 +449,7 @@ def from_db(_node_exec: AgentNodeExecution, user_id: Optional[str] = None):
449449
)
450450

451451
def to_node_execution_entry(
452-
self, user_context: "UserContext"
452+
self, execution_context: "ExecutionContext"
453453
) -> "NodeExecutionEntry":
454454
return NodeExecutionEntry(
455455
user_id=self.user_id,
@@ -460,7 +460,7 @@ def to_node_execution_entry(
460460
node_id=self.node_id,
461461
block_id=self.block_id,
462462
inputs=self.input_data,
463-
user_context=user_context,
463+
execution_context=execution_context,
464464
)
465465

466466

@@ -1099,10 +1099,11 @@ async def get_latest_node_execution(
10991099
# ----------------- Execution Infrastructure ----------------- #
11001100

11011101

1102-
class UserContext(BaseModel):
1103-
"""Generic user context for graph execution containing user-specific settings."""
1102+
class ExecutionContext(BaseModel):
1103+
"""Execution context containing user-specific settings and execution options."""
11041104

1105-
timezone: str
1105+
timezone: str = "UTC"
1106+
safe_mode: bool = True # Default to safe mode ON
11061107

11071108

11081109
class GraphExecutionEntry(BaseModel):
@@ -1111,8 +1112,8 @@ class GraphExecutionEntry(BaseModel):
11111112
graph_id: str
11121113
graph_version: int
11131114
nodes_input_masks: Optional[NodesInputMasks] = None
1114-
user_context: UserContext
11151115
parent_graph_exec_id: Optional[str] = None
1116+
execution_context: ExecutionContext = Field(default_factory=ExecutionContext)
11161117

11171118

11181119
class NodeExecutionEntry(BaseModel):
@@ -1124,7 +1125,7 @@ class NodeExecutionEntry(BaseModel):
11241125
node_id: str
11251126
block_id: str
11261127
inputs: BlockInput
1127-
user_context: UserContext
1128+
execution_context: ExecutionContext = Field(default_factory=ExecutionContext)
11281129

11291130

11301131
class ExecutionQueue(Generic[T]):

autogpt_platform/backend/backend/data/graph.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,13 @@
6161
logger = logging.getLogger(__name__)
6262

6363

64+
class GraphMetadata(BaseDbModel):
65+
"""Metadata for AgentGraph configuration options."""
66+
67+
safe_mode: bool = True # Default to safe mode enabled
68+
has_human_in_the_loop: bool = False # Whether graph contains HITL blocks
69+
70+
6471
class Link(BaseDbModel):
6572
source_id: str
6673
sink_id: str
@@ -195,6 +202,7 @@ class BaseGraph(BaseDbModel):
195202
links: list[Link] = []
196203
forked_from_id: str | None = None
197204
forked_from_version: int | None = None
205+
metadata: GraphMetadata = GraphMetadata()
198206

199207
@computed_field
200208
@property
@@ -736,6 +744,9 @@ def from_db(
736744
description=graph.description or "",
737745
instructions=graph.instructions,
738746
recommended_schedule_cron=graph.recommendedScheduleCron,
747+
metadata=GraphMetadata.model_validate(
748+
graph.metadata if graph.metadata else {}
749+
),
739750
nodes=[NodeModel.from_db(node, for_export) for node in graph.Nodes or []],
740751
links=list(
741752
{
@@ -1232,6 +1243,14 @@ async def fork_graph(graph_id: str, graph_version: int, user_id: str) -> GraphMo
12321243
async def __create_graph(tx, graph: Graph, user_id: str):
12331244
graphs = [graph] + graph.sub_graphs
12341245

1246+
# Auto-detect HITL blocks for metadata
1247+
for g in graphs:
1248+
has_hitl = any(
1249+
node.block_id == "8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d"
1250+
for node in g.nodes
1251+
)
1252+
g.metadata.has_human_in_the_loop = has_hitl
1253+
12351254
await AgentGraph.prisma(tx).create_many(
12361255
data=[
12371256
AgentGraphCreateInput(
@@ -1244,6 +1263,7 @@ async def __create_graph(tx, graph: Graph, user_id: str):
12441263
userId=user_id,
12451264
forkedFromId=graph.forked_from_id,
12461265
forkedFromVersion=graph.forked_from_version,
1266+
metadata=SafeJson(graph.metadata.model_dump()),
12471267
)
12481268
for graph in graphs
12491269
]

autogpt_platform/backend/backend/data/human_review.py

Lines changed: 6 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -121,27 +121,17 @@ async def get_or_create_human_review(
121121
if review.processed:
122122
return None
123123

124-
if review.status == ReviewStatus.APPROVED:
125-
# Return the approved review result
126-
return ReviewResult(
127-
data=review.payload,
128-
status=ReviewStatus.APPROVED,
129-
message=review.reviewMessage or "",
130-
processed=review.processed,
131-
node_exec_id=review.nodeExecId,
132-
)
133-
elif review.status == ReviewStatus.REJECTED:
134-
# Return the rejected review result
124+
# If pending, return None to continue waiting, otherwise return the review result
125+
if review.status == ReviewStatus.WAITING:
126+
return None
127+
else:
135128
return ReviewResult(
136-
data=None,
137-
status=ReviewStatus.REJECTED,
129+
data=review.payload if review.status == ReviewStatus.APPROVED else None,
130+
status=review.status,
138131
message=review.reviewMessage or "",
139132
processed=review.processed,
140133
node_exec_id=review.nodeExecId,
141134
)
142-
else:
143-
# Review is pending - return None to continue waiting
144-
return None
145135

146136

147137
async def has_pending_reviews_for_graph_exec(graph_exec_id: str) -> bool:

autogpt_platform/backend/backend/executor/database.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
get_block_error_stats,
1010
get_child_graph_executions,
1111
get_execution_kv_data,
12+
get_graph_execution,
1213
get_graph_execution_meta,
1314
get_graph_executions,
1415
get_graph_executions_count,
@@ -130,6 +131,7 @@ def _(
130131
get_child_graph_executions = _(get_child_graph_executions)
131132
get_graph_executions = _(get_graph_executions)
132133
get_graph_executions_count = _(get_graph_executions_count)
134+
get_graph_execution = _(get_graph_execution)
133135
get_graph_execution_meta = _(get_graph_execution_meta)
134136
create_graph_execution = _(create_graph_execution)
135137
get_node_execution = _(get_node_execution)
@@ -254,6 +256,7 @@ def get_service_type(cls):
254256
get_latest_node_execution = d.get_latest_node_execution
255257
get_graph = d.get_graph
256258
get_graph_metadata = d.get_graph_metadata
259+
get_graph_execution = d.get_graph_execution
257260
get_graph_execution_meta = d.get_graph_execution_meta
258261
get_node = d.get_node
259262
get_node_execution = d.get_node_execution

autogpt_platform/backend/backend/executor/manager.py

Lines changed: 15 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -29,14 +29,14 @@
2929
from backend.data.credit import UsageTransactionMetadata
3030
from backend.data.dynamic_fields import parse_execution_output
3131
from backend.data.execution import (
32+
ExecutionContext,
3233
ExecutionQueue,
3334
ExecutionStatus,
3435
GraphExecution,
3536
GraphExecutionEntry,
3637
NodeExecutionEntry,
3738
NodeExecutionResult,
3839
NodesInputMasks,
39-
UserContext,
4040
)
4141
from backend.data.graph import Link, Node
4242
from backend.data.model import GraphExecutionStats, NodeExecutionStats
@@ -212,8 +212,8 @@ async def execute_node(
212212
"user_id": user_id,
213213
}
214214

215-
# Add user context from NodeExecutionEntry
216-
extra_exec_kwargs["user_context"] = data.user_context
215+
# Add execution context from NodeExecutionEntry
216+
extra_exec_kwargs["execution_context"] = data.execution_context
217217

218218
# Last-minute fetch credentials + acquire a system-wide read-write lock to prevent
219219
# changes during execution. ⚠️ This means a set of credentials can only be used by
@@ -243,8 +243,12 @@ async def execute_node(
243243
scope.set_tag("node_id", node_id)
244244
scope.set_tag("block_name", node_block.name)
245245
scope.set_tag("block_id", node_block.id)
246-
for k, v in (data.user_context or UserContext(timezone="UTC")).model_dump().items():
247-
scope.set_tag(f"user_context.{k}", v)
246+
for k, v in (
247+
(data.execution_context or ExecutionContext(timezone="UTC"))
248+
.model_dump()
249+
.items()
250+
):
251+
scope.set_tag(f"execution_context.{k}", v)
248252

249253
try:
250254
async for output_name, output_data in node_block.execute(
@@ -289,7 +293,7 @@ async def _enqueue_next_nodes(
289293
graph_version: int,
290294
log_metadata: LogMetadata,
291295
nodes_input_masks: Optional[NodesInputMasks],
292-
user_context: UserContext,
296+
execution_context: ExecutionContext,
293297
) -> list[NodeExecutionEntry]:
294298
async def add_enqueued_execution(
295299
node_exec_id: str, node_id: str, block_id: str, data: BlockInput
@@ -309,7 +313,7 @@ async def add_enqueued_execution(
309313
node_id=node_id,
310314
block_id=block_id,
311315
inputs=data,
312-
user_context=user_context,
316+
execution_context=execution_context,
313317
)
314318

315319
async def register_next_executions(node_link: Link) -> list[NodeExecutionEntry]:
@@ -861,7 +865,9 @@ def _on_graph_execution(
861865
ExecutionStatus.REVIEW,
862866
],
863867
):
864-
node_entry = node_exec.to_node_execution_entry(graph_exec.user_context)
868+
node_entry = node_exec.to_node_execution_entry(
869+
graph_exec.execution_context
870+
)
865871
execution_queue.add(node_entry)
866872

867873
# ------------------------------------------------------------
@@ -1165,7 +1171,7 @@ async def _process_node_output(
11651171
graph_version=graph_exec.graph_version,
11661172
log_metadata=log_metadata,
11671173
nodes_input_masks=nodes_input_masks,
1168-
user_context=graph_exec.user_context,
1174+
execution_context=graph_exec.execution_context,
11691175
):
11701176
execution_queue.add(next_execution)
11711177

0 commit comments

Comments (0)