
Commit 4e686d7

majdyz and claude committed
feat(platform): implement Safe Mode API for Human-in-the-Loop blocks
- Added PATCH /graphs/{id}/metadata endpoint for metadata-only updates without version bumping
- Removed safe_mode parameter from execute_graph API (backend now reads from metadata)
- Added has_human_in_the_loop field to LibraryAgent model for proper HITL detection
- Created FloatingSafeModeToggle component with support for all graph types
- Updated frontend execution calls to remove redundant safe_mode parameter
- Added proper cache invalidation for immediate UI updates
- Fixed TypeScript compatibility between legacy Graph and new API types

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
1 parent 29de134 commit 4e686d7
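
As a quick illustration of the metadata-only update described in the commit message, here is a hedged client-side sketch in Python. Only the route shape and the safe_mode field come from this commit; the base URL, bearer-token auth, and passing the graph version as a query parameter are assumptions made for the example.

```python
# Hypothetical client sketch for the new PATCH /graphs/{id}/metadata route.
# Assumptions: base_url, the auth header, and the `version` query parameter are
# illustrative; only the route shape and safe_mode field come from this commit.
import asyncio

import httpx


async def set_safe_mode(graph_id: str, version: int, enabled: bool, token: str) -> dict:
    async with httpx.AsyncClient(base_url="http://localhost:8006/api") as client:
        resp = await client.patch(
            f"/graphs/{graph_id}/metadata",
            params={"version": version},
            json={"safe_mode": enabled},
            headers={"Authorization": f"Bearer {token}"},
        )
        resp.raise_for_status()
        return resp.json()


if __name__ == "__main__":
    print(asyncio.run(set_safe_mode("my-graph-id", 1, False, "my-token")))
```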

File tree

31 files changed, +734 -113 lines changed

autogpt_platform/backend/backend/blocks/human_in_the_loop.py

Lines changed: 16 additions & 3 deletions
@@ -10,7 +10,7 @@
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.data.execution import ExecutionStatus
+from backend.data.execution import ExecutionContext, ExecutionStatus
 from backend.data.human_review import ReviewResult
 from backend.data.model import SchemaField
 from backend.executor.manager import async_update_node_execution_status
@@ -92,17 +92,30 @@ async def run(
         graph_exec_id: str,
         graph_id: str,
         graph_version: int,
+        execution_context: ExecutionContext,
         **kwargs,
     ) -> BlockOutput:
         """
         Execute the Human In The Loop block.

         This method uses one function to handle the complete workflow - checking existing reviews
         and creating pending ones as needed.
+
+        If safe_mode is disabled in execution_context, this block will automatically approve
+        the data without requiring human intervention.
         """
-        try:
-            logger.debug(f"HITL block executing for node {node_exec_id}")
+        # Check if safe mode is disabled
+        if not execution_context.safe_mode:
+            logger.info(
+                f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
+            )
+            # Automatically approve the data
+            yield "status", "approved"
+            yield "reviewed_data", input_data.data
+            yield "review_message", "Auto-approved (safe mode disabled)"
+            return

+        try:
             # Use the data layer to handle the complete workflow
             db_client = get_database_manager_async_client()
             result = await db_client.get_or_create_human_review(
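
To make the bypass above concrete, a minimal sketch of driving the block with safe mode off follows. It assumes an already-constructed HITL block and its Input model; the keyword arguments shown are only those visible in this hunk, and the real executor passes additional ones.

```python
# Illustrative only: with safe_mode=False the block short-circuits and yields
# the auto-approval outputs instead of creating a pending review.
# `block` and `input_data` are assumed to be a constructed Human-in-the-Loop
# block instance and its Input model; construction is not shown in this diff.
from backend.data.execution import ExecutionContext


async def run_without_review(block, input_data) -> dict:
    ctx = ExecutionContext(timezone="UTC", safe_mode=False)
    outputs = {}
    async for name, value in block.run(
        input_data,
        node_exec_id="node-exec-1",
        graph_exec_id="graph-exec-1",
        graph_id="graph-1",
        graph_version=1,
        execution_context=ctx,
    ):
        outputs[name] = value
    # Per the branch above, we expect status / reviewed_data / review_message.
    assert outputs["status"] == "approved"
    return outputs
```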

autogpt_platform/backend/backend/blocks/time_blocks.py

Lines changed: 12 additions & 12 deletions
@@ -14,7 +14,7 @@
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.data.execution import UserContext
+from backend.data.execution import ExecutionContext
 from backend.data.model import SchemaField

 # Shared timezone literal type for all time/date blocks
@@ -188,10 +188,10 @@ def __init__(self):
         )

     async def run(
-        self, input_data: Input, *, user_context: UserContext, **kwargs
+        self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
     ) -> BlockOutput:
-        # Extract timezone from user_context (always present)
-        effective_timezone = user_context.timezone
+        # Extract timezone from execution_context (always present)
+        effective_timezone = execution_context.timezone

         # Get the appropriate timezone
         tz = _get_timezone(input_data.format_type, effective_timezone)
@@ -298,10 +298,10 @@ def __init__(self):
             ],
         )

-    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        # Extract timezone from user_context (required keyword argument)
-        user_context: UserContext = kwargs["user_context"]
-        effective_timezone = user_context.timezone
+    async def run(
+        self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
+    ) -> BlockOutput:
+        effective_timezone = execution_context.timezone

         try:
             offset = int(input_data.offset)
@@ -404,10 +404,10 @@ def __init__(self):
             ],
         )

-    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        # Extract timezone from user_context (required keyword argument)
-        user_context: UserContext = kwargs["user_context"]
-        effective_timezone = user_context.timezone
+    async def run(
+        self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
+    ) -> BlockOutput:
+        effective_timezone = execution_context.timezone

         # Get the appropriate timezone
         tz = _get_timezone(input_data.format_type, effective_timezone)

autogpt_platform/backend/backend/data/credit_test.py

Lines changed: 1 addition & 3 deletions
@@ -7,7 +7,7 @@
 from backend.blocks.llm import AITextGeneratorBlock
 from backend.data.block import get_block
 from backend.data.credit import BetaUserCredit, UsageTransactionMetadata
-from backend.data.execution import NodeExecutionEntry, UserContext
+from backend.data.execution import NodeExecutionEntry
 from backend.data.user import DEFAULT_USER_ID
 from backend.executor.utils import block_usage_cost
 from backend.integrations.credentials_store import openai_credentials
@@ -86,7 +86,6 @@ async def test_block_credit_usage(server: SpinTestServer):
                     "type": openai_credentials.type,
                 },
             },
-            user_context=UserContext(timezone="UTC"),
         ),
     )
     assert spending_amount_1 > 0
@@ -101,7 +100,6 @@ async def test_block_credit_usage(server: SpinTestServer):
             node_exec_id="test_node_exec",
             block_id=AITextGeneratorBlock().id,
             inputs={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
-            user_context=UserContext(timezone="UTC"),
         ),
     )
     assert spending_amount_2 == 0

autogpt_platform/backend/backend/data/execution.py

Lines changed: 10 additions & 9 deletions
@@ -365,7 +365,7 @@ def from_db(_graph_exec: AgentGraphExecution):

     def to_graph_execution_entry(
         self,
-        user_context: "UserContext",
+        execution_context: "ExecutionContext",
         compiled_nodes_input_masks: Optional[NodesInputMasks] = None,
         parent_graph_exec_id: Optional[str] = None,
     ):
@@ -375,7 +375,7 @@ def to_graph_execution_entry(
             graph_version=self.graph_version or 0,
             graph_exec_id=self.id,
             nodes_input_masks=compiled_nodes_input_masks,
-            user_context=user_context,
+            execution_context=execution_context,
             parent_graph_exec_id=parent_graph_exec_id,
         )

@@ -449,7 +449,7 @@ def from_db(_node_exec: AgentNodeExecution, user_id: Optional[str] = None):
         )

     def to_node_execution_entry(
-        self, user_context: "UserContext"
+        self, execution_context: "ExecutionContext"
     ) -> "NodeExecutionEntry":
         return NodeExecutionEntry(
             user_id=self.user_id,
@@ -460,7 +460,7 @@ def to_node_execution_entry(
             node_id=self.node_id,
             block_id=self.block_id,
             inputs=self.input_data,
-            user_context=user_context,
+            execution_context=execution_context,
         )


@@ -1099,10 +1099,11 @@ async def get_latest_node_execution(
 # ----------------- Execution Infrastructure ----------------- #


-class UserContext(BaseModel):
-    """Generic user context for graph execution containing user-specific settings."""
+class ExecutionContext(BaseModel):
+    """Execution context containing user-specific settings and execution options."""

-    timezone: str
+    timezone: str = "UTC"
+    safe_mode: bool = True  # Default to safe mode ON


 class GraphExecutionEntry(BaseModel):
@@ -1111,8 +1112,8 @@ class GraphExecutionEntry(BaseModel):
     graph_id: str
     graph_version: int
     nodes_input_masks: Optional[NodesInputMasks] = None
-    user_context: UserContext
     parent_graph_exec_id: Optional[str] = None
+    execution_context: ExecutionContext = Field(default_factory=ExecutionContext)


 class NodeExecutionEntry(BaseModel):
@@ -1124,7 +1125,7 @@ class NodeExecutionEntry(BaseModel):
     node_id: str
     block_id: str
     inputs: BlockInput
-    user_context: UserContext
+    execution_context: ExecutionContext = Field(default_factory=ExecutionContext)


 class ExecutionQueue(Generic[T]):
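
For reference, a short sketch of what the rename means for callers; the field names and defaults are taken from the hunks above, while the surrounding usage is illustrative.

```python
# ExecutionContext replaces UserContext and carries execution options alongside
# user settings. Both fields now have defaults (values below are illustrative).
from backend.data.execution import ExecutionContext

default_ctx = ExecutionContext()  # timezone="UTC", safe_mode=True
custom_ctx = ExecutionContext(timezone="Europe/Berlin", safe_mode=False)

# GraphExecutionEntry and NodeExecutionEntry default the field via
# Field(default_factory=ExecutionContext), which is why the explicit
# user_context=UserContext(timezone="UTC") arguments could be dropped
# from credit_test.py above.
```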

autogpt_platform/backend/backend/data/graph.py

Lines changed: 52 additions & 1 deletion
@@ -61,6 +61,13 @@
 logger = logging.getLogger(__name__)


+class GraphMetadata(BaseDbModel):
+    """Metadata for AgentGraph configuration options."""
+
+    safe_mode: bool = True  # Default to safe mode enabled
+    has_human_in_the_loop: bool = False  # Whether graph contains HITL blocks
+
+
 class Link(BaseDbModel):
     source_id: str
     sink_id: str
@@ -195,6 +202,7 @@ class BaseGraph(BaseDbModel):
     links: list[Link] = []
     forked_from_id: str | None = None
     forked_from_version: int | None = None
+    metadata: GraphMetadata = GraphMetadata()

     @computed_field
     @property
@@ -385,7 +393,6 @@ def aggregate_credentials_inputs(
             field_name,
             field_info,
         ) in node.block.input_schema.get_credentials_fields_info().items():
-
             discriminator = field_info.discriminator
             if not discriminator:
                 node_credential_data.append((field_info, (node.id, field_name)))
@@ -736,6 +743,9 @@ def from_db(
             description=graph.description or "",
             instructions=graph.instructions,
             recommended_schedule_cron=graph.recommendedScheduleCron,
+            metadata=GraphMetadata.model_validate(
+                graph.metadata if graph.metadata else {}
+            ),
             nodes=[NodeModel.from_db(node, for_export) for node in graph.Nodes or []],
             links=list(
                 {
@@ -1232,6 +1242,13 @@ async def fork_graph(graph_id: str, graph_version: int, user_id: str) -> GraphMo
 async def __create_graph(tx, graph: Graph, user_id: str):
     graphs = [graph] + graph.sub_graphs

+    # Auto-detect HITL blocks for metadata
+    for g in graphs:
+        has_hitl = any(
+            node.block_id == "8b2a7b3c-6e9d-4a5f-8c1b-2e3f4a5b6c7d" for node in g.nodes
+        )
+        g.metadata.has_human_in_the_loop = has_hitl
+
     await AgentGraph.prisma(tx).create_many(
         data=[
             AgentGraphCreateInput(
@@ -1244,6 +1261,7 @@ async def __create_graph(tx, graph: Graph, user_id: str):
                 userId=user_id,
                 forkedFromId=graph.forked_from_id,
                 forkedFromVersion=graph.forked_from_version,
+                metadata=SafeJson(graph.metadata.model_dump()),
             )
             for graph in graphs
         ]
@@ -1280,6 +1298,39 @@ async def __create_graph(tx, graph: Graph, user_id: str):
     )


+async def update_graph_metadata(
+    graph_id: str, version: int, metadata: GraphMetadata, user_id: str
+) -> GraphModel:
+    # First verify user has access
+    existing = await db.agentgraph.find_first(
+        where={
+            "id": graph_id,
+            "version": version,
+            "userId": user_id,
+        }
+    )
+    if not existing:
+        raise Exception(f"Graph #{graph_id} v{version} not found or not owned by user")
+
+    updated_graph = await AgentGraph.prisma().update(
+        where={
+            "graphVersionId": {
+                "id": graph_id,
+                "version": version,
+            },
+        },
+        data={
+            "metadata": SafeJson(metadata.model_dump()),
+        },
+        include=AGENT_GRAPH_INCLUDE,
+    )
+
+    if not updated_graph:
+        raise Exception(f"Failed to update graph #{graph_id} v{version} metadata")
+
+    return GraphModel.from_db(updated_graph)
+
+
 # ------------------------ UTILITIES ------------------------ #

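
A hedged usage sketch of the new helper follows, e.g. what a metadata route handler might do. The argument values are illustrative; note that the update stores metadata.model_dump() wholesale, so a real caller would presumably start from the graph's current metadata rather than a fresh object.

```python
# Hypothetical caller of update_graph_metadata; the function, the GraphMetadata
# fields, and the ownership check come from the diff above, the values do not.
from backend.data.graph import GraphMetadata, update_graph_metadata


async def disable_safe_mode(graph_id: str, version: int, user_id: str) -> GraphMetadata:
    # The whole metadata blob is overwritten, so fields like
    # has_human_in_the_loop must be carried over explicitly.
    meta = GraphMetadata(safe_mode=False, has_human_in_the_loop=True)
    graph = await update_graph_metadata(graph_id, version, meta, user_id)
    return graph.metadata
```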

autogpt_platform/backend/backend/data/human_review.py

Lines changed: 6 additions & 16 deletions
@@ -121,27 +121,17 @@ async def get_or_create_human_review(
     if review.processed:
         return None

-    if review.status == ReviewStatus.APPROVED:
-        # Return the approved review result
-        return ReviewResult(
-            data=review.payload,
-            status=ReviewStatus.APPROVED,
-            message=review.reviewMessage or "",
-            processed=review.processed,
-            node_exec_id=review.nodeExecId,
-        )
-    elif review.status == ReviewStatus.REJECTED:
-        # Return the rejected review result
+    # If pending, return None to continue waiting, otherwise return the review result
+    if review.status == ReviewStatus.WAITING:
+        return None
+    else:
         return ReviewResult(
-            data=None,
-            status=ReviewStatus.REJECTED,
+            data=review.payload if review.status == ReviewStatus.APPROVED else None,
+            status=review.status,
             message=review.reviewMessage or "",
             processed=review.processed,
             node_exec_id=review.nodeExecId,
         )
-    else:
-        # Review is pending - return None to continue waiting
-        return None


 async def has_pending_reviews_for_graph_exec(graph_exec_id: str) -> bool:
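
For context, a small illustrative sketch of how a caller might interpret the consolidated return value; nothing below is code from this commit, and the data-based check is a simplification of the APPROVED/REJECTED distinction.

```python
def summarize_review(result) -> str:
    """Illustrative only: interpret the consolidated return value of
    get_or_create_human_review (None while WAITING or already processed,
    a single ReviewResult for APPROVED or REJECTED)."""
    if result is None:
        # Still waiting for a human decision, or the review was already processed.
        return "waiting for human review"
    if result.data is not None:
        # Only APPROVED reviews carry a payload in the consolidated helper.
        return f"approved: {result.message}"
    # REJECTED: no payload; surface the reviewer's message instead.
    return f"rejected: {result.message}"
```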

autogpt_platform/backend/backend/executor/database.py

Lines changed: 3 additions & 0 deletions
@@ -9,6 +9,7 @@
     get_block_error_stats,
     get_child_graph_executions,
     get_execution_kv_data,
+    get_graph_execution,
     get_graph_execution_meta,
     get_graph_executions,
     get_graph_executions_count,
@@ -130,6 +131,7 @@ def _(
     get_child_graph_executions = _(get_child_graph_executions)
     get_graph_executions = _(get_graph_executions)
     get_graph_executions_count = _(get_graph_executions_count)
+    get_graph_execution = _(get_graph_execution)
     get_graph_execution_meta = _(get_graph_execution_meta)
     create_graph_execution = _(create_graph_execution)
     get_node_execution = _(get_node_execution)
@@ -254,6 +256,7 @@ def get_service_type(cls):
     get_latest_node_execution = d.get_latest_node_execution
     get_graph = d.get_graph
     get_graph_metadata = d.get_graph_metadata
+    get_graph_execution = d.get_graph_execution
     get_graph_execution_meta = d.get_graph_execution_meta
     get_node = d.get_node
     get_node_execution = d.get_node_execution
