
Commit 2d4595f

toggle implementation
1 parent 3d08c22 commit 2d4595f

File tree

38 files changed: +908 -382 lines changed


autogpt_platform/backend/backend/blocks/agent.py

Lines changed: 5 additions & 4 deletions

@@ -11,7 +11,7 @@
     BlockType,
     get_block,
 )
-from backend.data.execution import ExecutionStatus, NodesInputMasks
+from backend.data.execution import ExecutionContext, ExecutionStatus, NodesInputMasks
 from backend.data.model import NodeExecutionStats, SchemaField
 from backend.util.json import validate_with_jsonschema
 from backend.util.retry import func_retry
@@ -72,9 +72,9 @@ async def run(
         input_data: Input,
         *,
         graph_exec_id: str,
+        execution_context: ExecutionContext,
         **kwargs,
     ) -> BlockOutput:
-
         from backend.executor import utils as execution_utils

         graph_exec = await execution_utils.add_graph_execution(
@@ -83,8 +83,9 @@ async def run(
             user_id=input_data.user_id,
             inputs=input_data.inputs,
             nodes_input_masks=input_data.nodes_input_masks,
-            parent_graph_exec_id=graph_exec_id,
-            is_sub_graph=True,  # AgentExecutorBlock executions are always sub-graphs
+            execution_context=execution_context.model_copy(
+                update={"parent_execution_id": graph_exec_id},
+            ),
         )

         logger = execution_utils.LogMetadata(
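
The model_copy(update=...) call above is how the parent's ExecutionContext is handed down to a sub-graph run: every field is inherited and only the parent pointer is overridden. A minimal, self-contained sketch of that behavior (pydantic v2; the example IDs are hypothetical):

from typing import Optional

from pydantic import BaseModel


class ExecutionContext(BaseModel):
    safe_mode: bool = True
    user_timezone: str = "UTC"
    root_execution_id: Optional[str] = None
    parent_execution_id: Optional[str] = None


parent_ctx = ExecutionContext(
    safe_mode=False,
    user_timezone="Europe/Berlin",
    root_execution_id="exec-root",
)

# The sub-graph inherits safe_mode and timezone; only the parent pointer changes.
child_ctx = parent_ctx.model_copy(update={"parent_execution_id": "exec-current"})

assert child_ctx.safe_mode is False
assert child_ctx.user_timezone == "Europe/Berlin"
assert child_ctx.root_execution_id == "exec-root"
assert child_ctx.parent_execution_id == "exec-current"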

autogpt_platform/backend/backend/blocks/human_in_the_loop.py

Lines changed: 32 additions & 23 deletions

@@ -9,8 +9,9 @@
     BlockOutput,
     BlockSchemaInput,
     BlockSchemaOutput,
+    BlockType,
 )
-from backend.data.execution import ExecutionStatus
+from backend.data.execution import ExecutionContext, ExecutionStatus
 from backend.data.human_review import ReviewResult
 from backend.data.model import SchemaField
 from backend.executor.manager import async_update_node_execution_status
@@ -61,15 +62,15 @@ def __init__(self):
             categories={BlockCategory.BASIC},
             input_schema=HumanInTheLoopBlock.Input,
             output_schema=HumanInTheLoopBlock.Output,
+            block_type=BlockType.HUMAN_IN_THE_LOOP,
             test_input={
                 "data": {"name": "John Doe", "age": 30},
                 "name": "User profile data",
                 "editable": True,
             },
             test_output=[
-                ("reviewed_data", {"name": "John Doe", "age": 30}),
                 ("status", "approved"),
-                ("review_message", ""),
+                ("reviewed_data", {"name": "John Doe", "age": 30}),
             ],
             test_mock={
                 "get_or_create_human_review": lambda *_args, **_kwargs: ReviewResult(
@@ -80,9 +81,25 @@ def __init__(self):
                     node_exec_id="test-node-exec-id",
                 ),
                 "update_node_execution_status": lambda *_args, **_kwargs: None,
+                "update_review_processed_status": lambda *_args, **_kwargs: None,
             },
         )

+    async def get_or_create_human_review(self, **kwargs):
+        return await get_database_manager_async_client().get_or_create_human_review(
+            **kwargs
+        )
+
+    async def update_node_execution_status(self, **kwargs):
+        return await async_update_node_execution_status(
+            db_client=get_database_manager_async_client(), **kwargs
+        )
+
+    async def update_review_processed_status(self, node_exec_id: str, processed: bool):
+        return await get_database_manager_async_client().update_review_processed_status(
+            node_exec_id, processed
+        )
+
     async def run(
         self,
         input_data: Input,
@@ -92,20 +109,20 @@ async def run(
         graph_exec_id: str,
         graph_id: str,
         graph_version: int,
+        execution_context: ExecutionContext,
         **kwargs,
     ) -> BlockOutput:
-        """
-        Execute the Human In The Loop block.
+        if not execution_context.safe_mode:
+            logger.info(
+                f"HITL block skipping review for node {node_exec_id} - safe mode disabled"
+            )
+            yield "status", "approved"
+            yield "reviewed_data", input_data.data
+            yield "review_message", "Auto-approved (safe mode disabled)"
+            return

-        This method uses one function to handle the complete workflow - checking existing reviews
-        and creating pending ones as needed.
-        """
         try:
-            logger.debug(f"HITL block executing for node {node_exec_id}")
-
-            # Use the data layer to handle the complete workflow
-            db_client = get_database_manager_async_client()
-            result = await db_client.get_or_create_human_review(
+            result = await self.get_or_create_human_review(
                 user_id=user_id,
                 node_exec_id=node_exec_id,
                 graph_exec_id=graph_exec_id,
@@ -119,32 +136,24 @@ async def run(
             logger.error(f"Error in HITL block for node {node_exec_id}: {str(e)}")
             raise

-        # Check if we're waiting for human input
         if result is None:
             logger.info(
                 f"HITL block pausing execution for node {node_exec_id} - awaiting human review"
             )
             try:
-                # Set node status to REVIEW so execution manager can't mark it as COMPLETED
-                # The VALID_STATUS_TRANSITIONS will then prevent any unwanted status changes
-                # Use the proper wrapper function to ensure websocket events are published
-                await async_update_node_execution_status(
-                    db_client=db_client,
+                await self.update_node_execution_status(
                     exec_id=node_exec_id,
                     status=ExecutionStatus.REVIEW,
                 )
-                # Execution pauses here until API routes process the review
                 return
             except Exception as e:
                 logger.error(
                     f"Failed to update node status for HITL block {node_exec_id}: {str(e)}"
                 )
                 raise

-        # Review is complete (approved or rejected) - check if unprocessed
         if not result.processed:
-            # Mark as processed before yielding
-            await db_client.update_review_processed_status(
+            await self.update_review_processed_status(
                 node_exec_id=node_exec_id, processed=True
             )
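
The new safe-mode gate short-circuits the review workflow entirely when safe_mode is off: the block auto-approves and yields immediately instead of parking the node in REVIEW. A simplified, self-contained sketch of that control flow (the run_hitl helper and the "waiting_for_review" status are illustrative, not from the codebase):

import asyncio

from pydantic import BaseModel


class ExecutionContext(BaseModel):
    safe_mode: bool = True
    user_timezone: str = "UTC"


async def run_hitl(data: dict, ctx: ExecutionContext):
    if not ctx.safe_mode:
        # No human review requested: pass the data straight through.
        yield "status", "approved"
        yield "reviewed_data", data
        yield "review_message", "Auto-approved (safe mode disabled)"
        return
    # With safe mode on, the real block would create or look up a pending review
    # and move the node into REVIEW; here we only signal that it would wait.
    yield "status", "waiting_for_review"


async def main():
    ctx = ExecutionContext(safe_mode=False)
    outputs = [pair async for pair in run_hitl({"name": "John Doe"}, ctx)]
    print(outputs)  # [('status', 'approved'), ('reviewed_data', ...), ('review_message', ...)]


asyncio.run(main())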

autogpt_platform/backend/backend/blocks/time_blocks.py

Lines changed: 11 additions & 12 deletions

@@ -14,7 +14,7 @@
     BlockSchemaInput,
     BlockSchemaOutput,
 )
-from backend.data.execution import UserContext
+from backend.data.execution import ExecutionContext
 from backend.data.model import SchemaField

 # Shared timezone literal type for all time/date blocks
@@ -188,10 +188,9 @@ def __init__(self):
         )

     async def run(
-        self, input_data: Input, *, user_context: UserContext, **kwargs
+        self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
     ) -> BlockOutput:
-        # Extract timezone from user_context (always present)
-        effective_timezone = user_context.timezone
+        effective_timezone = execution_context.user_timezone

         # Get the appropriate timezone
         tz = _get_timezone(input_data.format_type, effective_timezone)
@@ -298,10 +297,10 @@ def __init__(self):
             ],
         )

-    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        # Extract timezone from user_context (required keyword argument)
-        user_context: UserContext = kwargs["user_context"]
-        effective_timezone = user_context.timezone
+    async def run(
+        self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
+    ) -> BlockOutput:
+        effective_timezone = execution_context.user_timezone

         try:
             offset = int(input_data.offset)
@@ -404,10 +403,10 @@ def __init__(self):
             ],
         )

-    async def run(self, input_data: Input, **kwargs) -> BlockOutput:
-        # Extract timezone from user_context (required keyword argument)
-        user_context: UserContext = kwargs["user_context"]
-        effective_timezone = user_context.timezone
+    async def run(
+        self, input_data: Input, *, execution_context: ExecutionContext, **kwargs
+    ) -> BlockOutput:
+        effective_timezone = execution_context.user_timezone

         # Get the appropriate timezone
         tz = _get_timezone(input_data.format_type, effective_timezone)
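
All three time/date blocks now accept execution_context as a keyword-only parameter instead of pulling user_context out of **kwargs. A hedged sketch of the new calling convention, with zoneinfo.ZoneInfo standing in for the blocks' _get_timezone helper:

import asyncio
from datetime import datetime
from zoneinfo import ZoneInfo

from pydantic import BaseModel


class ExecutionContext(BaseModel):
    safe_mode: bool = True
    user_timezone: str = "UTC"


async def run(*, execution_context: ExecutionContext, **kwargs) -> str:
    # user_timezone defaults to "UTC", so the block always has a usable value.
    tz = ZoneInfo(execution_context.user_timezone)
    return datetime.now(tz).isoformat()


ctx = ExecutionContext(user_timezone="Asia/Tokyo")
print(asyncio.run(run(execution_context=ctx)))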

autogpt_platform/backend/backend/data/block.py

Lines changed: 10 additions & 0 deletions

@@ -71,6 +71,7 @@ class BlockType(Enum):
     AGENT = "Agent"
     AI = "AI"
     AYRSHARE = "Ayrshare"
+    HUMAN_IN_THE_LOOP = "Human In The Loop"


 class BlockCategory(Enum):
@@ -796,3 +797,12 @@ def get_io_block_ids() -> Sequence[str]:
         for id, B in get_blocks().items()
         if B().block_type in (BlockType.INPUT, BlockType.OUTPUT)
     ]
+
+
+@cached(ttl_seconds=3600)
+def get_human_in_the_loop_block_ids() -> Sequence[str]:
+    return [
+        id
+        for id, B in get_blocks().items()
+        if B().block_type == BlockType.HUMAN_IN_THE_LOOP
+    ]
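
get_human_in_the_loop_block_ids follows the same pattern as get_io_block_ids above: filter the block registry by BlockType and cache the result. A simplified sketch, with functools.lru_cache standing in for the platform's TTL cache decorator and a hypothetical id-to-type dict in place of get_blocks():

from enum import Enum
from functools import lru_cache
from typing import Sequence


class BlockType(Enum):
    STANDARD = "Standard"
    INPUT = "Input"
    OUTPUT = "Output"
    HUMAN_IN_THE_LOOP = "Human In The Loop"


# block_id -> block_type; the real registry maps ids to block classes instead.
BLOCKS = {
    "io-input-1": BlockType.INPUT,
    "hitl-1": BlockType.HUMAN_IN_THE_LOOP,
    "llm-1": BlockType.STANDARD,
}


@lru_cache(maxsize=1)
def get_human_in_the_loop_block_ids() -> Sequence[str]:
    # A tuple keeps the cached value immutable.
    return tuple(bid for bid, btype in BLOCKS.items() if btype == BlockType.HUMAN_IN_THE_LOOP)


print(get_human_in_the_loop_block_ids())  # ('hitl-1',)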

autogpt_platform/backend/backend/data/credit_test.py

Lines changed: 3 additions & 3 deletions

@@ -7,7 +7,7 @@
 from backend.blocks.llm import AITextGeneratorBlock
 from backend.data.block import get_block
 from backend.data.credit import BetaUserCredit, UsageTransactionMetadata
-from backend.data.execution import NodeExecutionEntry, UserContext
+from backend.data.execution import ExecutionContext, NodeExecutionEntry
 from backend.data.user import DEFAULT_USER_ID
 from backend.executor.utils import block_usage_cost
 from backend.integrations.credentials_store import openai_credentials
@@ -86,7 +86,7 @@ async def test_block_credit_usage(server: SpinTestServer):
                     "type": openai_credentials.type,
                 },
             },
-            user_context=UserContext(timezone="UTC"),
+            execution_context=ExecutionContext(user_timezone="UTC"),
         ),
     )
     assert spending_amount_1 > 0
@@ -101,7 +101,7 @@ async def test_block_credit_usage(server: SpinTestServer):
             node_exec_id="test_node_exec",
             block_id=AITextGeneratorBlock().id,
             inputs={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
-            user_context=UserContext(timezone="UTC"),
+            execution_context=ExecutionContext(user_timezone="UTC"),
         ),
     )
     assert spending_amount_2 == 0
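
The test change is mechanical, but it reflects the schema change behind it: NodeExecutionEntry now carries an ExecutionContext instead of a UserContext. A trimmed sketch (pydantic v2, only the relevant fields) of why the old keyword no longer validates:

from pydantic import BaseModel, ValidationError


class ExecutionContext(BaseModel):
    safe_mode: bool = True
    user_timezone: str = "UTC"


class NodeExecutionEntry(BaseModel):  # trimmed to the fields relevant here
    block_id: str
    execution_context: ExecutionContext


entry = NodeExecutionEntry(
    block_id="llm-block",
    execution_context=ExecutionContext(user_timezone="UTC"),
)

try:
    # The old keyword is ignored and the required execution_context is missing.
    NodeExecutionEntry(block_id="llm-block", user_context={"timezone": "UTC"})
except ValidationError as exc:
    print("execution_context is now required:", exc.error_count(), "error(s)")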

autogpt_platform/backend/backend/data/execution.py

Lines changed: 18 additions & 15 deletions

@@ -71,6 +71,18 @@
 config = Config()


+class ExecutionContext(BaseModel):
+    """
+    Unified context that carries execution-level data throughout the entire execution flow.
+    This includes information needed by blocks, sub-graphs, and execution management.
+    """
+
+    safe_mode: bool = True
+    user_timezone: str = "UTC"
+    root_execution_id: Optional[str] = None
+    parent_execution_id: Optional[str] = None
+
+
 # -------------------------- Models -------------------------- #


@@ -365,18 +377,16 @@ def from_db(_graph_exec: AgentGraphExecution):

     def to_graph_execution_entry(
         self,
-        user_context: "UserContext",
+        execution_context: ExecutionContext,
         compiled_nodes_input_masks: Optional[NodesInputMasks] = None,
-        parent_graph_exec_id: Optional[str] = None,
     ):
         return GraphExecutionEntry(
             user_id=self.user_id,
             graph_id=self.graph_id,
             graph_version=self.graph_version or 0,
             graph_exec_id=self.id,
             nodes_input_masks=compiled_nodes_input_masks,
-            user_context=user_context,
-            parent_graph_exec_id=parent_graph_exec_id,
+            execution_context=execution_context,
         )


@@ -449,7 +459,7 @@ def from_db(_node_exec: AgentNodeExecution, user_id: Optional[str] = None):
         )

     def to_node_execution_entry(
-        self, user_context: "UserContext"
+        self, execution_context: ExecutionContext
     ) -> "NodeExecutionEntry":
         return NodeExecutionEntry(
             user_id=self.user_id,
@@ -460,7 +470,7 @@ def to_node_execution_entry(
             node_id=self.node_id,
             block_id=self.block_id,
             inputs=self.input_data,
-            user_context=user_context,
+            execution_context=execution_context,
         )


@@ -1099,20 +1109,13 @@ async def get_latest_node_execution(
 # ----------------- Execution Infrastructure ----------------- #


-class UserContext(BaseModel):
-    """Generic user context for graph execution containing user-specific settings."""
-
-    timezone: str
-
-
 class GraphExecutionEntry(BaseModel):
     user_id: str
     graph_exec_id: str
     graph_id: str
     graph_version: int
     nodes_input_masks: Optional[NodesInputMasks] = None
-    user_context: UserContext
-    parent_graph_exec_id: Optional[str] = None
+    execution_context: ExecutionContext


 class NodeExecutionEntry(BaseModel):
@@ -1124,7 +1127,7 @@ class NodeExecutionEntry(BaseModel):
     node_id: str
     block_id: str
    inputs: BlockInput
-    user_context: UserContext
+    execution_context: ExecutionContext


 class ExecutionQueue(Generic[T]):
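
ExecutionContext replaces both the old UserContext model and the separate parent_graph_exec_id plumbing, and it adds root_execution_id so nested sub-graphs can still point back to the top-level run. A sketch of how the two IDs might be threaded through nesting (the child_context helper is an assumption for illustration, not code from this commit):

from typing import Optional

from pydantic import BaseModel


class ExecutionContext(BaseModel):
    safe_mode: bool = True
    user_timezone: str = "UTC"
    root_execution_id: Optional[str] = None
    parent_execution_id: Optional[str] = None


def child_context(ctx: ExecutionContext, current_exec_id: str) -> ExecutionContext:
    """Derive the context for a sub-graph spawned from current_exec_id."""
    return ctx.model_copy(
        update={
            "root_execution_id": ctx.root_execution_id or current_exec_id,
            "parent_execution_id": current_exec_id,
        }
    )


top = ExecutionContext()                     # fresh top-level run, no parent yet
lvl1 = child_context(top, "exec-A")          # sub-graph spawned by exec-A
lvl2 = child_context(lvl1, "exec-B")         # sub-graph spawned by exec-B

assert lvl2.root_execution_id == "exec-A"    # the root is fixed at the first nesting level
assert lvl2.parent_execution_id == "exec-B"  # the parent tracks the immediate caller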

autogpt_platform/backend/backend/data/graph.py

Lines changed: 35 additions & 0 deletions

@@ -61,6 +61,10 @@
 logger = logging.getLogger(__name__)


+class GraphSettings(BaseModel):
+    human_in_the_loop_safe_mode: bool | None = None
+
+
 class Link(BaseDbModel):
     source_id: str
     sink_id: str
@@ -225,6 +229,15 @@ def output_schema(self) -> dict[str, Any]:
     def has_external_trigger(self) -> bool:
         return self.webhook_input_node is not None

+    @computed_field
+    @property
+    def has_human_in_the_loop(self) -> bool:
+        return any(
+            node.block_id
+            for node in self.nodes
+            if node.block.block_type == BlockType.HUMAN_IN_THE_LOOP
+        )
+
     @property
     def webhook_input_node(self) -> Node | None:
         return next(
@@ -1105,6 +1118,28 @@ async def delete_graph(graph_id: str, user_id: str) -> int:
     return entries_count


+async def get_graph_settings(user_id: str, graph_id: str) -> GraphSettings:
+    lib = await LibraryAgent.prisma().find_first(
+        where={
+            "userId": user_id,
+            "agentGraphId": graph_id,
+            "isDeleted": False,
+            "isArchived": False,
+        },
+        order={"agentGraphVersion": "desc"},
+    )
+    if not lib or not lib.settings:
+        return GraphSettings()
+
+    try:
+        return GraphSettings.model_validate(lib.settings)
+    except Exception:
+        logger.warning(
+            f"Malformed settings for LibraryAgent user={user_id} graph={graph_id}"
+        )
+        return GraphSettings()
+
+
 async def validate_graph_execution_permissions(
     user_id: str, graph_id: str, graph_version: int, is_sub_graph: bool = False
 ) -> None:
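
GraphSettings.human_in_the_loop_safe_mode is a tri-state toggle: None means the library agent has no explicit preference, while True or False records a per-agent choice, and get_graph_settings falls back to defaults when the stored settings are missing or malformed. A hedged sketch of how such a value could be resolved into an effective flag (the resolve_safe_mode helper and its default are assumptions, not code from this commit):

from pydantic import BaseModel


class GraphSettings(BaseModel):
    human_in_the_loop_safe_mode: bool | None = None


def resolve_safe_mode(settings: GraphSettings, default: bool = True) -> bool:
    # None means "not set": fall back to the platform-wide default.
    if settings.human_in_the_loop_safe_mode is None:
        return default
    return settings.human_in_the_loop_safe_mode


print(resolve_safe_mode(GraphSettings()))                                   # True (default)
print(resolve_safe_mode(GraphSettings(human_in_the_loop_safe_mode=False)))  # False

# model_validate mirrors how stored JSON settings would be parsed.
stored = GraphSettings.model_validate({"human_in_the_loop_safe_mode": True})
print(resolve_safe_mode(stored))                                            # True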
