Skip to content

Commit 8c4899b

Browse files
committed
Merge branch 'dev' into kpczerwinski/secrt-1726-move-completion-backend
2 parents 49e8afc + 07b5fe8 commit 8c4899b

File tree

54 files changed

+1442
-515
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

54 files changed

+1442
-515
lines changed

autogpt_platform/backend/backend/blocks/llm.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -129,6 +129,7 @@ class LlmModel(str, Enum, metaclass=LlmModelMeta):
129129
OPENAI_GPT_OSS_120B = "openai/gpt-oss-120b"
130130
OPENAI_GPT_OSS_20B = "openai/gpt-oss-20b"
131131
GEMINI_2_5_PRO = "google/gemini-2.5-pro-preview-03-25"
132+
GEMINI_3_PRO_PREVIEW = "google/gemini-3-pro-preview"
132133
GEMINI_2_5_FLASH = "google/gemini-2.5-flash"
133134
GEMINI_2_0_FLASH = "google/gemini-2.0-flash-001"
134135
GEMINI_2_5_FLASH_LITE_PREVIEW = "google/gemini-2.5-flash-lite-preview-06-17"
@@ -241,6 +242,7 @@ def max_output_tokens(self) -> int | None:
241242
LlmModel.OLLAMA_DOLPHIN: ModelMetadata("ollama", 32768, None),
242243
# https://openrouter.ai/models
243244
LlmModel.GEMINI_2_5_PRO: ModelMetadata("open_router", 1050000, 8192),
245+
LlmModel.GEMINI_3_PRO_PREVIEW: ModelMetadata("open_router", 1048576, 65535),
244246
LlmModel.GEMINI_2_5_FLASH: ModelMetadata("open_router", 1048576, 65535),
245247
LlmModel.GEMINI_2_0_FLASH: ModelMetadata("open_router", 1048576, 8192),
246248
LlmModel.GEMINI_2_5_FLASH_LITE_PREVIEW: ModelMetadata(
@@ -252,12 +254,12 @@ def max_output_tokens(self) -> int | None:
252254
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: ModelMetadata("open_router", 128000, 4096),
253255
LlmModel.DEEPSEEK_CHAT: ModelMetadata("open_router", 64000, 2048),
254256
LlmModel.DEEPSEEK_R1_0528: ModelMetadata("open_router", 163840, 163840),
255-
LlmModel.PERPLEXITY_SONAR: ModelMetadata("open_router", 127000, 127000),
257+
LlmModel.PERPLEXITY_SONAR: ModelMetadata("open_router", 127000, 8000),
256258
LlmModel.PERPLEXITY_SONAR_PRO: ModelMetadata("open_router", 200000, 8000),
257259
LlmModel.PERPLEXITY_SONAR_DEEP_RESEARCH: ModelMetadata(
258260
"open_router",
259261
128000,
260-
128000,
262+
16000,
261263
),
262264
LlmModel.NOUSRESEARCH_HERMES_3_LLAMA_3_1_405B: ModelMetadata(
263265
"open_router", 131000, 4096
@@ -797,7 +799,7 @@ class Input(BlockSchemaInput):
797799
default="",
798800
description="The system prompt to provide additional context to the model.",
799801
)
800-
conversation_history: list[dict] = SchemaField(
802+
conversation_history: list[dict] | None = SchemaField(
801803
default_factory=list,
802804
description="The conversation history to provide context for the prompt.",
803805
)
@@ -904,7 +906,7 @@ async def run(
904906
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
905907
) -> BlockOutput:
906908
logger.debug(f"Calling LLM with input data: {input_data}")
907-
prompt = [json.to_dict(p) for p in input_data.conversation_history]
909+
prompt = [json.to_dict(p) for p in input_data.conversation_history or [] if p]
908910

909911
values = input_data.prompt_values
910912
if values:

autogpt_platform/backend/backend/blocks/smart_decision_maker.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -121,13 +121,16 @@ def _convert_raw_response_to_dict(raw_response: Any) -> dict[str, Any]:
121121
return json.to_dict(raw_response)
122122

123123

124-
def get_pending_tool_calls(conversation_history: list[Any]) -> dict[str, int]:
124+
def get_pending_tool_calls(conversation_history: list[Any] | None) -> dict[str, int]:
125125
"""
126126
All the tool calls entry in the conversation history requires a response.
127127
This function returns the pending tool calls that has not generated an output yet.
128128
129129
Return: dict[str, int] - A dictionary of pending tool call IDs with their count.
130130
"""
131+
if not conversation_history:
132+
return {}
133+
131134
pending_calls = Counter()
132135
for history in conversation_history:
133136
for call_id in _get_tool_requests(history):
@@ -173,7 +176,7 @@ class Input(BlockSchemaInput):
173176
"Function parameters that has no default value and not optional typed has to be provided. ",
174177
description="The system prompt to provide additional context to the model.",
175178
)
176-
conversation_history: list[dict] = SchemaField(
179+
conversation_history: list[dict] | None = SchemaField(
177180
default_factory=list,
178181
description="The conversation history to provide context for the prompt.",
179182
)
@@ -605,10 +608,10 @@ async def run(
605608
tool_functions = await self._create_tool_node_signatures(node_id)
606609
yield "tool_functions", json.dumps(tool_functions)
607610

608-
input_data.conversation_history = input_data.conversation_history or []
609-
prompt = [json.to_dict(p) for p in input_data.conversation_history if p]
611+
conversation_history = input_data.conversation_history or []
612+
prompt = [json.to_dict(p) for p in conversation_history if p]
610613

611-
pending_tool_calls = get_pending_tool_calls(input_data.conversation_history)
614+
pending_tool_calls = get_pending_tool_calls(conversation_history)
612615
if pending_tool_calls and input_data.last_tool_output is None:
613616
raise ValueError(f"Tool call requires an output for {pending_tool_calls}")
614617

autogpt_platform/backend/backend/data/block_cost_config.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,7 @@
9292
LlmModel.OPENAI_GPT_OSS_120B: 1,
9393
LlmModel.OPENAI_GPT_OSS_20B: 1,
9494
LlmModel.GEMINI_2_5_PRO: 4,
95+
LlmModel.GEMINI_3_PRO_PREVIEW: 5,
9596
LlmModel.MISTRAL_NEMO: 1,
9697
LlmModel.COHERE_COMMAND_R_08_2024: 1,
9798
LlmModel.COHERE_COMMAND_R_PLUS_08_2024: 3,

autogpt_platform/backend/backend/data/execution.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -460,6 +460,7 @@ def to_node_execution_entry(
460460
async def get_graph_executions(
461461
graph_exec_id: Optional[str] = None,
462462
graph_id: Optional[str] = None,
463+
graph_version: Optional[int] = None,
463464
user_id: Optional[str] = None,
464465
statuses: Optional[list[ExecutionStatus]] = None,
465466
created_time_gte: Optional[datetime] = None,
@@ -476,6 +477,8 @@ async def get_graph_executions(
476477
where_filter["userId"] = user_id
477478
if graph_id:
478479
where_filter["agentGraphId"] = graph_id
480+
if graph_version is not None:
481+
where_filter["agentGraphVersion"] = graph_version
479482
if created_time_gte or created_time_lte:
480483
where_filter["createdAt"] = {
481484
"gte": created_time_gte or datetime.min.replace(tzinfo=timezone.utc),

autogpt_platform/backend/backend/data/graph.py

Lines changed: 36 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
AgentGraphWhereInput,
1919
AgentNodeCreateInput,
2020
AgentNodeLinkCreateInput,
21+
StoreListingVersionWhereInput,
2122
)
2223
from pydantic import BaseModel, Field, create_model
2324
from pydantic.fields import computed_field
@@ -884,9 +885,9 @@ async def get_graph_metadata(graph_id: str, version: int | None = None) -> Graph
884885

885886
async def get_graph(
886887
graph_id: str,
887-
version: int | None = None,
888+
version: int | None,
889+
user_id: str | None,
888890
*,
889-
user_id: str | None = None,
890891
for_export: bool = False,
891892
include_subgraphs: bool = False,
892893
skip_access_check: bool = False,
@@ -897,25 +898,43 @@ async def get_graph(
897898
898899
Returns `None` if the record is not found.
899900
"""
900-
where_clause: AgentGraphWhereInput = {
901-
"id": graph_id,
902-
}
901+
graph = None
903902

904-
if version is not None:
905-
where_clause["version"] = version
903+
# Only search graph directly on owned graph (or access check is skipped)
904+
if skip_access_check or user_id is not None:
905+
graph_where_clause: AgentGraphWhereInput = {
906+
"id": graph_id,
907+
}
908+
if version is not None:
909+
graph_where_clause["version"] = version
910+
if not skip_access_check and user_id is not None:
911+
graph_where_clause["userId"] = user_id
906912

907-
graph = await AgentGraph.prisma().find_first(
908-
where=where_clause,
909-
include=AGENT_GRAPH_INCLUDE,
910-
order={"version": "desc"},
911-
)
913+
graph = await AgentGraph.prisma().find_first(
914+
where=graph_where_clause,
915+
include=AGENT_GRAPH_INCLUDE,
916+
order={"version": "desc"},
917+
)
918+
919+
# Use store listed graph to find not owned graph
912920
if graph is None:
913-
return None
921+
store_where_clause: StoreListingVersionWhereInput = {
922+
"agentGraphId": graph_id,
923+
"submissionStatus": SubmissionStatus.APPROVED,
924+
"isDeleted": False,
925+
}
926+
if version is not None:
927+
store_where_clause["agentGraphVersion"] = version
914928

915-
if not skip_access_check and graph.userId != user_id:
916-
# For access, the graph must be owned by the user or listed in the store
917-
if not await is_graph_published_in_marketplace(graph_id, graph.version):
918-
return None
929+
if store_listing := await StoreListingVersion.prisma().find_first(
930+
where=store_where_clause,
931+
order={"agentGraphVersion": "desc"},
932+
include={"AgentGraph": {"include": AGENT_GRAPH_INCLUDE}},
933+
):
934+
graph = store_listing.AgentGraph
935+
936+
if graph is None:
937+
return None
919938

920939
if include_subgraphs or for_export:
921940
sub_graphs = await get_sub_graphs(graph)

0 commit comments

Comments (0)