Commit a666c36

Improve error message returned in case of memory planning failure.
Differential Revision: D78047056
Pull Request resolved: #12403
1 parent 890a3ab commit a666c36

2 files changed: +14 -3 lines changed


backends/cadence/aot/memory_planning.py

Lines changed: 10 additions & 3 deletions
@@ -19,7 +19,10 @@
     MemoryPlanningAlgo,
     MemoryPlanningState,
 )
-from executorch.backends.cadence.aot.utils import MemoryConfig
+from executorch.backends.cadence.aot.utils import (
+    MemoryConfig,
+    MemoryPlanningAlgoFailure,
+)
 
 from executorch.exir import ExecutorchProgramManager
 from executorch.exir.memory_planning import collect_specs_from_nodes, Verifier
@@ -95,7 +98,9 @@ def plan(
         ):
             self.plan_spec(spec, state, placement_constraints)
             if not state.is_placed(spec):
-                raise MemoryError(f"Cannot fit {spec} in any memory hierarchy")
+                raise MemoryPlanningAlgoFailure(
+                    f"Cannot fit {spec} {spec.allocated_memory=} in any memory hierarchy for {self.memory_config}"
+                )
 
 
 class GreedyWithHeuristic(MemoryPlanningAlgo):
@@ -169,7 +174,9 @@ def plan(
         ):
             self.plan_spec(spec, state, placement_constraints)
             if not state.is_placed(spec):
-                raise MemoryError(f"Cannot fit {spec} in any memory hierarchy")
+                raise MemoryPlanningAlgoFailure(
+                    f"Cannot fit {spec} in any memory hierarchy for {self.memory_config}"
+                )
 
         logging.debug(
             f"greedy by size for offset calculation with hierarchy returns bufsizes: {state.bufsizes}"

backends/cadence/aot/utils.py

Lines changed: 4 additions & 0 deletions
@@ -25,6 +25,10 @@
 from torch.utils._pytree import tree_flatten
 
 
+class MemoryPlanningAlgoFailure(Exception):
+    pass
+
+
 # Get the output size of a 1D convolution given the input size and parameters
 def get_conv1d_output_size(
     in_size: torch.Size,
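Because planning failures now raise the dedicated MemoryPlanningAlgoFailure (which derives from Exception, not MemoryError), callers that want to handle them must catch the new type; an existing `except MemoryError` handler would no longer trigger. A minimal sketch, where run_planning() is a hypothetical stand-in for whatever code drives the planner:

# Minimal sketch: handling the new failure type. `run_planning` is a
# hypothetical helper, not part of the ExecuTorch API; it stands in for
# whatever code ends up calling the memory planning algorithm's plan(...).
from executorch.backends.cadence.aot.utils import MemoryPlanningAlgoFailure


def run_planning() -> None:
    ...  # invokes the memory planning algorithm


try:
    run_planning()
except MemoryPlanningAlgoFailure as err:
    # The message now carries the failing spec, its allocated size (in the
    # position-based planner), and the memory_config of the target.
    print(f"Memory planning failed: {err}")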

0 commit comments
