Commit 187d288

Add dim order assert (#7660)
Add dim order assert (#7561)

* Add dim order assert for MTK backend
* Add dim order guard for mediatek backend AOT part
* Fix lintrunner error
* Remove unnecessary code

---------

Co-authored-by: neuropilot-captain <[email protected]>
Co-authored-by: Poyuan Jeng <[email protected]>
(cherry picked from commit cd0fcc2)
Co-authored-by: neuropilot-captain <[email protected]>
1 parent 8d37805 commit 187d288

3 files changed: 34 additions, 0 deletions

backends/mediatek/partitioner.py

Lines changed: 5 additions & 0 deletions
@@ -44,6 +44,11 @@ def is_node_supported(self, _, node: torch.fx.Node) -> bool:
             return False
 
         op_type = node.target.__name__
+
+        # Skip until we can handle the dimension order representation
+        if op_type == "aten._to_copy.default":
+            return False
+
         if op_type in self._op_types_to_skip or node.name in self._op_names_to_skip:
             print(
                 f"[Neuropilot Backend] The {op_type} operator with name '{node.name}' is skipped."

backends/mediatek/preprocess.py

Lines changed: 20 additions & 0 deletions
@@ -22,6 +22,23 @@
 SKIP_COMPILE_SPEC_KEYS = {"ImportForever"}
 
 
+def assert_default_dim_order(edge_graph_module: torch.fx.GraphModule) -> None:
+    for node in edge_graph_module.graph.nodes:
+        if node.op != "placeholder":
+            continue
+
+        # We expect the default dim order for all tensor-like inputs i.e. inputs, buffers, and params
+        t = node.meta.get("val", None)
+        if t is not None and getattr(t, "dim_order", None) is not None:
+            default_dim_order = tuple(range(t.dim()))
+            if t.dim_order() != default_dim_order:
+                raise RuntimeError(
+                    f"Neuropilot backend only supports contiguous memory format for inputs."
+                    f"Expecting dim_order: {default_dim_order}, but got "
+                    f"{node.meta['val'].dim_order()} for a placeholder node {node}."
+                )
+
+
 @final
 class NeuropilotBackend(BackendDetails):
 
@@ -30,6 +47,9 @@ def preprocess(
         cls, edge_program: ExportedProgram, module_compile_spec: List[CompileSpec]
     ) -> PreprocessResult:
 
+        # Make sure all inputs are contiguous_format or NCHW or default dim order
+        assert_default_dim_order(edge_program.graph_module)
+
         name_to_node_mappings = {node.name: node for node in edge_program.graph.nodes}
        input_names = edge_program.graph_signature.user_inputs
        output_names = edge_program.graph_signature.user_outputs
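As a rough illustration of what the new assert_default_dim_order check accepts and rejects, the standalone snippet below (assuming a PyTorch build that provides Tensor.dim_order()) compares a contiguous tensor's dim order with a channels_last one; only the former would pass preprocess().

# Standalone sketch of the dim-order rule enforced at AOT time (not part of
# the diff above). The default dim order is (0, 1, ..., dim-1).
import torch

x = torch.randn(1, 3, 8, 8)                    # contiguous / NCHW
assert x.dim_order() == tuple(range(x.dim()))  # (0, 1, 2, 3): accepted

y = x.to(memory_format=torch.channels_last)
assert y.dim_order() == (0, 2, 3, 1)           # non-default: preprocess() raises RuntimeError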

backends/mediatek/runtime/NeuronBackend.cpp

Lines changed: 9 additions & 0 deletions
@@ -13,6 +13,7 @@
 #include "api/NeuronAdapter.h"
 
 #include "executorch/runtime/core/error.h"
+#include "executorch/runtime/core/exec_aten/util/dim_order_util.h"
 
 #include <algorithm>
 #include <memory>
@@ -111,6 +112,14 @@ Error NeuronExecuTorchDelegate::execute(
   size_t inputCount = mInputSizes.size(), outputCount = mOutputSizes.size();
 
   for (int i = 0; i < inputCount; i++) {
+    auto tensor_in = args[i]->toTensor();
+    ET_CHECK_OR_RETURN_ERROR(
+        runtime::is_contiguous_dim_order(
+            tensor_in.dim_order().data(), tensor_in.dim()),
+        Internal,
+        "Expecting default dim_order but got a non default dim_order tensor for external input %u",
+        i);
+
     auto data_ptr = args[i]->toTensor().data_ptr();
     auto data_size = args[i]->toTensor().nbytes();
     if (IsCached</*isInput=*/true>(i, data_ptr)) {
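The runtime guard mirrors the AOT check by calling ExecuTorch's runtime::is_contiguous_dim_order on each external input. A hedged Python rendering of that predicate (the real implementation is the C++ helper included above; this sketch is only for intuition) is:

# Sketch of the rule applied per external input at execute() time: a dim
# order passes only if it is exactly 0, 1, ..., dim-1.
def is_contiguous_dim_order(dim_order) -> bool:
    return tuple(dim_order) == tuple(range(len(dim_order)))


assert is_contiguous_dim_order((0, 1, 2, 3))      # contiguous input: accepted
assert not is_contiguous_dim_order((0, 2, 3, 1))  # channels_last input: Internal error returned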
