backends/mediatek/partitioner.py (2 changes: 1 addition & 1 deletion)

@@ -46,7 +46,7 @@ def is_node_supported(self, _, node: torch.fx.Node) -> bool:
        op_type = node.target.__name__

        # Skip until we can handle the dimension order representation
-       if op_type == 'aten._to_copy.default':
+       if op_type == "aten._to_copy.default":
            return False

        if op_type in self._op_types_to_skip or node.name in self._op_names_to_skip:

Review comment (Contributor), on the changed line: I assume you don't partition empty - that is another one of the dim_order ops. Just FYI.
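For reference, a minimal sketch of how the skip could be generalized to cover the other dim_order op the reviewer mentions. Only aten._to_copy.default appears in this diff; the overload name aten.empty.memory_format and the trailing return True are assumptions for illustration.

import torch

# Assumed overload name for the empty op, per the review comment;
# only "aten._to_copy.default" is confirmed by this diff.
DIM_ORDER_OPS_TO_SKIP = {
    "aten._to_copy.default",
    "aten.empty.memory_format",
}

def is_node_supported(self, _, node: torch.fx.Node) -> bool:
    op_type = node.target.__name__

    # Skip until we can handle the dimension order representation
    if op_type in DIM_ORDER_OPS_TO_SKIP:
        return False

    if op_type in self._op_types_to_skip or node.name in self._op_names_to_skip:
        return False

    return True  # assumed fall-through; the real method's tail is not shown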
backends/mediatek/preprocess.py (2 changes: 1 addition & 1 deletion)

@@ -48,7 +48,7 @@ def preprocess(
    ) -> PreprocessResult:

        # Make sure all inputs are contiguous_format or NCHW or default dim order
-       print('here')
+       print("here")
        assert_default_dim_order(edge_program.graph_module)

        name_to_node_mappings = {node.name: node for node in edge_program.graph.nodes}
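assert_default_dim_order is called here but not defined in this diff. A minimal sketch of what such a check might do, assuming graph inputs carry a fake tensor in node.meta["val"] (standard for exported programs) and that it exposes torch.Tensor.dim_order():

import torch

def assert_default_dim_order(graph_module: torch.fx.GraphModule) -> None:
    # Illustrative only: verify every placeholder (graph input) uses the
    # default/contiguous dim order (0, 1, ..., N-1), i.e. contiguous_format.
    for node in graph_module.graph.nodes:
        if node.op != "placeholder":
            continue
        val = node.meta.get("val")
        if val is None:
            continue
        expected = tuple(range(val.dim()))
        actual = tuple(val.dim_order()) if hasattr(val, "dim_order") else expected
        assert actual == expected, (
            f"Input {node.name} has non-default dim order {actual}"
        )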
backends/mediatek/runtime/NeuronBackend.cpp (3 changes: 2 additions & 1 deletion)

@@ -114,7 +114,8 @@ Error NeuronExecuTorchDelegate::execute(
  for (int i = 0; i < inputCount; i++) {
    auto tensor_in = args[i]->toTensor();
    ET_CHECK_OR_RETURN_ERROR(
-       runtime::is_contiguous_dim_order(tensor_in.dim_order().data(), tensor_in.dim()),
+       runtime::is_contiguous_dim_order(
+           tensor_in.dim_order().data(), tensor_in.dim()),
        Internal,
        "Expecting default dim_order but got a non default dim_order tensor for external input %u",
        i);
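The check above guards each external input: a contiguous (default) dim order is simply the identity permutation of the dims. A tiny Python illustration of the idea (not the actual runtime::is_contiguous_dim_order implementation):

def is_contiguous_dim_order(dim_order, ndim) -> bool:
    # The default (contiguous) dim order lists dims in declaration
    # order: (0, 1, ..., ndim - 1).
    return list(dim_order) == list(range(ndim))

# A 4-D NCHW tensor passes; a channels-last (NHWC) tensor, whose dim
# order is (0, 2, 3, 1), does not.
assert is_contiguous_dim_order((0, 1, 2, 3), 4)
assert not is_contiguous_dim_order((0, 2, 3, 1), 4)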