
Commit 80250f8

NXP backend: Improve target support checks. (#13367)
### Summary

Improve target-specific checks for operator support on Neutron.

### Test plan

Almost all tests exercise the updated functionality, and some unit tests were updated to reflect the new implementation.
1 parent 49805dd commit 80250f8

24 files changed (+222 / -235 lines changed)
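For orientation, the sketch below is illustrative only and not part of the commit: it shows the shape of the new per-node target check that the diffs below adopt. The `FooConverter` class is hypothetical; `NodeConverter`, `Target`, and `_is_supported_on_target` are the real names introduced or changed here. Previously, a converter instead declared `supported_targets = [Target.RT700]` and the base class checked membership in that collection.

from torch.fx import Node
from torch.nn import Parameter

from executorch.backends.nxp.backend.ir.converter.node_converter import (
    NodeConverter,
    Target,
)


class FooConverter(NodeConverter):  # hypothetical converter, for illustration only
    @staticmethod
    def _is_supported_on_target(
        node: Node, target: Target, parameters_mapping: dict[str, Parameter]
    ) -> bool:
        match target:
            case Target.RT700:
                # Node- and data-dependent Neutron checks go here.
                return True
            case _:
                return False

    @staticmethod
    def _is_supported_in_IR(
        node: Node, parameters_mapping: dict[str, Parameter]
    ) -> bool:
        return True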

backends/nxp/backend/ir/converter/node_converter.py

Lines changed: 13 additions & 17 deletions
@@ -1,11 +1,10 @@
-# Copyright 2024 NXP
+# Copyright 2024-2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.

 from abc import ABC, abstractmethod
 from enum import Enum
-from typing import Collection

 import torch

@@ -53,7 +52,6 @@ class NodeConverter(ABC):
     """

     context: ConversionContext
-    supported_targets: Collection

     def __init__(self, context: ConversionContext):
         self.context = context
@@ -78,25 +76,23 @@ def _is_supported_in_IR(
         Classes which implement conversion for individual operators must overwrite this method.

         :param node: torch.Node to check.
+        :param parameters_mapping: Dictionary mapping tensor names to their static data (if they have it).
         """
         pass

-    @classmethod
-    def _is_supported_on_target(cls, target: Target) -> bool:
-        """Check if the node is supported on the target platform. It uses the 'supported_platform' attribute, which is
-        a list of supported target platforms, and it must be defined by the specific `NodeConverter`.
+    @staticmethod
+    def _is_supported_on_target(
+        node: Node, target: Target, parameters_mapping: dict[str, Parameter]
+    ) -> bool:
+        """Check if the node is supported on the target platform.
+        Child classes should overwrite this method to implement specific target checks. The default implementation
+        can be used by operators with no target specific requirements.

+        :param node: The node (edge operator) to check.
         :param target: Value of the `Target` enum representing the target platform to check for.
+        :param parameters_mapping: Dictionary mapping tensor names to their static data (if they have it).
         """
-        if not (
-            hasattr(cls, "supported_targets")
-            and isinstance(cls.supported_targets, Collection)
-        ):
-            raise NotImplementedError(
-                f"The NodeConverter `{cls}` does not define its `supported_targets` collection."
-            )
-
-        return target == Target.IGNORE or target in cls.supported_targets
+        return target == Target.RT700

     @classmethod
     def is_supported(
@@ -110,7 +106,7 @@ def is_supported(
         """
         return cls._is_supported_in_IR(
             node, parameters_mapping
-        ) and cls._is_supported_on_target(target)
+        ) and cls._is_supported_on_target(node, target, parameters_mapping)

     @staticmethod
     def _has_shared_q_params_if_quantized(node: Node) -> bool:
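A hedged usage sketch (the call site and exact parameter order below are assumptions, not shown in this diff): `is_supported` now forwards the node and its static parameters to the target check as well, so a partitioner query would look roughly like this.

# `SomeConverter` stands in for any NodeConverter subclass from the files below.
if SomeConverter.is_supported(node, Target.RT700, parameters_mapping):
    delegated_nodes.append(node)  # eligible for delegation to Neutron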

backends/nxp/backend/ir/converter/node_converters/ops_converters/abs_converter.py

Lines changed: 2 additions & 7 deletions
@@ -1,14 +1,10 @@
-# Copyright (c) 2025 NXP
-# All rights reserved.
+# Copyright 2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.


-from executorch.backends.nxp.backend.ir.converter.node_converter import (
-    NodeConverter,
-    Target,
-)
+from executorch.backends.nxp.backend.ir.converter.node_converter import NodeConverter
 from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import (
     abs_options,
 )
@@ -17,7 +13,6 @@


 class AbsConverter(NodeConverter):
-    supported_targets = [Target.RT700]

     @staticmethod
     def _is_supported_in_IR(

backends/nxp/backend/ir/converter/node_converters/ops_converters/adaptive_avg_pool_2d_converter.py

Lines changed: 2 additions & 7 deletions
@@ -1,15 +1,11 @@
-# Copyright (c) 2025 NXP
-# All rights reserved.
+# Copyright 2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.

 import executorch.backends.nxp.backend.ir.lib.tflite.Padding as tflPadding
 from executorch.backends.nxp.backend.ir.converter.conversion import common
-from executorch.backends.nxp.backend.ir.converter.node_converter import (
-    NodeConverter,
-    Target,
-)
+from executorch.backends.nxp.backend.ir.converter.node_converter import NodeConverter
 from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model
 from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import (
     average_pool_2d_options,
@@ -20,7 +16,6 @@


 class AdaptiveAvgPool2dConverter(NodeConverter):
-    supported_targets = [Target.RT700]

     @staticmethod
     def _is_supported_in_IR(

backends/nxp/backend/ir/converter/node_converters/ops_converters/add_tensor_converter.py

Lines changed: 15 additions & 7 deletions
@@ -1,5 +1,4 @@
-# Copyright (c) 2025 NXP
-# All rights reserved.
+# Copyright 2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
@@ -19,7 +18,20 @@


 class AddTensorConverter(NodeConverter):
-    supported_targets = [Target.RT700]
+    @staticmethod
+    def _is_supported_on_target(
+        node: Node, target: Target, parameters_mapping: dict[str, Parameter]
+    ) -> bool:
+        match target:
+            case Target.RT700:
+                if node_uses_shape_broadcasting(node):
+                    # Shape broadcasting may require the addition of `Transpose` ops during conversion.
+                    return False
+
+                return True
+
+            case _:
+                return False

     @staticmethod
     def _is_supported_in_IR(
@@ -31,10 +43,6 @@ def _is_supported_in_IR(
         if hasattr(node.kwargs, "alpha"):
             return False

-        # Don't convert if broadcasting input tensors
-        if node_uses_shape_broadcasting(node):
-            return False
-
         return True

     # add.Tensor Node format: (Tensor self, Tensor other, *, Scalar alpha=1)
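For context on the check that moved into `_is_supported_on_target` above: `node_uses_shape_broadcasting` flags an `add.Tensor` whose inputs have different shapes. A small illustrative example (the shapes are assumptions, not from the diff):

import torch

x = torch.rand(1, 8, 16, 16)
y = torch.rand(1, 8, 1, 1)    # broadcasts against `x` -> rejected on RT700 by this commit
z = torch.rand(1, 8, 16, 16)  # same shape as `x`      -> still eligible for delegation
print((x + y).shape, (x + z).shape)  # both run fine in eager mode; only delegation differs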

backends/nxp/backend/ir/converter/node_converters/ops_converters/addmm_converter.py

Lines changed: 2 additions & 7 deletions
@@ -1,14 +1,11 @@
-# Copyright 2024 NXP
+# Copyright 2024-2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.

 from executorch.backends.nxp.backend.edge_helper import input_rank
 from executorch.backends.nxp.backend.ir.converter.conversion.common import OpsList
-from executorch.backends.nxp.backend.ir.converter.node_converter import (
-    NodeConverter,
-    Target,
-)
+from executorch.backends.nxp.backend.ir.converter.node_converter import NodeConverter
 from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import (
     fully_connected_options,
 )
@@ -32,8 +29,6 @@ def _is_supported_in_IR(

         return True

-    supported_targets = [Target.RT700]
-
     def convert(self, node: Node):
         self.assert_convertible(node)

backends/nxp/backend/ir/converter/node_converters/ops_converters/avg_pool_2d_converter.py

Lines changed: 2 additions & 7 deletions
@@ -1,5 +1,4 @@
-# Copyright (c) 2025 NXP
-# All rights reserved.
+# Copyright 2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
@@ -9,10 +8,7 @@
     common,
 )
 from executorch.backends.nxp.backend.ir.converter.conversion.common import OpsList
-from executorch.backends.nxp.backend.ir.converter.node_converter import (
-    NodeConverter,
-    Target,
-)
+from executorch.backends.nxp.backend.ir.converter.node_converter import NodeConverter
 from executorch.backends.nxp.backend.ir.tflite_generator import tflite_model
 from executorch.backends.nxp.backend.ir.tflite_generator.builtin_options import (
     average_pool_2d_options,
@@ -22,7 +18,6 @@


 class AvgPool2dConverter(NodeConverter):
-    supported_targets = [Target.RT700]

     @staticmethod
     def _is_supported_in_IR(

backends/nxp/backend/ir/converter/node_converters/ops_converters/clone_converter.py

Lines changed: 2 additions & 5 deletions
@@ -4,10 +4,8 @@
 # LICENSE file in the root directory of this source tree.

 import torch
-from executorch.backends.nxp.backend.ir.converter.node_converter import (
-    NodeConverter,
-    Target,
-)
+
+from executorch.backends.nxp.backend.ir.converter.node_converter import NodeConverter
 from torch.fx import Node
 from torch.nn import Parameter

@@ -20,7 +18,6 @@ def _has_supported_memory_format(node: Node) -> bool:


 class CloneConverter(NodeConverter):
-    supported_targets = [Target.RT700]

     @staticmethod
     def _is_supported_in_IR(

backends/nxp/backend/ir/converter/node_converters/ops_converters/constant_pad_nd_converter.py

Lines changed: 17 additions & 2 deletions
@@ -1,4 +1,4 @@
-# Copyright 2024 NXP
+# Copyright 2024-2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
@@ -31,7 +31,22 @@


 class ConstantPadNDConverter(NodeConverter):
-    supported_targets = [Target.RT700]
+    @staticmethod
+    def _is_supported_on_target(
+        node: Node, target: Target, parameters_mapping: dict[str, Parameter]
+    ) -> bool:
+        match target:
+            case Target.RT700:
+                # TODO: Consider different tensor formats (dim-order)
+                paddings = node.args[1]
+                if len(paddings) > 4 and paddings[4:6] != [0, 0]:
+                    # Attempt to Pad channels dimension, which is not supported on Neutron.
+                    return False
+
+                return True
+
+            case _:
+                return False

     @staticmethod
     def _is_supported_in_IR(
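Why `paddings[4:6]` corresponds to the channel dimension: `constant_pad_nd` lists its padding starting from the last dimension, two entries per dimension, so for a 4D NCHW input entries 4 and 5 pad dim 1 (channels). A small illustrative example (shapes assumed, not from the diff):

import torch
import torch.nn.functional as F

x = torch.rand(1, 3, 8, 8)            # NCHW
ok = F.pad(x, [1, 1, 2, 2])           # pads W and H only -> no channel padding, convertible
bad = F.pad(x, [0, 0, 0, 0, 1, 1])    # entries 4:6 pad C -> rejected on RT700 by this commit
print(ok.shape, bad.shape)            # torch.Size([1, 3, 12, 10]) torch.Size([1, 5, 8, 8])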

backends/nxp/backend/ir/converter/node_converters/ops_converters/convolution_converter.py

Lines changed: 39 additions & 32 deletions
@@ -1,4 +1,4 @@
-# Copyright 2024 NXP
+# Copyright 2024-2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
@@ -42,57 +42,64 @@


 class ConvolutionConverter(NodeConverter):
-    supported_targets = [Target.RT700]
+    @staticmethod
+    def _is_supported_on_target(
+        node: Node, target: Target, parameters_mapping: dict[str, Parameter]
+    ) -> bool:
+        match target:
+            case Target.RT700:
+                activations = node.args[0]
+                weights = node.args[1]
+                groups = node.args[8]
+
+                if activations.meta["val"].shape[0] != 1:
+                    # Only batch size 1 is supported on neutron.
+                    return False
+
+                if groups == 1:  # Regular convolution.
+                    pass
+                elif conv_utils.group_conv_convertible_as_depthwise(
+                    node, groups
+                ):  # Depthwise convolution.
+                    # Only supported if the weights are static, because TFLite `DepthwiseConv2D` uses permuted
+                    # weights. In case the weights are dynamic, a Transpose operator would have to be added, which
+                    # is not supported on Neutron.
+                    if not node_is_effectively_static_tensor(
+                        weights, parameters_mapping
+                    ):
+                        return False
+                elif conv_utils.group_conv_convertible_into_multiple_convolutions(
+                    node, groups
+                ):  # Separable conv.
+                    # Requires addition of `Split` and `Concatenation` operators, which are not supported on Neutron.
+                    return False
+                else:  # Unexpected case (should never happen).
+                    return False
+
+                return True
+
+            case _:
+                return False

     @staticmethod
     def _is_supported_in_IR(
         node: Node, parameters_mapping: dict[str, Parameter]
     ) -> bool:
         is_transposed = node.args[6]
         output_padding = node.args[7]
-        groups = node.args[8]

         if is_transposed:
             return False

         if output_padding != [0, 0]:
             return False

-        if groups == 1:
-            # Regular (pointwise) convolution.
-            pass
-
-        elif conv_utils.group_conv_convertible_as_depthwise(
-            node, groups
-        ) and node_is_effectively_static_tensor(node.args[1], parameters_mapping):
-            # Depthwise convolution.
-            # Only supported if the weights are static, because TFLite `DepthwiseConv2D` uses permuted weights. In case
-            # the weights are dynamic, a Transpose operator would have to be added, which is not supported on Neutron.
-            pass
-
-        elif conv_utils.group_conv_convertible_into_multiple_convolutions(node, groups):
-            # Group Separable convolution.
-            # Not supported natively by the eIQ Neutron so Group Separable Convolution.
-            # In practice it can be computed by splitting the Group Separable Convolution into multiple Pointwise
-            # Convo it will use the Split and Concat operation. The Concat operation in Neutron Converter
-            # SDK 25.03 requires the # of channels to be multipy of # of MAC units in the eIQ Neutron.
-            # For this reason Group Separable Convolution is not delegated by default at this moment.
-            return False
-
-        else:
-            # All conversion options related to the `group` attribute have been checked and none of them can be used.
-            return False
-
         if input_tensor_safe(node, 2) is None:
             # No bias tensor.
             weight_tensor = input_tensor(node, 1)
             if weight_tensor.dtype not in [torch.float32, torch.int8, torch.uint8]:
                 return False

-        if node.args[0].meta["val"].shape[0] != 1:
-            # Only batch size 1 is supported on neutron.
-            return False
-
         return True

 Stride = Padding = Dilation = OutPadding = list[int]
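For reference on the argument indices used in the checks above, this is the standard `aten.convolution.default` argument layout; the unpacking line is only an illustration:

# aten.convolution.default node arguments, in order:
(activations, weights, bias, stride, padding, dilation, transposed, output_padding, groups) = node.args
# hence node.args[0] -> activations, node.args[1] -> weights, node.args[6] -> transposed,
# node.args[7] -> output_padding, node.args[8] -> groups.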

backends/nxp/backend/ir/converter/node_converters/ops_converters/hardtanh_converter.py

Lines changed: 2 additions & 7 deletions
@@ -1,13 +1,9 @@
-# Copyright (c) 2025 NXP
-# All rights reserved.
+# Copyright 2025 NXP
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.

-from executorch.backends.nxp.backend.ir.converter.node_converter import (
-    NodeConverter,
-    Target,
-)
+from executorch.backends.nxp.backend.ir.converter.node_converter import NodeConverter
 from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
     BuiltinOperator,
 )
@@ -16,7 +12,6 @@


 class HardTanhConverter(NodeConverter):
-    supported_targets = [Target.RT700]

     # Maps possible input parameters of HardTanh to equivalent ReLU-based operators supported by TFLite.
     supported_modes_map = {

0 commit comments
