Commit 690efb9

MartinPavella authored and robert-kalmar committed

NXP backend: Add implementation of Tanh operator converter

1 parent 24d8fb4

15 files changed: +227 -72 lines

backends/nxp/backend/edge_program_converter.py

Lines changed: 1 addition & 0 deletions

@@ -38,6 +38,7 @@
     exir_ops.edge.aten.permute_copy.default: PermuteCopyConverter,  # noqa F405
     exir_ops.edge.aten.relu.default: ReLUConverter,  # noqa F405
     exir_ops.edge.aten._softmax.default: SoftmaxConverter,  # noqa F405
+    exir_ops.edge.aten.tanh.default: TanhConverter,  # noqa F405
     exir_ops.edge.aten.view_copy.default: ViewCopyConverter,  # noqa F405
     exir_ops.edge.aten.sigmoid.default: SigmoidConverter,  # noqa F405
 }
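
For orientation, here is a minimal sketch (my own illustration, not part of this commit) of how an aten.tanh call becomes the exir_ops.edge.aten.tanh.default node that this mapping routes to TanhConverter, assuming the public torch.export and executorch.exir.to_edge entry points:

import torch
from executorch.exir import to_edge

class TanhModel(torch.nn.Module):
    def forward(self, x):
        return torch.tanh(x)

# Export to ATen, then convert to the edge dialect used by the map above.
exported = torch.export.export(TanhModel(), (torch.randn(1, 8),))
edge = to_edge(exported)

# The edge graph now contains exir_ops.edge.aten.tanh.default nodes,
# the dictionary key registered above for TanhConverter.
print(edge.exported_program().graph_module.graph)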

backends/nxp/backend/ir/converter/node_converters/ops_converters/__init__.py

Lines changed: 4 additions & 0 deletions

@@ -52,6 +52,9 @@
 from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.softmax_converter import (
     SoftmaxConverter,
 )
+from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.tanh_converter import (
+    TanhConverter,
+)
 from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.view_copy_converter import (
     ViewCopyConverter,
 )
@@ -76,4 +79,5 @@
     "AdaptiveAvgPool2dConverter",
     "HardTanhConverter",
     "SigmoidConverter",
+    "TanhConverter",
 ]
backends/nxp/backend/ir/converter/node_converters/ops_converters/tanh_converter.py

Lines changed: 29 additions & 0 deletions (new file)

@@ -0,0 +1,29 @@
+# Copyright 2025 NXP
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from executorch.backends.nxp.backend.ir.converter.node_converter import NodeConverter
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
+    BuiltinOperator,
+)
+from torch.fx import Node
+from torch.nn import Parameter
+
+
+class TanhConverter(NodeConverter):
+
+    @staticmethod
+    def _is_supported_in_IR(
+        node: Node,
+        parameters_mapping: dict[str, Parameter],
+    ) -> bool:
+        return True
+
+    def convert(self, node: Node):
+        self.assert_convertible(node)
+
+        t_op = self._create_tflite_op_with_io_tensors(node)
+        t_op.opcode_index = self.builder.op_code_index_for_op_type(BuiltinOperator.TANH)
+
+        self.builder.append_operators([t_op])
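
The converter is this small because TFLite's TANH builtin carries no operator options, so only the opcode index needs to be set; the quantization parameters come from the surrounding graph. For intuition, a reference sketch (my own illustration, not code from this commit) of what the delegated int8 operator computes, assuming the fixed output parameters scale=1/128, zero_point=0 that the quantizer below assigns:

import numpy as np

def int8_tanh_reference(q_in: np.ndarray, in_scale: float, in_zp: int) -> np.ndarray:
    """Dequantize -> tanh -> requantize with the fixed output qparams."""
    x = (q_in.astype(np.float32) - in_zp) * in_scale  # dequantize the input
    y = np.tanh(x)                                    # float tanh, range [-1, 1]
    q = np.round(y / (1.0 / 128.0)) + 0               # requantize: scale 1/128, zp 0
    return np.clip(q, -128, 127).astype(np.int8)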

backends/nxp/neutron_partitioner.py

Lines changed: 1 addition & 0 deletions

@@ -202,6 +202,7 @@ def tag_qdq_clusters(self, nodes: List[torch.fx.Node]):
     exir_ops.edge.aten.mm.default: MMConverter,  # noqa F405
     exir_ops.edge.aten.relu.default: ReLUConverter,  # noqa F405
     exir_ops.edge.aten._softmax.default: SoftmaxConverter,  # noqa F405
+    exir_ops.edge.aten.tanh.default: TanhConverter,  # noqa F405
     exir_ops.edge.aten.view_copy.default: ViewCopyConverter,  # noqa F405
     exir_ops.edge.aten.sigmoid.default: SigmoidConverter,  # noqa F405
 }

backends/nxp/quantizer/neutron_quantizer.py

Lines changed: 4 additions & 0 deletions

@@ -34,6 +34,8 @@
     SharedSpecPattern,
     SigmoidPattern,
     SoftMaxPattern,
+    TanhInPlacePattern,
+    TanhPattern,
     ViewPattern,
 )
 from executorch.backends.nxp.quantizer.utils import (
@@ -221,6 +223,8 @@ def __init__(self):
             NeutronAtenQuantizer(ReshapePattern(), static_qconfig),
             NeutronAtenQuantizer(SigmoidPattern(), static_qconfig),
             NeutronAtenQuantizer(SoftMaxPattern(), static_qconfig),
+            NeutronAtenQuantizer(TanhPattern(), static_qconfig),
+            NeutronAtenQuantizer(TanhInPlacePattern(), static_qconfig),
             NeutronAtenQuantizer(ViewPattern(), static_qconfig),
         ]
     )
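
A hedged sketch of where these patterns take effect, assuming the standard PT2E flow (prepare_pt2e and convert_pt2e are the public torch.ao entry points; the model, shapes, and capture call are illustrative and the exact capture entry point varies by torch version):

import torch
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
from executorch.backends.nxp.quantizer.neutron_quantizer import NeutronQuantizer

class TanhModel(torch.nn.Module):
    def forward(self, x):
        return torch.tanh(x)

example_inputs = (torch.randn(1, 8),)
module = torch.export.export_for_training(TanhModel(), example_inputs).module()

prepared = prepare_pt2e(module, NeutronQuantizer())
prepared(*example_inputs)           # calibration run
quantized = convert_pt2e(prepared)  # tanh output now carries scale 1/128, zp 0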

backends/nxp/quantizer/patterns.py

Lines changed: 72 additions & 28 deletions

@@ -106,6 +106,35 @@ def get_anchors(
     )


+def get_anchors_for_fixed_quant_specs(
+    fused_partition: list[fx.GraphModule],
+    scale: float,
+    zero_point: int,
+    quant_min: int = -128,
+    quant_max: int = 127,
+) -> PartitionAnchors:
+    node = fused_partition[0].nodes[-1]
+    assert len(fused_partition[0].input_nodes) == 1
+
+    qspec = FixedQParamsQuantizationSpec(
+        dtype=torch.int8,
+        scale=scale,
+        zero_point=zero_point,
+        quant_min=quant_min,
+        quant_max=quant_max,
+        qscheme=torch.per_tensor_affine,
+    )
+
+    return PartitionAnchors(
+        inputs=[(node, 0)],
+        weights=[],
+        biases=[],
+        output=[
+            (node, qspec),
+        ],
+    )
+
+
 class AbsPattern(SharedSpecPattern):
     """
     Quantizer for Abs operator.
@@ -438,31 +467,6 @@ def partition_types(self):
         return [torch.ops.aten.view.default]


-def get_anchors_for_softmax_like_operators(
-    fused_partition: List[fx.GraphModule],
-) -> PartitionAnchors:
-    node = fused_partition[0].nodes[-1]
-    assert len(fused_partition[0].input_nodes) == 1
-
-    qspec = FixedQParamsQuantizationSpec(
-        dtype=torch.int8,
-        scale=1.0 / 256.0,
-        zero_point=-128,
-        quant_min=-128,
-        quant_max=127,
-        qscheme=torch.per_tensor_affine,
-    )
-
-    return PartitionAnchors(
-        inputs=[(node, 0)],
-        weights=[],
-        biases=[],
-        output=[
-            (node, qspec),
-        ],
-    )
-
-
 class SoftMaxPattern(QuantizationPattern):
     """
     Quantizer for Softmax operator.
@@ -474,9 +478,47 @@ def partition_types(self) -> List[OpOverload]:
         return [torch.ops.aten.softmax.int]

     def get_anchors(
-        self, gm: fx.GraphModule, fused_partition: List[fx.GraphModule]
+        self, gm: fx.GraphModule, fused_partition: list[fx.GraphModule]
     ) -> PartitionAnchors:
-        return get_anchors_for_softmax_like_operators(fused_partition)
+        return get_anchors_for_fixed_quant_specs(
+            fused_partition, scale=1.0 / 256.0, zero_point=-128
+        )
+
+
+class TanhPattern(QuantizationPattern):
+    """
+    Quantizer for Tanh operator.
+
+    The quantization of Tanh output is fixed to scale 1/128, zero point 0, dtype int8.
+    """
+
+    def partition_types(self):
+        return [torch.ops.aten.tanh.default]
+
+    def get_anchors(
+        self, gm: fx.GraphModule, fused_partition: list[fx.GraphModule]
+    ) -> PartitionAnchors:
+        return get_anchors_for_fixed_quant_specs(
+            fused_partition, scale=1.0 / 128.0, zero_point=0
+        )
+
+
+class TanhInPlacePattern(QuantizationPattern):
+    """
+    Quantizer for inplace version of Tanh operator (torch.tanh_).
+
+    The quantization of Tanh output is fixed to scale 1/128, zero point 0, dtype int8.
+    """
+
+    def partition_types(self):
+        return [torch.ops.aten.tanh_.default]
+
+    def get_anchors(
+        self, gm: fx.GraphModule, fused_partition: list[fx.GraphModule]
+    ) -> PartitionAnchors:
+        return get_anchors_for_fixed_quant_specs(
+            fused_partition, scale=1.0 / 128.0, zero_point=0
+        )


 class SigmoidPattern(QuantizationPattern):
@@ -492,4 +534,6 @@ def partition_types(self) -> List[OpOverload]:
     def get_anchors(
         self, gm: fx.GraphModule, fused_partition: List[fx.GraphModule]
     ) -> PartitionAnchors:
-        return get_anchors_for_softmax_like_operators(fused_partition)
+        return get_anchors_for_fixed_quant_specs(
+            fused_partition, scale=1.0 / 256.0, zero_point=-128
+        )
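
The fixed parameters follow from the operator output ranges: tanh covers [-1, 1], so scale 1/128 with zero point 0 spans the full int8 range, while softmax and sigmoid cover [0, 1], matching scale 1/256 with zero point -128. A quick self-contained check (my own illustration; the quantize helper is hypothetical, assuming the standard affine rule q = round(x / scale) + zero_point clamped to [quant_min, quant_max]):

def quantize(x: float, scale: float, zero_point: int, qmin: int = -128, qmax: int = 127) -> int:
    # Affine quantization with clamping to the int8 range.
    return max(qmin, min(qmax, round(x / scale) + zero_point))

# Tanh: [-1, 1] -> full int8 range with scale 1/128, zero point 0.
assert quantize(-1.0, 1.0 / 128.0, 0) == -128
assert quantize(0.0, 1.0 / 128.0, 0) == 0
assert quantize(1.0, 1.0 / 128.0, 0) == 127   # 128 clamps to 127

# Softmax/Sigmoid: [0, 1] -> full int8 range with scale 1/256, zero point -128.
assert quantize(0.0, 1.0 / 256.0, -128) == -128
assert quantize(0.5, 1.0 / 256.0, -128) == 0
assert quantize(1.0, 1.0 / 256.0, -128) == 127  # 128 clamps to 127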

backends/nxp/run_unittests.sh

Lines changed: 2 additions & 0 deletions

@@ -12,3 +12,5 @@ cd $EXECUTORCH_DIR

 # '-c /dev/null' is used to ignore root level pytest.ini.
 pytest -c /dev/null backends/nxp/tests/
+
+python -m unittest discover -s backends/nxp/tests/ -v

backends/nxp/tests/ir/__init__.py

Whitespace-only changes.

backends/nxp/tests/ir/converter/__init__.py

Whitespace-only changes.

backends/nxp/tests/ir/converter/node_converter/__init__.py

Whitespace-only changes.
