Commit 9413da0

NXP backend: Replace the activation-function fusion pass with joint quantization of ops and their activations (#14816)
### Summary

This PR replaces the `fuse_activation_functions.py` optimization with joint quantization of Conv2D and Linear ops together with their fusable activations, i.e. the activations supported by Neutron (Relu, Relu6, Sigmoid, Tanh). The fusion logic is driven by the target specs and currently supports Neutron-C. The delegation workflow in `executorch_pipeline.py` and `aot_neutron_compile.py` is unified, and the usage of `NeutronEdgePassManager` changed accordingly. Relu now gets non-shared, standalone quantization. Tests are updated, including a small refactoring of the unit tests.

### Test plan

Unit tests provided (`test_edge_passes.py`, `test_quantizer.py`).

cc @robert-kalmar @JakeStevens @digantdesai
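To make the intent concrete, below is a minimal, hypothetical sketch of the pattern this change targets: a compute op (Conv2D/Linear) whose sole consumer is a Neutron-fusable activation, so the pair can be quantized jointly and lowered as one fused op. All names here (`COMPUTE_OPS`, `FUSABLE_ACTIVATIONS`, `find_joint_quantization_pairs`, `TinyModel`) are illustrative, not the actual NXP quantizer API:

```python
import torch
from torch.fx import GraphModule, Node, symbolic_trace

# Compute ops that Neutron can fuse with a trailing activation.
COMPUTE_OPS = {torch.nn.functional.conv2d, torch.nn.functional.linear}
# Activations selected for joint quantization (per this PR).
FUSABLE_ACTIVATIONS = {
    torch.nn.functional.relu,
    torch.nn.functional.relu6,
    torch.sigmoid,
    torch.tanh,
}


def find_joint_quantization_pairs(gm: GraphModule) -> list[tuple[Node, Node]]:
    """Find (compute op, activation) pairs that can share output qparams
    and hence be quantized together as one fused op."""
    pairs = []
    for node in gm.graph.nodes:
        if node.op != "call_function" or node.target not in FUSABLE_ACTIVATIONS:
            continue
        producer = node.args[0]
        if (
            isinstance(producer, Node)
            and producer.op == "call_function"
            and producer.target in COMPUTE_OPS
            and len(producer.users) == 1  # activation is the sole consumer
        ):
            pairs.append((producer, node))
    return pairs


class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.randn(8, 3, 3, 3))

    def forward(self, x):
        return torch.nn.functional.relu(
            torch.nn.functional.conv2d(x, self.weight)
        )


gm = symbolic_trace(TinyModel())
for compute, activation in find_joint_quantization_pairs(gm):
    print(f"quantize {compute.name} jointly with {activation.name}")
```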
1 parent 1f114f1 commit 9413da0

21 files changed: +1009 −518 lines

backends/nxp/backend/edge_helper.py

Lines changed: 22 additions & 6 deletions
```diff
@@ -4,10 +4,32 @@
 # LICENSE file in the root directory of this source tree.
 
 import torch
+
+from executorch.exir.dialects._ops import ops as exir_ops
+
 from torch.fx import GraphModule, Node
 from torch.nn import Parameter
 
 
+QUANTIZE_OPERATORS = [
+    exir_ops.edge.quantized_decomposed.quantize_per_channel.default,
+    exir_ops.edge.quantized_decomposed.quantize_per_tensor.default,
+]
+
+DEQUANTIZE_OPERATORS = [
+    exir_ops.edge.quantized_decomposed.dequantize_per_channel.default,
+    exir_ops.edge.quantized_decomposed.dequantize_per_tensor.default,
+]
+
+
+def _is_dequantize(node_: Node) -> bool:
+    return node_.op == "call_function" and node_.target in DEQUANTIZE_OPERATORS
+
+
+def _is_quantize(node_: Node) -> bool:
+    return node_.op == "call_function" and node_.target in QUANTIZE_OPERATORS
+
+
 def input_tensor(node: Node, input_index: int) -> torch.Tensor:
     if len(node.all_input_nodes) <= input_index:
         raise IndexError
@@ -62,12 +84,6 @@ def node_is_effectively_static_tensor(
     if node_is_static_tensor(node, parameters_mapping):
         return True
 
-    def _is_dequantize(node_: Node) -> bool:
-        return node_.target.__name__ in {
-            "quantized_decomposed.dequantize_per_tensor.default",
-            "quantized_decomposed.dequantize_per_channel.default",
-        }
-
     return _is_dequantize(node) and node_is_static_tensor(
         node.args[0], parameters_mapping
     )
```
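A net effect of this hunk, beyond the Q/DQ operator tables: `_is_dequantize` moves to module level (joined by `_is_quantize`), gains a proper `call_function` check, and compares `node.target` against the edge-dialect op objects instead of string-matching `__name__`. A short usage sketch, not part of the diff (`edge_program` stands in for an already-lowered edge program):

```python
from executorch.backends.nxp.backend.edge_helper import _is_dequantize, _is_quantize

# Collect every quantize/dequantize node in an edge-dialect graph.
qdq_nodes = [
    node
    for node in edge_program.graph_module.graph.nodes
    if _is_quantize(node) or _is_dequantize(node)
]
```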

backends/nxp/backend/ir/tflite_optimizer/optimizations/fuse_activation_functions.py

Lines changed: 0 additions & 235 deletions
This file was deleted.

backends/nxp/backend/ir/tflite_optimizer/optimizer.py

Lines changed: 0 additions & 8 deletions
```diff
@@ -11,9 +11,6 @@
 
 from executorch.backends.nxp.backend.ir import logger
 from executorch.backends.nxp.backend.ir.conversion_config import ConversionConfig
-from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.fuse_activation_functions import (
-    FuseActivationFunctions,
-)
 from executorch.backends.nxp.backend.ir.tflite_optimizer.optimizations.move_relu_before_concat import (
     MoveActivationBeforeConcatenation,
 )
@@ -27,8 +24,6 @@
 
 
 class Optimization(Enum):
-    FUSE_ACTIVATION_FUNCTIONS = 1
-
     FUSE_TRANSPOSE_OPERATORS = 5
     REMOVE_IDENTITY_TRANSPOSE_OPERATORS = 6
 
@@ -64,9 +59,6 @@ def __init__(
         self._builder = builder
 
         self.optimization_map = {
-            Optimization.FUSE_ACTIVATION_FUNCTIONS: FuseActivationFunctions(
-                builder, conversion_config
-            ),
             Optimization.FUSE_TRANSPOSE_OPERATORS: FuseTransposeOperators(
                 builder, conversion_config
             ),
```
