
Commit ca3e18d

Merge branch 'main' into toupstream/sub_scalar
2 parents 6358b0a + 373be8c


65 files changed: 3,817 additions, 193 deletions

CMakeLists.txt

Lines changed: 4 additions & 7 deletions
```diff
@@ -48,8 +48,6 @@
 cmake_minimum_required(VERSION 3.24)
 project(executorch)
 
-# MARK: - Start EXECUTORCH_H12025_BUILD_MIGRATION
-
 include(${PROJECT_SOURCE_DIR}/tools/cmake/common/preset.cmake)
 include(${PROJECT_SOURCE_DIR}/tools/cmake/Utils.cmake)
 include(CMakeDependentOption)
@@ -82,6 +80,7 @@ announce_configured_options(BUCK2)
 
 announce_configured_options(CMAKE_CXX_COMPILER_ID)
 announce_configured_options(CMAKE_TOOLCHAIN_FILE)
+announce_configured_options(BUILD_TESTING)
 
 load_build_preset()
 include(${PROJECT_SOURCE_DIR}/tools/cmake/preset/default.cmake)
@@ -97,11 +96,6 @@ else()
 endif()
 announce_configured_options(CCACHE_PROGRAM)
 
-# Print all the configs that were called with announce_configured_options.
-print_configured_options()
-
-# MARK: - End EXECUTORCH_H12025_BUILD_MIGRATION
-
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 
 # Setup RPATH. See
@@ -750,3 +744,6 @@ if(EXECUTORCH_BUILD_ANDROID_JNI)
 endif()
 
 include(Test.cmake)
+
+# Print all the configs that were called with announce_configured_options.
+print_configured_options()
```

CMakePresets.json

Lines changed: 8 additions & 16 deletions
```diff
@@ -8,7 +8,7 @@
     },
     {
       "name": "macos",
-      "displayName": "Build everything buildable on macOS",
+      "displayName": "Build ExecuTorch for macOS",
       "inherits": ["common"],
       "generator": "Xcode",
       "cacheVariables": {
@@ -25,7 +25,7 @@
     },
     {
       "name": "ios",
-      "displayName": "Build everything buildable on iOS",
+      "displayName": "Build ExecuTorch for iOS",
       "inherits": ["common"],
       "generator": "Xcode",
       "cacheVariables": {
@@ -42,7 +42,7 @@
     },
     {
       "name": "ios-simulator",
-      "displayName": "Build everything buildable on iOS simulator",
+      "displayName": "Build ExecuTorch for iOS Simulator",
       "inherits": ["common"],
       "generator": "Xcode",
       "cacheVariables": {
@@ -59,7 +59,7 @@
     },
     {
       "name": "linux",
-      "displayName": "Build everything buildable on Linux",
+      "displayName": "Build ExecuTorch for Linux",
       "inherits": ["common"],
       "cacheVariables": {
         "CMAKE_SYSTEM_NAME": "Linux",
@@ -88,29 +88,21 @@
     {
       "name": "llm",
       "displayName": "Build LLM libraries",
-      "inherits": [
-        "common"
-      ],
+      "inherits": ["common"],
       "cacheVariables": {
         "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/llm.cmake",
         "CMAKE_OSX_DEPLOYMENT_TARGET": "12.0"
       },
       "condition": {
         "type": "inList",
         "string": "${hostSystemName}",
-        "list": [
-          "Darwin",
-          "Linux",
-          "Windows"
-        ]
+        "list": ["Darwin", "Linux", "Windows"]
       }
     },
     {
       "name": "zephyr",
-      "displayName": "Build everything buildable on Zephyr RTOS",
-      "inherits": [
-        "common"
-      ],
+      "displayName": "Build ExecuTorch for Zephyr RTOS",
+      "inherits": ["common"],
       "cacheVariables": {
         "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/zephyr.cmake",
         "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/examples/zephyr/x86_64-linux-arm-zephyr-eabi-gcc.cmake"
```

backends/cadence/aot/tests/test_fusion_ops_passes.py

Lines changed: 0 additions & 39 deletions
```diff
@@ -12,7 +12,6 @@
 
 import executorch.backends.cadence.aot.ops_registrations  # noqa
 import torch
-from executorch.backends.cadence.aot import compiler
 from executorch.backends.cadence.aot.fuse_ops import (
     FuseCascadedTransposeOrPermuteOps,
     FuseCascadedViewOps,
@@ -30,7 +29,6 @@
 from executorch.exir.dialects._ops import ops as exir_ops
 from executorch.exir.dialects.edge._ops import EdgeOpOverload
 from executorch.exir.pass_base import PassResult, ProxyValue
-from torch import nn
 
 
 class TestFusionPassesBase(unittest.TestCase):
@@ -178,43 +176,6 @@ def test_keep_mm_add_with_multiple_users(self) -> None:
         self.assertEqual(count_node(converted_graph, exir_ops.edge.aten.mm.default), 1)
         self.assertEqual(count_node(converted_graph, exir_ops.edge.aten.add.Tensor), 3)
 
-    # TODO(matthiascremon) -> None: enable that pass with new flow
-    @torch.no_grad()
-    @unittest.expectedFailure
-    def test_legacy_conv_bn_fusion(self) -> None:
-        class ModelConvBN(torch.nn.Module):
-            def __init__(
-                self, in_features: int, out_features: int, kernel_size: int
-            ) -> None:
-                super().__init__()
-                self.conv1d = nn.Conv1d(in_features, out_features, kernel_size)
-                self.bn = nn.BatchNorm1d(out_features)
-
-            def forward(self, x: torch.Tensor) -> torch.Tensor:
-                y = self.conv1d(x)
-                return self.bn(y)
-
-        model = ModelConvBN(64, 1, 2)
-        x = torch.randn(1, 64, 4)
-
-        graph_module = (
-            compiler.export_to_executorch_gen_etrecord(model.eval(), (x,))
-            .exported_program()
-            .graph_module
-        )
-        # Assert that after running the fusion passes, batchnorm was fused with conv1d
-        self.assertEqual(
-            count_node(graph_module, torch.ops.aten.linear.out)
-            + count_node(graph_module, torch.ops.cadence.convolution.out),
-            1,
-        )
-        self.assertEqual(
-            count_node(
-                graph_module, torch.ops.aten._native_batch_norm_legit_no_training.out
-            ),
-            0,
-        )
-
     def test_permute_transpose_fusion(self) -> None:
         builder = GraphBuilder()
         x = builder.placeholder("x", torch.randn(3, 1, 3, 1, 4, dtype=torch.float32))
```

backends/nxp/backend/edge_program_converter.py

Lines changed: 1 addition & 0 deletions
```diff
@@ -31,6 +31,7 @@
     exir_ops.edge.aten.mm.default: MMConverter,  # noqa F405
     exir_ops.edge.aten.permute_copy.default: PermuteCopyConverter,  # noqa F405
     exir_ops.edge.aten.relu.default: ReLUConverter,  # noqa F405
+    exir_ops.edge.aten.hardtanh.default: HardTanhConverter,  # noqa F405
     exir_ops.edge.aten._softmax.default: SoftmaxConverter,  # noqa F405
     exir_ops.edge.aten.view_copy.default: ViewCopyConverter,  # noqa F405
 }
```
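
The map above is the backend's dispatch table: each supported edge-dialect op is looked up and handed to its `NodeConverter` subclass. A minimal, self-contained sketch of that dict-dispatch pattern (all names below are illustrative stand-ins, not the NXP backend's real API):

```python
# Illustrative dict-based converter dispatch; all names are stand-ins.
from typing import Dict, Type


class Lowering:
    """One lowering rule per supported operator."""

    def convert(self, node: str) -> str:
        raise NotImplementedError


class ReluLowering(Lowering):
    def convert(self, node: str) -> str:
        return f"RELU({node})"


class HardTanhLowering(Lowering):
    def convert(self, node: str) -> str:
        return f"RELU6({node})"  # e.g. hardtanh(0, 6) maps to RELU6


# Mirrors the "edge op -> converter class" registry in the diff above.
LOWERING_MAP: Dict[str, Type[Lowering]] = {
    "aten.relu.default": ReluLowering,
    "aten.hardtanh.default": HardTanhLowering,
}


def convert_node(op: str, node: str) -> str:
    # A missing key means the op cannot be delegated to this backend.
    return LOWERING_MAP[op]().convert(node)


print(convert_node("aten.hardtanh.default", "x"))  # -> RELU6(x)
```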

backends/nxp/backend/ir/converter/node_converters/ops_converters/__init__.py

Lines changed: 4 additions & 0 deletions
```diff
@@ -10,6 +10,9 @@
 from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.convolution_converter import (
     ConvolutionConverter,
 )
+from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.hardtanh_converter import (
+    HardTanhConverter,
+)
 from executorch.backends.nxp.backend.ir.converter.node_converters.ops_converters.max_pool_2d_converter import (
     MaxPool2dConverter,
 )
@@ -48,4 +51,5 @@
     "ReLUConverter",
     "MaxPool2dConverter",
     "AvgPool2dConverter",
+    "HardTanhConverter",
 ]
```

backends/nxp/backend/ir/converter/node_converters/ops_converters/hardtanh_converter.py

Lines changed: 47 additions & 0 deletions

```diff
@@ -0,0 +1,47 @@
+# Copyright (c) 2025 NXP
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+from executorch.backends.nxp.backend.ir.converter.node_converter import (
+    NodeConverter,
+    Target,
+)
+from executorch.backends.nxp.backend.ir.lib.tflite.BuiltinOperator import (
+    BuiltinOperator,
+)
+from torch.fx import Node
+from torch.nn import Parameter
+
+
+class HardTanhConverter(NodeConverter):
+    supported_targets = [Target.RT700]
+
+    # Maps supported (min, max) input parameters of HardTanh to the equivalent ReLU-based operators in TFLite.
+    supported_modes_map = {
+        (0.0, 6.0): BuiltinOperator.RELU6,
+        (-1.0, 1.0): BuiltinOperator.RELU_N1_TO_1,
+        (0.0, 1.0): BuiltinOperator.RELU_0_TO_1,
+        (0.0, float("inf")): BuiltinOperator.RELU,
+    }
+
+    @staticmethod
+    def _is_supported_in_IR(
+        node: Node, parameters_mapping: dict[str, Parameter]
+    ) -> bool:
+        _, min_value, max_value = node.args
+        return (min_value, max_value) in HardTanhConverter.supported_modes_map
+
+    def convert(self, node: Node):
+        """Convert 'aten::hardtanh' to its supported ReLU equivalent."""
+        self.assert_convertible(node)
+
+        t_op = self._create_tflite_op_with_io_tensors(node)
+
+        _, min_value, max_value = node.args
+
+        op = self.supported_modes_map[(min_value, max_value)]
+        t_op.opcode_index = self.builder.op_code_index_for_op_type(op)
+
+        self.builder.append_operators([t_op])
```
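
The `supported_modes_map` works because hardtanh with these particular (min, max) bounds is pointwise-identical to a member of TFLite's ReLU family. A quick sketch verifying that equivalence with stock PyTorch ops (no ExecuTorch or NXP code involved):

```python
import torch
import torch.nn.functional as F

x = torch.randn(4, 8)

# hardtanh(x, 0, 6) is exactly ReLU6, so (0.0, 6.0) can lower to RELU6.
assert torch.equal(F.hardtanh(x, 0.0, 6.0), F.relu6(x))

# hardtanh(x, -1, 1) clamps to [-1, 1], matching RELU_N1_TO_1.
assert torch.equal(F.hardtanh(x, -1.0, 1.0), torch.clamp(x, -1.0, 1.0))

# hardtanh(x, 0, 1) clamps to [0, 1], matching RELU_0_TO_1.
assert torch.equal(F.hardtanh(x, 0.0, 1.0), torch.clamp(x, 0.0, 1.0))

# With max = +inf, hardtanh degenerates to plain ReLU.
assert torch.equal(F.hardtanh(x, 0.0, float("inf")), F.relu(x))
```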

backends/nxp/neutron_partitioner.py

Lines changed: 1 addition & 0 deletions
```diff
@@ -195,6 +195,7 @@ def tag_qdq_clusters(self, nodes: List[torch.fx.Node]):
     exir_ops.edge.aten.max_pool2d_with_indices.default: MaxPool2dConverter,  # noqa F405
     exir_ops.edge.aten.mm.default: MMConverter,  # noqa F405
     exir_ops.edge.aten.relu.default: ReLUConverter,  # noqa F405
+    exir_ops.edge.aten.hardtanh.default: HardTanhConverter,  # noqa F405
     exir_ops.edge.aten._softmax.default: SoftmaxConverter,  # noqa F405
     exir_ops.edge.aten.view_copy.default: ViewCopyConverter,  # noqa F405
 }
```

backends/nxp/nxp_backend.py

Lines changed: 2 additions & 1 deletion
```diff
@@ -174,7 +174,8 @@ def preprocess(
         # Otherwise, we get violation that this op is not part of ATen Core ops.
         edge_program._verifiers = [
             EXIREdgeDialectVerifier(
-                class_only=True, exception_list=[torch.ops.aten.max_pool2d.default]
+                class_only=True,
+                core_aten_ops_exception_list=[torch.ops.aten.max_pool2d.default],
             )
         ]
 
```

backends/nxp/quantizer/neutron_quantizer.py

Lines changed: 4 additions & 0 deletions
```diff
@@ -15,6 +15,8 @@
     AvgPoolPattern,
     Conv1dPattern,
     Conv2dPattern,
+    HardTanhInPlacePattern,
+    HardTanhPattern,
     LinearPattern,
     MaxPoolPattern,
     PadPattern,
@@ -199,6 +201,8 @@ def __init__(self):
             NeutronAtenQuantizer(PermutePattern(), static_qconfig),
             NeutronAtenQuantizer(PadPattern(), static_qconfig),
             NeutronAtenQuantizer(ReluPattern(), static_qconfig),
+            NeutronAtenQuantizer(HardTanhPattern(), static_qconfig),
+            NeutronAtenQuantizer(HardTanhInPlacePattern(), static_qconfig),
             NeutronAtenQuantizer(ReluInPlacePattern(), static_qconfig),
             NeutronAtenQuantizer(AvgPoolPattern(), static_qconfig),
             NeutronAtenQuantizer(ViewPattern(), static_qconfig),
```

backends/nxp/quantizer/patterns.py

Lines changed: 20 additions & 0 deletions
```diff
@@ -216,6 +216,26 @@ def get_anchors(
         )
 
 
+class HardTanhPattern(SharedSpecPattern):
+    """
+    Quantizer for the HardTanh operator. A shared quantization spec is used,
+    as activation functions usually follow a computation layer.
+    """
+
+    def partition_types(self):
+        return [torch.ops.aten.hardtanh.default]
+
+
+class HardTanhInPlacePattern(SharedSpecPattern):
+    """
+    Quantizer for the HardTanh operator with inplace=True. A shared quantization
+    spec is used, as activation functions usually follow a computation layer.
+    """
+
+    def partition_types(self):
+        return [torch.ops.aten.hardtanh_.default]
+
+
 class LinearPattern(QuantizationPattern):
     def partition_types(self) -> List[OpOverload]:
         return [torch.ops.aten.linear.default]
```
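
Because both patterns derive from `SharedSpecPattern`, a hardtanh that follows a quantized layer reuses that layer's quantization parameters instead of introducing its own observer pair. A hedged end-to-end sketch of where these patterns come into play, assuming the stock PT2E quantization flow applies here (the `NeutronQuantizer` import path is taken from this commit; the export/prepare/convert calls are standard PyTorch 2 export APIs):

```python
import torch
from executorch.backends.nxp.quantizer.neutron_quantizer import NeutronQuantizer
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e


class ConvHardTanh(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3)
        self.act = torch.nn.Hardtanh(min_val=0.0, max_val=6.0)

    def forward(self, x):
        # hardtanh follows the conv, so HardTanhPattern shares the conv's
        # quantization spec rather than adding a new observer pair.
        return self.act(self.conv(x))


model = ConvHardTanh().eval()
example_inputs = (torch.randn(1, 3, 32, 32),)

# Export to an ATen-level graph, annotate with the Neutron quantizer,
# run one calibration batch, then convert to a quantized graph.
exported = torch.export.export_for_training(model, example_inputs).module()
prepared = prepare_pt2e(exported, NeutronQuantizer())
prepared(*example_inputs)
quantized = convert_pt2e(prepared)
```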
