
Commit 42d3952

Erik-Lundell authored and per committed
Arm backend: Refactor bn, layernorm, meandim tests
- Include full core aten op name
- Refactor layernorm and meandim to test pipelines

Signed-off-by: Erik Lundell <[email protected]>
Change-Id: Ic28262e295d39ec78906a488499f2ce5262d1d3d
1 parent e3de804 commit 42d3952


3 files changed: +294 -472 lines changed


backends/arm/test/ops/test_batch_norm.py

Lines changed: 5 additions & 5 deletions
@@ -1,6 +1,6 @@
 # Copyright (c) Meta Platforms, Inc. and affiliates.
-# Copyright 2024-2025 Arm Limited and/or its affiliates.
 # All rights reserved.
+# Copyright 2024-2025 Arm Limited and/or its affiliates.
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
@@ -647,7 +647,7 @@ def _test_batchnorm2d_u55_BI_pipeline(
         )
 
     @parameterized.expand(test_data_suite)
-    def test_batchnorm2d_tosa_MI(
+    def test_native_batch_norm_legit_no_training_tosa_MI(
         self,
         test_name: str,
         test_data: torch.Tensor,
@@ -665,7 +665,7 @@ def test_batchnorm2d_tosa_MI(
     # Expected to fail since not inplemented
     @parameterized.expand(test_no_stats_data_suite)
     @unittest.expectedFailure
-    def test_batchnorm2d_no_stats_tosa_MI(
+    def test_native_batch_norm_legit_tosa_MI(
         self,
         test_name: str,
         test_data: torch.Tensor,
@@ -686,7 +686,7 @@ def test_batchnorm2d_no_stats_tosa_MI(
     @unittest.skip(
         reason="Expected to fail since TOSAQuantizer (for BI) cannot quantize a BatchNorm layer"
     )
-    def test_batchnorm2d_tosa_BI(
+    def test_native_batch_norm_legit_no_training_tosa_BI(
         self,
         test_name: str,
         test_data: torch.Tensor,
@@ -708,7 +708,7 @@ def test_batchnorm2d_tosa_BI(
         reason="Expected to fail since EthosUQuantizer cannot quantize a BatchNorm layer"
     )
     @unittest.expectedFailure
-    def test_batchnorm2d_u55_BI(
+    def test_native_batch_norm_legit_no_training_u55_BI(
         self,
         test_name: str,
         test_data: torch.Tensor,
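The renamed tests above track the core ATen operator that batch norm lowers to at export time (aten._native_batch_norm_legit_no_training for eval-mode modules with running stats, aten._native_batch_norm_legit for the no-stats variant) rather than the nn.Module name. The following is a minimal sketch, not part of this commit, of how that mapping can be confirmed locally with torch.export; the exact op name printed may differ between PyTorch versions.

# Sketch only (not from this commit): show which ATen op an eval-mode
# BatchNorm2d becomes after torch.export. Assumes a recent PyTorch where
# torch.export.export is available.
import torch


class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.bn = torch.nn.BatchNorm2d(num_features=3)

    def forward(self, x):
        return self.bn(x)


model = Model().eval()  # eval mode, running stats are used
exported = torch.export.export(model, (torch.randn(1, 3, 8, 8),))
# The graph is expected to contain aten._native_batch_norm_legit_no_training;
# with track_running_stats=False it would be aten._native_batch_norm_legit.
print(exported.graph_module.code)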
Lines changed: 110 additions & 170 deletions
@@ -1,183 +1,123 @@
 # Copyright 2024-2025 Arm Limited and/or its affiliates.
-# All rights reserved.
 #
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-import unittest
-
-from typing import List, Tuple, Union
-
-import pytest
+from typing import List, Union
 
 import torch
-from executorch.backends.arm.test import common, conftest
-from executorch.backends.arm.test.tester.arm_tester import ArmTester
-from executorch.exir.backend.backend_details import CompileSpec
-from parameterized import parameterized
-
-
-test_data_suite = [
-    # (test_name, test_data, [normalized_shape, eps, elementwise_affine, has_bias] )
-    ("randn_last_dim", torch.randn(1, 5, 5, 5), [[5]]),
-    ("rand_last_two_dims", torch.rand(1, 5, 5, 5), [[5, 5]]),
-    (
-        "rand_last_two_dims_not_elementwise_affine",
-        torch.rand(1, 5, 5, 5),
-        [[5, 5], 1e-5, False],
-    ),
-    (
-        "rand_last_two_dims_not_elementwise_affine_no_bias",
-        torch.rand(1, 5, 5, 5),
-        [[5, 5], 1e-5, False, False],
-    ),
-    ("randn_last_three_dims", torch.randn(1, 15, 10, 5), [[15, 10, 5]]),
-    (
-        "randn_last_three_dims_no_bias",
-        torch.randn(1, 15, 10, 5),
-        [[15, 10, 5], 1e-2, False, False],
-    ),
-]
-
-
-class TestLayerNorm(unittest.TestCase):
-
-    class LayerNorm(torch.nn.Module):
-
-        def __init__(
-            self,
-            normalized_shape: Union[int, List[int]],
-            eps: float = 1e-5,
-            elementwise_affine: bool = True,
-            has_bias: bool = True,
-        ):
-            super().__init__()
-            self.layer_norm = torch.nn.LayerNorm(
-                normalized_shape,
-                eps=eps,
-                elementwise_affine=elementwise_affine,
-                bias=has_bias,
-            )
-            if elementwise_affine:
-                self.layer_norm.weight = torch.nn.Parameter(
-                    torch.ones(normalized_shape)
-                )
-            if has_bias:
-                self.layer_norm.bias = torch.nn.Parameter(
-                    torch.rand(normalized_shape)
-                )
-
-        def forward(self, x):
-            return self.layer_norm(x)
-
-    def _test_layernorm_tosa_MI_pipeline(
-        self, module: torch.nn.Module, test_data: Tuple[torch.Tensor]
-    ):
-        (
-            ArmTester(
-                model=module,
-                example_inputs=test_data,
-                compile_spec=common.get_tosa_compile_spec(
-                    "TOSA-0.80+MI",
-                ),
-            )
-            .export()
-            .check(["torch.ops.aten.layer_norm.default"])
-            .to_edge()
-            .partition()
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
-            .check_not(["torch.ops.aten.layer_norm.default"])
-            .to_executorch()
-            .run_method_and_compare_outputs(inputs=test_data)
-        )
+from executorch.backends.arm.test import common
+from executorch.backends.arm.test.tester.test_pipeline import (
+    EthosU55PipelineBI,
+    EthosU85PipelineBI,
+    TosaPipelineBI,
+    TosaPipelineMI,
+)
 
-    def _test_layernorm_tosa_BI_pipeline(
-        self, module: torch.nn.Module, test_data: Tuple[torch.Tensor]
-    ):
-        (
-            ArmTester(
-                model=module,
-                example_inputs=test_data,
-                compile_spec=common.get_tosa_compile_spec(
-                    "TOSA-0.80+BI",
-                ),
-            )
-            .quantize()
-            .check_not(["torch.ops.aten.layer_norm.default"])
-            .export()
-            .to_edge()
-            .partition()
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
-            .to_executorch()
-            .run_method_and_compare_outputs(qtol=1, inputs=test_data)
-        )
 
-    def _test_layernorm_ethosu_BI_pipeline(
-        self,
-        module: torch.nn.Module,
-        compile_spec: CompileSpec,
-        test_data: Tuple[torch.Tensor],
-    ):
-        tester = (
-            ArmTester(
-                model=module,
-                example_inputs=test_data,
-                compile_spec=compile_spec,
-            )
-            .quantize()
-            .check_not(["torch.ops.aten.layer_norm.default"])
-            .export()
-            .to_edge()
-            .partition()
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
-            .to_executorch()
-            .serialize()
-        )
-        if conftest.is_option_enabled("corstone_fvp"):
-            tester.run_method_and_compare_outputs(qtol=1, inputs=test_data)
+class LayerNorm(torch.nn.Module):
 
-    @parameterized.expand(test_data_suite)
-    def test_layer_norm_tosa_MI(
+    def __init__(
         self,
-        test_name: str,
-        test_data: torch.Tensor,
-        model_params,
+        normalized_shape: Union[int, List[int]],
+        eps: float = 1e-5,
+        elementwise_affine: bool = True,
+        has_bias: bool = True,
     ):
-        self._test_layernorm_tosa_MI_pipeline(
-            self.LayerNorm(*model_params), (test_data,)
-        )
-
-    @parameterized.expand(test_data_suite)
-    def test_layer_norm_tosa_BI(
-        self,
-        test_name: str,
-        test_data: torch.Tensor,
-        model_params,
-    ):
-        self._test_layernorm_tosa_BI_pipeline(
-            self.LayerNorm(*model_params), (test_data,)
-        )
-
-    @parameterized.expand(test_data_suite)
-    @pytest.mark.corstone_fvp
-    def test_layer_norm_u55_BI(
-        self,
-        test_name: str,
-        test_data: torch.Tensor,
-        model_params,
-    ):
-        self._test_layernorm_ethosu_BI_pipeline(
-            self.LayerNorm(*model_params), common.get_u55_compile_spec(), (test_data,)
-        )
-
-    @parameterized.expand(test_data_suite)
-    @pytest.mark.corstone_fvp
-    def test_layer_norm_u85_BI(
-        self,
-        test_name: str,
-        test_data: torch.Tensor,
-        model_params,
-    ):
-        self._test_layernorm_ethosu_BI_pipeline(
-            self.LayerNorm(*model_params), common.get_u85_compile_spec(), (test_data,)
+        super().__init__()
+        self.layer_norm = torch.nn.LayerNorm(
+            normalized_shape,
+            eps=eps,
+            elementwise_affine=elementwise_affine,
+            bias=has_bias,
         )
+        if elementwise_affine:
+            self.layer_norm.weight = torch.nn.Parameter(torch.ones(normalized_shape))
+        if has_bias:
+            self.layer_norm.bias = torch.nn.Parameter(torch.rand(normalized_shape))
+
+    def forward(self, x):
+        return self.layer_norm(x)
+
+
+input_t = tuple[torch.Tensor]
+test_data_suite = {
+    "randn_last_dim": ((torch.randn(1, 5, 5, 5),), LayerNorm([5])),
+    "rand_last_two_dims": ((torch.rand(1, 5, 5, 5),), LayerNorm([5, 5])),
+    "rand_last_two_dims_not_elementwise_affine": (
+        (torch.rand(1, 5, 5, 5),),
+        LayerNorm([5, 5], 1e-5, False),
+    ),
+    "rand_last_two_dims_not_elementwise_affine_no_bias": (
+        (torch.rand(1, 5, 5, 5),),
+        LayerNorm([5, 5], 1e-5, False, False),
+    ),
+    "randn_last_three_dims": ((torch.randn(1, 15, 10, 5),), LayerNorm([15, 10, 5])),
+    "randn_last_three_dims_no_bias": (
+        (torch.randn(1, 15, 10, 5),),
+        LayerNorm([15, 10, 5], 1e-2, False, False),
+    ),
+}
+
+
+@common.parametrize("test_data", test_data_suite)
+def test_native_layer_norm_tosa_MI(test_data):
+    pipeline = TosaPipelineMI[input_t](
+        test_data[1],
+        test_data[0],
+        "torch.ops.aten.layer_norm.default",
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", test_data_suite)
+def test_native_layer_norm_tosa_BI(test_data):
+    pipeline = TosaPipelineBI[input_t](
+        test_data[1],
+        test_data[0],
+        "torch.ops.aten.sub.Tensor", # Just check for sub op included in the layernorm decomposition
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", test_data_suite)
+def test_native_layer_norm_u55_BI(test_data):
+    pipeline = EthosU55PipelineBI[input_t](
+        test_data[1],
+        test_data[0],
+        "torch.ops.aten.sub.Tensor", # Just check for sub op included in the layernorm decomposition
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", test_data_suite)
+def test_native_layer_norm_u85_BI(test_data):
+    pipeline = EthosU85PipelineBI[input_t](
+        test_data[1],
+        test_data[0],
+        "torch.ops.aten.sub.Tensor", # Just check for sub op included in the layernorm decomposition
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", test_data_suite)
+@common.SkipIfNoCorstone300
+def test_native_layer_norm_u55_BI_on_fvp(test_data):
+    pipeline = EthosU55PipelineBI[input_t](
+        test_data[1],
+        test_data[0],
+        "torch.ops.aten.sub.Tensor", # Just check for sub op included in the layernorm decomposition
+    )
+    pipeline.run()
+
+
+@common.parametrize("test_data", test_data_suite)
+@common.SkipIfNoCorstone320
+def test_native_layer_norm_u85_BI_on_fvp(test_data):
+    pipeline = EthosU85PipelineBI[input_t](
+        test_data[1],
+        test_data[0],
+        "torch.ops.aten.sub.Tensor", # Just check for sub op included in the layernorm decomposition
+        run_on_fvp=True,
+    )
+    pipeline.run()
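The refactored layer norm tests above drive the shared test pipelines instead of hand-written ArmTester chains, and each entry in test_data_suite is an (example inputs, module) pair keyed by the test name. As a hypothetical illustration, not part of this commit, adding one more case to the new file would only need another dictionary entry, which every pipeline-based test then picks up through common.parametrize; the shape and key below are made up.

# Hypothetical extra case, reusing the (inputs, module) layout defined above;
# the dictionary key presumably becomes the parametrized test id.
test_data_suite["randn_last_dim_large"] = (
    (torch.randn(1, 8, 8, 32),),  # example inputs, as a one-element tuple
    LayerNorm([32]),              # module under test, normalizing the last dim
)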
