Skip to content

Commit 54bf735

Browse files
committed
[EE/BE] Adding legacy partitioner tests
1 parent 793a988 commit 54bf735

33 files changed

+1135
-797
lines changed

backends/xnnpack/test/ops/avgpool2d.py

Lines changed: 48 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@
77
import unittest
88

99
import torch
10+
11+
from executorch.backends.xnnpack.test import tester
1012
from executorch.backends.xnnpack.test.tester import Tester
1113

1214

@@ -29,17 +31,22 @@ def forward(self, x):
2931
return self.avgPool(x)
3032

3133
def _test_argpool2d(self, inputs):
    """Verify avg_pool2d is fully delegated and matches eager output.

    Exercises both the legacy lowering flow (explicit to_edge + partition)
    and the default combined to_edge_transform_and_lower flow.
    NOTE(review): method name says "argpool" — presumably a typo for
    "avgpool"; kept as-is since test callers reference it.
    """
    for use_legacy_flow in (True, False):
        tester = Tester(self.AvgPool2d(), inputs).export()
        tester.check_count({"torch.ops.aten.avg_pool2d.default": 1})
        # Legacy flow splits lowering into two explicit steps.
        if use_legacy_flow:
            tester.to_edge().partition()
        else:
            tester.to_edge_transform_and_lower()
        tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
        tester.check_not(
            ["executorch_exir_dialects_edge__ops_aten_avg_pool2d_default"]
        )
        tester.to_executorch().serialize().run_method_and_compare_outputs()
4350

4451
def test_fp16_avgpool2d(self):
4552
inputs = (torch.randn(1, 1, 10, 10).to(torch.float16),)
def test_fp32_avgpool2d_ceil_mode_unsupported(self):
    """
    The XNNPACK backend does not support ceil mode.
    """
    inputs = (torch.randn(1, 1, 10, 10),)
    for use_legacy_flow in (True, False):
        tester = Tester(self.AvgPool2d(ceil_mode=True), inputs).export()
        tester.check_count({"torch.ops.aten.avg_pool2d.default": 1})
        # Legacy flow splits lowering into two explicit steps.
        if use_legacy_flow:
            tester.to_edge().partition()
        else:
            tester.to_edge_transform_and_lower()
        # Unsupported config: nothing should have been delegated.
        tester.check_not(["torch.ops.higher_order.executorch_call_delegate"])
6474

6575
def test_fp32_avgpool2d_count_include_pad_unsupported(self):
    """
    The XNNPACK backend does not support count_include_pad=True.
    """
    inputs = (torch.randn(1, 1, 10, 10),)
    for use_legacy_flow in (True, False):
        tester = Tester(self.AvgPool2d(count_include_pad=True), inputs).export()
        tester.check_count({"torch.ops.aten.avg_pool2d.default": 1})
        # Legacy flow splits lowering into two explicit steps.
        if use_legacy_flow:
            tester.to_edge().partition()
        else:
            tester.to_edge_transform_and_lower()
        # Unsupported config: nothing should have been delegated.
        tester.check_not(["torch.ops.higher_order.executorch_call_delegate"])
7790

7891
def test_fp32_avgpool2d_divisor_override(self):
    """
    The XNNPACK backend does not support divisor overrides not equal to the pooling region.
    """
    inputs = (torch.randn(1, 1, 10, 10),)
    for use_legacy_flow in (True, False):
        tester = Tester(self.AvgPool2d(divisor_override=5), inputs).export()
        tester.check_count({"torch.ops.aten.avg_pool2d.default": 1})
        # Legacy flow splits lowering into two explicit steps.
        if use_legacy_flow:
            tester.to_edge().partition()
        else:
            tester.to_edge_transform_and_lower()
        # Unsupported config: nothing should have been delegated.
        tester.check_not(["torch.ops.higher_order.executorch_call_delegate"])

backends/xnnpack/test/ops/bilinear2d.py

Lines changed: 57 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -78,43 +78,74 @@ def forward(self, x):
7878
"executorch_exir_dialects_edge__ops_aten_clamp_default",
7979
}
8080

81+
@unittest.skip('Expected to not find "aten_index_Tensor"')
def test_fp32_static_resize_bilinear2d_legacy(self):
    """Static bilinear resize via the legacy to_edge + partition flow."""
    example_inputs = (torch.randn(2, 3, 4, 5),)
    (
        Tester(self.StaticResizeBilinear2dModule(), example_inputs)
        .export()
        .to_edge()
        .partition()
        .check_not(self.ops)
        .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
        .to_executorch()
        .serialize()
        .run_method_and_compare_outputs()
    )
93+
8194
def test_fp32_static_resize_bilinear2d(self):
    """Static bilinear resize lowers fully via the default combined flow."""
    example_inputs = (torch.randn(2, 3, 4, 5),)
    (
        Tester(self.StaticResizeBilinear2dModule(), example_inputs)
        .export()
        .to_edge_transform_and_lower()
        .check_not(self.ops)
        .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
        .to_executorch()
        .serialize()
        .run_method_and_compare_outputs()
    )
104+
105+
@unittest.skip('Expected to not find "aten_index_Tensor"')
def test_fp32_static_resize_bilinear2d_with_align_corners_legacy(self):
    """Align-corners bilinear resize via the legacy to_edge + partition flow.

    Fix: the previous version looped over ``legacy in (True, False)`` but
    ignored the flag and unconditionally ran ``to_edge()`` + ``partition()``
    AND ``to_edge_transform_and_lower()`` in the same iteration — lowering
    the module twice and executing the identical sequence on both loop
    passes. This ``*_legacy`` test is meant to mirror
    ``test_fp32_static_resize_bilinear2d_legacy``: no loop, legacy flow only
    (the non-legacy flow is covered by the companion test without the
    ``_legacy`` suffix).
    """
    example_inputs = (torch.randn(2, 3, 4, 5),)
    tester = Tester(
        self.StaticResizeBilinear2dModuleWithAlignCorners(), example_inputs
    )
    tester.export()
    tester.to_edge()
    tester.partition()
    tester.check_not(self.ops)
    tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
    tester.to_executorch()
    tester.serialize()
    tester.run_method_and_compare_outputs()
93121

94122
def test_fp32_static_resize_bilinear2d_with_align_corners(self):
    """Align-corners bilinear resize lowers fully via the default flow."""
    example_inputs = (torch.randn(2, 3, 4, 5),)
    (
        Tester(self.StaticResizeBilinear2dModuleWithAlignCorners(), example_inputs)
        .export()
        .to_edge_transform_and_lower()
        .check_not(self.ops)
        .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
        .to_executorch()
        .serialize()
        .run_method_and_compare_outputs()
    )
106134

107135
def test_fp32_static_resize_bilinear2d_antialiased(self):
    # Check bilinear2d_aa is not partitioned, under both lowering flows.
    example_inputs = (torch.randn(2, 3, 4, 5),)
    for use_legacy_flow in (True, False):
        tester = Tester(self.Bilinear2dAntiAlias(), example_inputs).export()
        # Legacy flow splits lowering into two explicit steps.
        if use_legacy_flow:
            tester.to_edge().partition()
        else:
            tester.to_edge_transform_and_lower()
        # Both anti-aliased upsample ops must remain in the edge dialect …
        tester.check_count(
            {
                "executorch_exir_dialects_edge__ops_aten__upsample_bilinear2d_aa_default": 2
            }
        )
        # … and nothing should have been delegated.
        tester.check_not(["torch.ops.higher_order.executorch_call_delegate"])

backends/xnnpack/test/ops/bmm.py

Lines changed: 14 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -19,17 +19,20 @@ def forward(self, x, y):
1919
return torch.bmm(x, y)
2020

2121
def _test_bmm(self, inputs):
    """Verify bmm is fully delegated and matches eager output.

    Runs both the legacy (to_edge + partition) and the default
    (to_edge_transform_and_lower) lowering flows.
    """
    for use_legacy_flow in (True, False):
        tester = Tester(self.BMM(), inputs).export()
        tester.check_count({"torch.ops.aten.bmm.default": 1})
        # Legacy flow splits lowering into two explicit steps.
        if use_legacy_flow:
            tester.to_edge().partition()
        else:
            tester.to_edge_transform_and_lower()
        tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
        tester.check_not(["executorch_exir_dialects_edge__ops_aten_bmm_default"])
        tester.to_executorch().serialize().run_method_and_compare_outputs()
3336

3437
def test_fp16_bmm(self):
3538
inputs = (

backends/xnnpack/test/ops/ceil.py

Lines changed: 16 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,8 @@
77
import unittest
88

99
import torch
10+
11+
from executorch.backends.xnnpack.test import tester
1012
from executorch.backends.xnnpack.test.tester import Tester
1113

1214

@@ -20,17 +22,20 @@ def forward(self, x):
2022
return z
2123

2224
def _test_ceil(self, inputs):
    """Verify ceil is fully delegated and matches eager output.

    Runs both the legacy (to_edge + partition) and the default
    (to_edge_transform_and_lower) lowering flows.
    """
    for use_legacy_flow in (True, False):
        tester = Tester(self.Ceil(), inputs).export()
        tester.check_count({"torch.ops.aten.ceil.default": 1})
        # Legacy flow splits lowering into two explicit steps.
        if use_legacy_flow:
            tester.to_edge().partition()
        else:
            tester.to_edge_transform_and_lower()
        tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
        tester.check_not(["executorch_exir_dialects_edge__ops_aten_ceil_default"])
        tester.to_executorch().serialize().run_method_and_compare_outputs()
3439

3540
def test_fp16_ceil(self):
3641
inputs = (

backends/xnnpack/test/ops/clamp.py

Lines changed: 30 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -22,17 +22,20 @@ def forward(self, x):
2222
return z + z
2323

2424
def _test_clamp(self, module, inputs):
    """Verify clamp is fully delegated and matches eager output.

    Runs both the legacy (to_edge + partition) and the default
    (to_edge_transform_and_lower) lowering flows.
    """
    for use_legacy_flow in (True, False):
        tester = Tester(module, inputs).export()
        tester.check_count({"torch.ops.aten.clamp.default": 1})
        # Legacy flow splits lowering into two explicit steps.
        if use_legacy_flow:
            tester.to_edge().partition()
        else:
            tester.to_edge_transform_and_lower()
        tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
        tester.check_not(["executorch_exir_dialects_edge__ops_aten_clamp_default"])
        tester.to_executorch().serialize().run_method_and_compare_outputs()
3639

3740
def test_fp16_clamp(self):
3841
inputs = (torch.randn(1, 4, 122, 122).to(torch.float16) * 2,)
@@ -56,21 +59,24 @@ def test_fp32_clamp_upper(self):
5659

5760
def test_qs8_clamp(self):
    """Quantized clamp is fully delegated, leaving no quantized_decomposed ops.

    Runs both the legacy (to_edge + partition) and the default
    (to_edge_transform_and_lower) lowering flows.
    """
    inputs = (torch.randn(1, 4, 122, 122),)
    for use_legacy_flow in (True, False):
        tester = Tester(self.Clamp(min_val=-1, max_val=1), inputs)
        tester.quantize().export()
        tester.check_count({"torch.ops.aten.clamp.default": 1})
        tester.check(["torch.ops.quantized_decomposed"])
        # Legacy flow splits lowering into two explicit steps.
        if use_legacy_flow:
            tester.to_edge().partition()
        else:
            tester.to_edge_transform_and_lower()
        tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
        # Both the edge clamp and the quantization stubs must be consumed
        # by the delegate.
        tester.check_not(
            [
                "executorch_exir_dialects_edge__ops_aten_clamp_default",
                "torch.ops.quantized_decomposed",
            ]
        )
        tester.to_executorch().serialize().run_method_and_compare_outputs()

0 commit comments

Comments
 (0)