78 changes: 46 additions & 32 deletions backends/xnnpack/test/ops/avgpool2d.py
@@ -29,17 +29,22 @@ def forward(self, x):
         return self.avgPool(x)
 
     def _test_argpool2d(self, inputs):
-        (
-            Tester(self.AvgPool2d(), inputs)
-            .export()
-            .check_count({"torch.ops.aten.avg_pool2d.default": 1})
-            .to_edge_transform_and_lower()
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
-            .check_not(["executorch_exir_dialects_edge__ops_aten_avg_pool2d_default"])
-            .to_executorch()
-            .serialize()
-            .run_method_and_compare_outputs()
-        )
+        for legacy in (True, False):
+            tester = Tester(self.AvgPool2d(), inputs)
+            tester.export()
+            tester.check_count({"torch.ops.aten.avg_pool2d.default": 1})
+            if legacy:
+                tester.to_edge()
+                tester.partition()
+            else:
+                tester.to_edge_transform_and_lower()
+            tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
+            tester.check_not(
+                ["executorch_exir_dialects_edge__ops_aten_avg_pool2d_default"]
+            )
+            tester.to_executorch()
+            tester.serialize()
+            tester.run_method_and_compare_outputs()
 
     def test_fp16_avgpool2d(self):
         inputs = (torch.randn(1, 1, 10, 10).to(torch.float16),)
@@ -54,36 +54,45 @@ def test_fp32_avgpool2d_ceil_mode_unsupported(self):
         The XNNPACK backend does not support ceil mode.
         """
         inputs = (torch.randn(1, 1, 10, 10),)
-        (
-            Tester(self.AvgPool2d(ceil_mode=True), inputs)
-            .export()
-            .check_count({"torch.ops.aten.avg_pool2d.default": 1})
-            .to_edge_transform_and_lower()
-            .check_not(["torch.ops.higher_order.executorch_call_delegate"])
-        )
+        for legacy in (True, False):
+            tester = Tester(self.AvgPool2d(ceil_mode=True), inputs)
+            tester.export()
+            tester.check_count({"torch.ops.aten.avg_pool2d.default": 1})
+            if legacy:
+                tester.to_edge()
+                tester.partition()
+            else:
+                tester.to_edge_transform_and_lower()
+            tester.check_not(["torch.ops.higher_order.executorch_call_delegate"])
 
     def test_fp32_avgpool2d_count_include_pad_unsupported(self):
         """
         The XNNPACK backend does not support count_include_pad=True.
         """
         inputs = (torch.randn(1, 1, 10, 10),)
-        (
-            Tester(self.AvgPool2d(count_include_pad=True), inputs)
-            .export()
-            .check_count({"torch.ops.aten.avg_pool2d.default": 1})
-            .to_edge_transform_and_lower()
-            .check_not(["torch.ops.higher_order.executorch_call_delegate"])
-        )
+        for legacy in (True, False):
+            tester = Tester(self.AvgPool2d(count_include_pad=True), inputs)
+            tester.export()
+            tester.check_count({"torch.ops.aten.avg_pool2d.default": 1})
+            if legacy:
+                tester.to_edge()
+                tester.partition()
+            else:
+                tester.to_edge_transform_and_lower()
+            tester.check_not(["torch.ops.higher_order.executorch_call_delegate"])
 
     def test_fp32_avgpool2d_divisor_override(self):
         """
         The XNNPACK backend does not support divisor overrides not equal to the pooling region.
         """
         inputs = (torch.randn(1, 1, 10, 10),)
-        (
-            Tester(self.AvgPool2d(divisor_override=5), inputs)
-            .export()
-            .check_count({"torch.ops.aten.avg_pool2d.default": 1})
-            .to_edge_transform_and_lower()
-            .check_not(["torch.ops.higher_order.executorch_call_delegate"])
-        )
+        for legacy in (True, False):
+            tester = Tester(self.AvgPool2d(divisor_override=5), inputs)
+            tester.export()
+            tester.check_count({"torch.ops.aten.avg_pool2d.default": 1})
+            if legacy:
+                tester.to_edge()
+                tester.partition()
+            else:
+                tester.to_edge_transform_and_lower()
+            tester.check_not(["torch.ops.higher_order.executorch_call_delegate"])
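The same two-flow pattern — the legacy to_edge() plus partition() pipeline versus the newer to_edge_transform_and_lower() — repeats in every file touched by this PR. A minimal sketch of a helper that could factor it out (the helper name and parameters are hypothetical, not part of this PR; it assumes only the Tester methods already exercised above):

# Hypothetical helper, not part of this PR: runs a Tester pipeline through
# both the legacy (to_edge + partition) and new (to_edge_transform_and_lower)
# lowering flows, checking that the op is delegated in each.
def run_both_lowering_flows(make_tester, aten_op, edge_op):
    for legacy in (True, False):
        tester = make_tester()  # fresh Tester per flow, as in the tests above
        tester.export()
        tester.check_count({aten_op: 1})
        if legacy:
            tester.to_edge()
            tester.partition()
        else:
            tester.to_edge_transform_and_lower()
        tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
        tester.check_not([edge_op])
        tester.to_executorch()
        tester.serialize()
        tester.run_method_and_compare_outputs()

# Example use for the avg_pool2d test above:
# run_both_lowering_flows(
#     lambda: Tester(self.AvgPool2d(), inputs),
#     "torch.ops.aten.avg_pool2d.default",
#     "executorch_exir_dialects_edge__ops_aten_avg_pool2d_default",
# )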
79 changes: 54 additions & 25 deletions backends/xnnpack/test/ops/bilinear2d.py
@@ -78,43 +78,72 @@ def forward(self, x):
             "executorch_exir_dialects_edge__ops_aten_clamp_default",
         }
 
+    @unittest.skip('Expected to not find "aten_index_Tensor"')
+    def test_fp32_static_resize_bilinear2d_legacy(self):
+        example_inputs = (torch.randn(2, 3, 4, 5),)
+        tester = Tester(self.StaticResizeBilinear2dModule(), example_inputs)
+        tester.export()
+        tester.to_edge()
+        tester.partition()
+        tester.check_not(self.ops)
+        tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
+        tester.to_executorch()
+        tester.serialize()
+        tester.run_method_and_compare_outputs()
+
     def test_fp32_static_resize_bilinear2d(self):
         example_inputs = (torch.randn(2, 3, 4, 5),)
-        (
-            Tester(self.StaticResizeBilinear2dModule(), example_inputs)
-            .export()
-            .to_edge_transform_and_lower()
-            .check_not(self.ops)
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
-            .to_executorch()
-            .serialize()
-            .run_method_and_compare_outputs()
-        )
+        tester = Tester(self.StaticResizeBilinear2dModule(), example_inputs)
+        tester.export()
+        tester.to_edge_transform_and_lower()
+        tester.check_not(self.ops)
+        tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
+        tester.to_executorch()
+        tester.serialize()
+        tester.run_method_and_compare_outputs()
 
+    @unittest.skip('Expected to not find "aten_index_Tensor"')
+    def test_fp32_static_resize_bilinear2d_with_align_corners_legacy(self):
+        example_inputs = (torch.randn(2, 3, 4, 5),)
+        tester = Tester(
+            self.StaticResizeBilinear2dModuleWithAlignCorners(), example_inputs
+        )
+        tester.export()
+        tester.to_edge()
+        tester.partition()
+        tester.check_not(self.ops)
+        tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
+        tester.to_executorch()
+        tester.serialize()
+        tester.run_method_and_compare_outputs()
+
     def test_fp32_static_resize_bilinear2d_with_align_corners(self):
         example_inputs = (torch.randn(2, 3, 4, 5),)
-        (
-            Tester(self.StaticResizeBilinear2dModuleWithAlignCorners(), example_inputs)
-            .export()
-            .to_edge_transform_and_lower()
-            .check_not(self.ops)
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
-            .to_executorch()
-            .serialize()
-            .run_method_and_compare_outputs()
-        )
+        tester = Tester(
+            self.StaticResizeBilinear2dModuleWithAlignCorners(), example_inputs
+        )
+        tester.export()
+        tester.to_edge_transform_and_lower()
+        tester.check_not(self.ops)
+        tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
+        tester.to_executorch()
+        tester.serialize()
+        tester.run_method_and_compare_outputs()
 
     def test_fp32_static_resize_bilinear2d_antialiased(self):
         # Check bilinear2d_aa is not partitioned
         example_inputs = (torch.randn(2, 3, 4, 5),)
-        (
-            Tester(self.Bilinear2dAntiAlias(), example_inputs)
-            .export()
-            .to_edge_transform_and_lower()
-            .check_count(
+        for legacy in (True, False):
+            tester = Tester(self.Bilinear2dAntiAlias(), example_inputs)
+            tester.export()
+            if legacy:
+                tester.to_edge()
+                tester.partition()
+            else:
+                tester.to_edge_transform_and_lower()
+            tester.check_count(
                 {
                     "executorch_exir_dialects_edge__ops_aten__upsample_bilinear2d_aa_default": 2
                 }
             )
-            .check_not(["torch.ops.higher_order.executorch_call_delegate"])
-        )
+            tester.check_not(["torch.ops.higher_order.executorch_call_delegate"])
25 changes: 14 additions & 11 deletions backends/xnnpack/test/ops/bmm.py
@@ -19,17 +19,20 @@ def forward(self, x, y):
         return torch.bmm(x, y)
 
     def _test_bmm(self, inputs):
-        (
-            Tester(self.BMM(), inputs)
-            .export()
-            .check_count({"torch.ops.aten.bmm.default": 1})
-            .to_edge_transform_and_lower()
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
-            .check_not(["executorch_exir_dialects_edge__ops_aten_bmm_default"])
-            .to_executorch()
-            .serialize()
-            .run_method_and_compare_outputs()
-        )
+        for legacy in (True, False):
+            tester = Tester(self.BMM(), inputs)
+            tester.export()
+            tester.check_count({"torch.ops.aten.bmm.default": 1})
+            if legacy:
+                tester.to_edge()
+                tester.partition()
+            else:
+                tester.to_edge_transform_and_lower()
+            tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
+            tester.check_not(["executorch_exir_dialects_edge__ops_aten_bmm_default"])
+            tester.to_executorch()
+            tester.serialize()
+            tester.run_method_and_compare_outputs()
 
     def test_fp16_bmm(self):
         inputs = (
25 changes: 14 additions & 11 deletions backends/xnnpack/test/ops/ceil.py
@@ -20,17 +20,20 @@ def forward(self, x):
         return z
 
     def _test_ceil(self, inputs):
-        (
-            Tester(self.Ceil(), inputs)
-            .export()
-            .check_count({"torch.ops.aten.ceil.default": 1})
-            .to_edge_transform_and_lower()
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
-            .check_not(["executorch_exir_dialects_edge__ops_aten_ceil_default"])
-            .to_executorch()
-            .serialize()
-            .run_method_and_compare_outputs()
-        )
+        for legacy in (True, False):
+            tester = Tester(self.Ceil(), inputs)
+            tester.export()
+            tester.check_count({"torch.ops.aten.ceil.default": 1})
+            if legacy:
+                tester.to_edge()
+                tester.partition()
+            else:
+                tester.to_edge_transform_and_lower()
+            tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
+            tester.check_not(["executorch_exir_dialects_edge__ops_aten_ceil_default"])
+            tester.to_executorch()
+            tester.serialize()
+            tester.run_method_and_compare_outputs()
 
     def test_fp16_ceil(self):
         inputs = (
54 changes: 30 additions & 24 deletions backends/xnnpack/test/ops/clamp.py
@@ -22,17 +22,20 @@ def forward(self, x):
         return z + z
 
     def _test_clamp(self, module, inputs):
-        (
-            Tester(module, inputs)
-            .export()
-            .check_count({"torch.ops.aten.clamp.default": 1})
-            .to_edge_transform_and_lower()
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
-            .check_not(["executorch_exir_dialects_edge__ops_aten_clamp_default"])
-            .to_executorch()
-            .serialize()
-            .run_method_and_compare_outputs()
-        )
+        for legacy in (True, False):
+            tester = Tester(module, inputs)
+            tester.export()
+            tester.check_count({"torch.ops.aten.clamp.default": 1})
+            if legacy:
+                tester.to_edge()
+                tester.partition()
+            else:
+                tester.to_edge_transform_and_lower()
+            tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
+            tester.check_not(["executorch_exir_dialects_edge__ops_aten_clamp_default"])
+            tester.to_executorch()
+            tester.serialize()
+            tester.run_method_and_compare_outputs()
 
     def test_fp16_clamp(self):
         inputs = (torch.randn(1, 4, 122, 122).to(torch.float16) * 2,)
@@ -56,21 +59,24 @@ def test_fp32_clamp_upper(self):
 
     def test_qs8_clamp(self):
         inputs = (torch.randn(1, 4, 122, 122),)
-        (
-            Tester(self.Clamp(min_val=-1, max_val=1), inputs)
-            .quantize()
-            .export()
-            .check_count({"torch.ops.aten.clamp.default": 1})
-            .check(["torch.ops.quantized_decomposed"])
-            .to_edge_transform_and_lower()
-            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
-            .check_not(
+        for legacy in (True, False):
+            tester = Tester(self.Clamp(min_val=-1, max_val=1), inputs)
+            tester.quantize()
+            tester.export()
+            tester.check_count({"torch.ops.aten.clamp.default": 1})
+            tester.check(["torch.ops.quantized_decomposed"])
+            if legacy:
+                tester.to_edge()
+                tester.partition()
+            else:
+                tester.to_edge_transform_and_lower()
+            tester.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
+            tester.check_not(
                 [
                     "executorch_exir_dialects_edge__ops_aten_clamp_default",
                     "torch.ops.quantized_decomposed",
                 ]
             )
-            .to_executorch()
-            .serialize()
-            .run_method_and_compare_outputs()
-        )
+            tester.to_executorch()
+            tester.serialize()
+            tester.run_method_and_compare_outputs()