diff --git a/backends/arm/operator_support/tosa_supported_operators.py b/backends/arm/operator_support/tosa_supported_operators.py index 607ae017a56..7a9ce29ff52 100644 --- a/backends/arm/operator_support/tosa_supported_operators.py +++ b/backends/arm/operator_support/tosa_supported_operators.py @@ -195,6 +195,11 @@ def is_node_supported( exir_ops.edge.aten.bitwise_xor.Tensor, exir_ops.edge.aten.amax.default, exir_ops.edge.aten.amin.default, + exir_ops.edge.aten.eq.Tensor, + exir_ops.edge.aten.ge.Tensor, + exir_ops.edge.aten.gt.Tensor, + exir_ops.edge.aten.le.Tensor, + exir_ops.edge.aten.lt.Tensor, ] if node.target in unsupported_ops: diff --git a/backends/arm/test/ops/test_eq.py b/backends/arm/test/ops/test_eq.py index 263a042ea1c..329f65dfead 100644 --- a/backends/arm/test/ops/test_eq.py +++ b/backends/arm/test/ops/test_eq.py @@ -1,145 +1,136 @@ # Copyright 2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest +from typing import Tuple +import pytest import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - -test_data_suite = [ - # (test_name, input, other,) See torch.eq() for info - ( - "op_eq_rank1_ones", - torch.ones(5), - torch.ones(5), - ), - ( - "op_eq_rank2_rand", - torch.rand(4, 5), - torch.rand(1, 5), - ), - ( - "op_eq_rank3_randn", - torch.randn(10, 5, 2), - torch.randn(10, 5, 2), - ), - ( - "op_eq_rank4_randn", - torch.randn(3, 2, 2, 2), - torch.randn(3, 2, 2, 2), - ), -] - - -class TestEqual(unittest.TestCase): - class Equal(torch.nn.Module): - def forward( - self, - input_: torch.Tensor, - other_: torch.Tensor, - ): - return input_ == other_ - - def _test_eq_tosa_MI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: tuple[torch.Tensor, torch.Tensor], - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .export() - .check_count({"torch.ops.aten.eq.Tensor": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_eq_tosa_BI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: tuple[torch.Tensor, torch.Tensor], - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.eq.Tensor": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - @parameterized.expand(test_data_suite) - def test_eq_tosa_MI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_eq_tosa_MI_pipeline( - common.get_tosa_compile_spec("TOSA-0.80+MI"), self.Equal(), test_data - ) - @parameterized.expand(test_data_suite) - def test_eq_tosa_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_eq_tosa_BI_pipeline( - common.get_tosa_compile_spec("TOSA-0.80+BI"), self.Equal(), test_data - ) - - 
@parameterized.expand(test_data_suite) - @unittest.skip - def test_eq_u55_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_eq_tosa_BI_pipeline( - common.get_u55_compile_spec(permute_memory_to_nhwc=True), - self.Equal(), - test_data, - ) - - @parameterized.expand(test_data_suite) - @unittest.skip - def test_eq_u85_BI( +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU85PipelineBI, + OpNotSupportedPipeline, + TosaPipelineBI, + TosaPipelineMI, +) + +aten_op = "torch.ops.aten.eq.Tensor" +exir_op = "executorch_exir_dialects_edge__ops_aten_eq_Tensor" + +input_t = Tuple[torch.Tensor] + + +class Equal(torch.nn.Module): + def __init__(self, input, other): + super().__init__() + self.input_ = input + self.other_ = other + + def forward( self, - test_name: str, input_: torch.Tensor, other_: torch.Tensor, ): - test_data = (input_, other_) - self._test_eq_tosa_BI_pipeline( - common.get_u85_compile_spec(permute_memory_to_nhwc=True), - self.Equal(), - test_data, - ) + return input_ == other_ + + def get_inputs(self): + return (self.input_, self.other_) + + +op_eq_rank1_ones = Equal( + torch.ones(5), + torch.ones(5), +) +op_eq_rank2_rand = Equal( + torch.rand(4, 5), + torch.rand(1, 5), +) +op_eq_rank3_randn = Equal( + torch.randn(10, 5, 2), + torch.randn(10, 5, 2), +) +op_eq_rank4_randn = Equal( + torch.randn(3, 2, 2, 2), + torch.randn(3, 2, 2, 2), +) + +test_data_common = { + "eq_rank1_ones": op_eq_rank1_ones, + "eq_rank2_rand": op_eq_rank2_rand, + "eq_rank3_randn": op_eq_rank3_randn, + "eq_rank4_randn": op_eq_rank4_randn, +} + + +@common.parametrize("test_module", test_data_common) +def test_eq_tosa_MI(test_module): + pipeline = TosaPipelineMI[input_t]( + test_module, test_module.get_inputs(), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_eq_tosa_BI(test_module): + pipeline = TosaPipelineBI[input_t]( + test_module, test_module.get_inputs(), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_eq_u55_BI(test_module): + # EQUAL is not supported on U55. + pipeline = OpNotSupportedPipeline[input_t]( + test_module, + test_module.get_inputs(), + "TOSA-0.80+BI+u55", + {exir_op: 1}, + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_eq_u85_BI(test_module): + pipeline = EthosU85PipelineBI[input_t]( + test_module, + test_module.get_inputs(), + aten_op, + exir_op, + run_on_fvp=False, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +@pytest.mark.skip(reason="The same as test_eq_u55_BI") +def test_eq_u55_BI_on_fvp(test_module): + # EQUAL is not supported on U55. 
+ pipeline = OpNotSupportedPipeline[input_t]( + test_module, + test_module.get_inputs(), + "TOSA-0.80+BI+u55", + {exir_op: 1}, + ) + pipeline.run() + + +@common.parametrize( + "test_module", + test_data_common, + xfails={"eq_rank4_randn": "4D fails because boolean Tensors can't be subtracted"}, +) +@common.SkipIfNoCorstone320 +def test_eq_u85_BI_on_fvp(test_module): + pipeline = EthosU85PipelineBI[input_t]( + test_module, + test_module.get_inputs(), + aten_op, + exir_op, + run_on_fvp=True, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_ge.py b/backends/arm/test/ops/test_ge.py index ff6cacd1f97..a6193f6ea08 100644 --- a/backends/arm/test/ops/test_ge.py +++ b/backends/arm/test/ops/test_ge.py @@ -1,140 +1,136 @@ # Copyright 2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest +from typing import Tuple +import pytest import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - -test_data_suite = [ - # (test_name, input, other,) See torch.ge() for info - ( - "op_ge_rank1_ones", - torch.ones(5), - torch.ones(5), - ), - ( - "op_ge_rank2_rand", - torch.rand(4, 5), - torch.rand(1, 5), - ), - ( - "op_ge_rank3_randn", - torch.randn(10, 5, 2), - torch.randn(10, 5, 2), - ), - ( - "op_ge_rank4_randn", - torch.randn(3, 2, 2, 2), - torch.randn(3, 2, 2, 2), - ), -] - - -class TestGreaterEqual(unittest.TestCase): - class GreaterEqual(torch.nn.Module): - def forward( - self, - input_: torch.Tensor, - other_: torch.Tensor, - ): - return input_ >= other_ - - def _test_ge_tosa_pipeline( - self, module: torch.nn.Module, test_data: tuple[torch.Tensor, torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.ge.Tensor": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_ge_tosa_BI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: tuple[torch.Tensor, torch.Tensor], - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.ge.Tensor": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - @parameterized.expand(test_data_suite) - def test_ge_tosa_MI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_ge_tosa_pipeline(self.GreaterEqual(), test_data) - @parameterized.expand(test_data_suite) - def test_ge_tosa_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_ge_tosa_BI_pipeline( - common.get_tosa_compile_spec("TOSA-0.80+BI"), self.GreaterEqual(), test_data - ) - - @parameterized.expand(test_data_suite) - @unittest.skip - def test_ge_u55_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - 
test_data = (input_, other_) - self._test_ge_tosa_BI_pipeline( - common.get_u55_compile_spec(permute_memory_to_nhwc=True), - self.GreaterEqual(), - test_data, - ) - - @parameterized.expand(test_data_suite) - @unittest.skip - def test_ge_u85_BI( +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU85PipelineBI, + OpNotSupportedPipeline, + TosaPipelineBI, + TosaPipelineMI, +) + +aten_op = "torch.ops.aten.ge.Tensor" +exir_op = "executorch_exir_dialects_edge__ops_aten_ge_Tensor" + +input_t = Tuple[torch.Tensor] + + +class GreaterEqual(torch.nn.Module): + def __init__(self, input, other): + super().__init__() + self.input_ = input + self.other_ = other + + def forward( self, - test_name: str, input_: torch.Tensor, other_: torch.Tensor, ): - test_data = (input_, other_) - self._test_ge_tosa_BI_pipeline( - common.get_u85_compile_spec(permute_memory_to_nhwc=True), - self.GreaterEqual(), - test_data, - ) + return input_ >= other_ + + def get_inputs(self): + return (self.input_, self.other_) + + +op_ge_rank1_ones = GreaterEqual( + torch.ones(5), + torch.ones(5), +) +op_ge_rank2_rand = GreaterEqual( + torch.rand(4, 5), + torch.rand(1, 5), +) +op_ge_rank3_randn = GreaterEqual( + torch.randn(10, 5, 2), + torch.randn(10, 5, 2), +) +op_ge_rank4_randn = GreaterEqual( + torch.randn(3, 2, 2, 2), + torch.randn(3, 2, 2, 2), +) + +test_data_common = { + "ge_rank1_ones": op_ge_rank1_ones, + "ge_rank2_rand": op_ge_rank2_rand, + "ge_rank3_randn": op_ge_rank3_randn, + "ge_rank4_randn": op_ge_rank4_randn, +} + + +@common.parametrize("test_module", test_data_common) +def test_ge_tosa_MI(test_module): + pipeline = TosaPipelineMI[input_t]( + test_module, test_module.get_inputs(), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_ge_tosa_BI(test_module): + pipeline = TosaPipelineBI[input_t]( + test_module, test_module.get_inputs(), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_ge_u55_BI(test_module): + # GREATER_EQUAL is not supported on U55. + pipeline = OpNotSupportedPipeline[input_t]( + test_module, + test_module.get_inputs(), + "TOSA-0.80+BI+u55", + {exir_op: 1}, + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_ge_u85_BI(test_module): + pipeline = EthosU85PipelineBI[input_t]( + test_module, + test_module.get_inputs(), + aten_op, + exir_op, + run_on_fvp=False, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +@pytest.mark.skip(reason="The same as test_ge_u55_BI") +def test_ge_u55_BI_on_fvp(test_module): + # GREATER_EQUAL is not supported on U55. + pipeline = OpNotSupportedPipeline[input_t]( + test_module, + test_module.get_inputs(), + "TOSA-0.80+BI+u55", + {exir_op: 1}, + ) + pipeline.run() + + +@common.parametrize( + "test_module", + test_data_common, + xfails={"ge_rank4_randn": "4D fails because boolean Tensors can't be subtracted"}, +) +@common.SkipIfNoCorstone320 +def test_ge_u85_BI_on_fvp(test_module): + pipeline = EthosU85PipelineBI[input_t]( + test_module, + test_module.get_inputs(), + aten_op, + exir_op, + run_on_fvp=True, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_gt.py b/backends/arm/test/ops/test_gt.py index 33899f64492..2095f781bdb 100644 --- a/backends/arm/test/ops/test_gt.py +++ b/backends/arm/test/ops/test_gt.py @@ -1,140 +1,136 @@ # Copyright 2025 Arm Limited and/or its affiliates. 
-# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -import unittest +from typing import Tuple +import pytest import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - -test_data_suite = [ - # (test_name, input, other,) See torch.gt() for info - ( - "op_gt_rank1_ones", - torch.ones(5), - torch.ones(5), - ), - ( - "op_gt_rank2_rand", - torch.rand(4, 5), - torch.rand(1, 5), - ), - ( - "op_gt_rank3_randn", - torch.randn(10, 5, 2), - torch.randn(10, 5, 2), - ), - ( - "op_gt_rank4_randn", - torch.randn(3, 2, 2, 2), - torch.randn(3, 2, 2, 2), - ), -] - - -class TestGreater(unittest.TestCase): - class Greater(torch.nn.Module): - def forward( - self, - input_: torch.Tensor, - other_: torch.Tensor, - ): - return input_ > other_ - - def _test_gt_tosa_pipeline( - self, module: torch.nn.Module, test_data: tuple[torch.Tensor, torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.gt.Tensor": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_gt_tosa_BI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: tuple[torch.Tensor, torch.Tensor], - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.gt.Tensor": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - @parameterized.expand(test_data_suite) - def test_gt_tosa_MI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_gt_tosa_pipeline(self.Greater(), test_data) - @parameterized.expand(test_data_suite) - def test_gt_tosa_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_gt_tosa_BI_pipeline( - common.get_tosa_compile_spec("TOSA-0.80+BI"), self.Greater(), test_data - ) - - @parameterized.expand(test_data_suite) - @unittest.skip - def test_gt_u55_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_gt_tosa_BI_pipeline( - common.get_u55_compile_spec(permute_memory_to_nhwc=True), - self.Greater(), - test_data, - ) - - @parameterized.expand(test_data_suite) - @unittest.skip - def test_gt_u85_BI( +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU85PipelineBI, + OpNotSupportedPipeline, + TosaPipelineBI, + TosaPipelineMI, +) + +aten_op = "torch.ops.aten.gt.Tensor" +exir_op = "executorch_exir_dialects_edge__ops_aten_gt_Tensor" + +input_t = Tuple[torch.Tensor] + + +class Greater(torch.nn.Module): + def __init__(self, input, other): + super().__init__() + self.input_ = input + self.other_ = other + + def forward( self, - test_name: str, input_: torch.Tensor, other_: torch.Tensor, ): - test_data = (input_, other_) - self._test_gt_tosa_BI_pipeline( - 
common.get_u85_compile_spec(permute_memory_to_nhwc=True), - self.Greater(), - test_data, - ) + return input_ > other_ + + def get_inputs(self): + return (self.input_, self.other_) + + +op_gt_rank1_ones = Greater( + torch.ones(5), + torch.ones(5), +) +op_gt_rank2_rand = Greater( + torch.rand(4, 5), + torch.rand(1, 5), +) +op_gt_rank3_randn = Greater( + torch.randn(10, 5, 2), + torch.randn(10, 5, 2), +) +op_gt_rank4_randn = Greater( + torch.randn(3, 2, 2, 2), + torch.randn(3, 2, 2, 2), +) + +test_data_common = { + "gt_rank1_ones": op_gt_rank1_ones, + "gt_rank2_rand": op_gt_rank2_rand, + "gt_rank3_randn": op_gt_rank3_randn, + "gt_rank4_randn": op_gt_rank4_randn, +} + + +@common.parametrize("test_module", test_data_common) +def test_gt_tosa_MI(test_module): + pipeline = TosaPipelineMI[input_t]( + test_module, test_module.get_inputs(), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_gt_tosa_BI(test_module): + pipeline = TosaPipelineBI[input_t]( + test_module, test_module.get_inputs(), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_gt_u55_BI(test_module): + # GREATER is not supported on U55. + pipeline = OpNotSupportedPipeline[input_t]( + test_module, + test_module.get_inputs(), + "TOSA-0.80+BI+u55", + {exir_op: 1}, + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_gt_u85_BI(test_module): + pipeline = EthosU85PipelineBI[input_t]( + test_module, + test_module.get_inputs(), + aten_op, + exir_op, + run_on_fvp=False, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +@pytest.mark.skip(reason="The same as test_gt_u55_BI") +def test_gt_u55_BI_on_fvp(test_module): + # GREATER is not supported on U55. + pipeline = OpNotSupportedPipeline[input_t]( + test_module, + test_module.get_inputs(), + "TOSA-0.80+BI+u55", + {exir_op: 1}, + ) + pipeline.run() + + +@common.parametrize( + "test_module", + test_data_common, + xfails={"gt_rank4_randn": "4D fails because boolean Tensors can't be subtracted"}, +) +@common.SkipIfNoCorstone320 +def test_gt_u85_BI_on_fvp(test_module): + pipeline = EthosU85PipelineBI[input_t]( + test_module, + test_module.get_inputs(), + aten_op, + exir_op, + run_on_fvp=True, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_le.py b/backends/arm/test/ops/test_le.py index 0710f483a0b..7e243ead620 100644 --- a/backends/arm/test/ops/test_le.py +++ b/backends/arm/test/ops/test_le.py @@ -1,140 +1,136 @@ # Copyright 2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
-import unittest +from typing import Tuple +import pytest import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - -test_data_suite = [ - # (test_name, input, other,) See torch.le() for info - ( - "op_le_rank1_ones", - torch.ones(5), - torch.ones(5), - ), - ( - "op_le_rank2_rand", - torch.rand(4, 5), - torch.rand(1, 5), - ), - ( - "op_le_rank3_randn", - torch.randn(10, 5, 2), - torch.randn(10, 5, 2), - ), - ( - "op_le_rank4_randn", - torch.randn(3, 2, 2, 2), - torch.randn(3, 2, 2, 2), - ), -] - - -class TestLessEqual(unittest.TestCase): - class LessEqual(torch.nn.Module): - def forward( - self, - input_: torch.Tensor, - other_: torch.Tensor, - ): - return torch.le(input_, other_) - - def _test_le_tosa_pipeline( - self, module: torch.nn.Module, test_data: tuple[torch.Tensor, torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.le.Tensor": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_le_tosa_BI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: tuple[torch.Tensor, torch.Tensor], - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.le.Tensor": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - @parameterized.expand(test_data_suite) - def test_le_tosa_MI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_le_tosa_pipeline(self.LessEqual(), test_data) - @parameterized.expand(test_data_suite) - def test_le_tosa_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_le_tosa_BI_pipeline( - common.get_tosa_compile_spec("TOSA-0.80+BI"), self.LessEqual(), test_data - ) - - @parameterized.expand(test_data_suite) - @unittest.skip - def test_le_u55_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_le_tosa_BI_pipeline( - common.get_u55_compile_spec(permute_memory_to_nhwc=True), - self.LessEqual(), - test_data, - ) - - @parameterized.expand(test_data_suite) - @unittest.skip - def test_le_u85_BI( +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU85PipelineBI, + OpNotSupportedPipeline, + TosaPipelineBI, + TosaPipelineMI, +) + +aten_op = "torch.ops.aten.le.Tensor" +exir_op = "executorch_exir_dialects_edge__ops_aten_le_Tensor" + +input_t = Tuple[torch.Tensor] + + +class LessEqual(torch.nn.Module): + def __init__(self, input, other): + super().__init__() + self.input_ = input + self.other_ = other + + def forward( self, - test_name: str, input_: torch.Tensor, other_: torch.Tensor, ): - test_data = (input_, other_) - self._test_le_tosa_BI_pipeline( - common.get_u85_compile_spec(permute_memory_to_nhwc=True), - self.LessEqual(), - test_data, - ) + return input_ <= other_ + + def get_inputs(self): + return 
(self.input_, self.other_) + + +op_le_rank1_ones = LessEqual( + torch.ones(5), + torch.ones(5), +) +op_le_rank2_rand = LessEqual( + torch.rand(4, 5), + torch.rand(1, 5), +) +op_le_rank3_randn = LessEqual( + torch.randn(10, 5, 2), + torch.randn(10, 5, 2), +) +op_le_rank4_randn = LessEqual( + torch.randn(3, 2, 2, 2), + torch.randn(3, 2, 2, 2), +) + +test_data_common = { + "le_rank1_ones": op_le_rank1_ones, + "le_rank2_rand": op_le_rank2_rand, + "le_rank3_randn": op_le_rank3_randn, + "le_rank4_randn": op_le_rank4_randn, +} + + +@common.parametrize("test_module", test_data_common) +def test_le_tosa_MI(test_module): + pipeline = TosaPipelineMI[input_t]( + test_module, test_module.get_inputs(), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_le_tosa_BI(test_module): + pipeline = TosaPipelineBI[input_t]( + test_module, test_module.get_inputs(), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_le_u55_BI(test_module): + # GREATER_EQUAL is not supported on U55. LE uses the GREATER_EQUAL Tosa operator. + pipeline = OpNotSupportedPipeline[input_t]( + test_module, + test_module.get_inputs(), + "TOSA-0.80+BI+u55", + {exir_op: 1}, + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_le_u85_BI(test_module): + pipeline = EthosU85PipelineBI[input_t]( + test_module, + test_module.get_inputs(), + aten_op, + exir_op, + run_on_fvp=False, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +@pytest.mark.skip(reason="The same as test_le_u55_BI") +def test_le_u55_BI_on_fvp(test_module): + # GREATER_EQUAL is not supported on U55. LE uses the GREATER_EQUAL Tosa operator. + pipeline = OpNotSupportedPipeline[input_t]( + test_module, + test_module.get_inputs(), + "TOSA-0.80+BI+u55", + {exir_op: 1}, + ) + pipeline.run() + + +@common.parametrize( + "test_module", + test_data_common, + xfails={"le_rank4_randn": "4D fails because boolean Tensors can't be subtracted"}, +) +@common.SkipIfNoCorstone320 +def test_le_u85_BI_on_fvp(test_module): + pipeline = EthosU85PipelineBI[input_t]( + test_module, + test_module.get_inputs(), + aten_op, + exir_op, + run_on_fvp=True, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() diff --git a/backends/arm/test/ops/test_lt.py b/backends/arm/test/ops/test_lt.py index 398df8c2036..cae119cd7a8 100644 --- a/backends/arm/test/ops/test_lt.py +++ b/backends/arm/test/ops/test_lt.py @@ -1,140 +1,136 @@ # Copyright 2025 Arm Limited and/or its affiliates. -# All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
-import unittest +from typing import Tuple +import pytest import torch from executorch.backends.arm.test import common -from executorch.backends.arm.test.tester.arm_tester import ArmTester -from executorch.exir.backend.compile_spec_schema import CompileSpec -from parameterized import parameterized - -test_data_suite = [ - # (test_name, input, other,) See torch.lt() for info - ( - "op_lt_rank1_ones", - torch.ones(5), - torch.ones(5), - ), - ( - "op_lt_rank2_rand", - torch.rand(4, 5), - torch.rand(1, 5), - ), - ( - "op_lt_rank3_randn", - torch.randn(10, 5, 2), - torch.randn(10, 5, 2), - ), - ( - "op_lt_rank4_randn", - torch.randn(3, 2, 2, 2), - torch.randn(3, 2, 2, 2), - ), -] - - -class TestLessThan(unittest.TestCase): - class LessThan(torch.nn.Module): - def forward( - self, - input_: torch.Tensor, - other_: torch.Tensor, - ): - return torch.lt(input_, other_) - - def _test_lt_tosa_pipeline( - self, module: torch.nn.Module, test_data: tuple[torch.Tensor, torch.Tensor] - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"), - ) - .export() - .check_count({"torch.ops.aten.lt.Tensor": 1}) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - def _test_lt_tosa_BI_pipeline( - self, - compile_spec: list[CompileSpec], - module: torch.nn.Module, - test_data: tuple[torch.Tensor, torch.Tensor], - ): - ( - ArmTester( - module, - example_inputs=test_data, - compile_spec=compile_spec, - ) - .quantize() - .export() - .check_count({"torch.ops.aten.lt.Tensor": 1}) - .check(["torch.ops.quantized_decomposed"]) - .to_edge() - .partition() - .check_count({"torch.ops.higher_order.executorch_call_delegate": 1}) - .to_executorch() - .run_method_and_compare_outputs(inputs=test_data) - ) - - @parameterized.expand(test_data_suite) - def test_lt_tosa_MI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_lt_tosa_pipeline(self.LessThan(), test_data) - @parameterized.expand(test_data_suite) - def test_lt_tosa_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_lt_tosa_BI_pipeline( - common.get_tosa_compile_spec("TOSA-0.80+BI"), self.LessThan(), test_data - ) - - @parameterized.expand(test_data_suite) - @unittest.skip - def test_lt_u55_BI( - self, - test_name: str, - input_: torch.Tensor, - other_: torch.Tensor, - ): - test_data = (input_, other_) - self._test_lt_tosa_BI_pipeline( - common.get_u55_compile_spec(permute_memory_to_nhwc=True), - self.LessThan(), - test_data, - ) - - @parameterized.expand(test_data_suite) - @unittest.skip - def test_lt_u85_BI( +from executorch.backends.arm.test.tester.test_pipeline import ( + EthosU85PipelineBI, + OpNotSupportedPipeline, + TosaPipelineBI, + TosaPipelineMI, +) + +aten_op = "torch.ops.aten.lt.Tensor" +exir_op = "executorch_exir_dialects_edge__ops_aten_lt_Tensor" + +input_t = Tuple[torch.Tensor] + + +class LessThan(torch.nn.Module): + def __init__(self, input, other): + super().__init__() + self.input_ = input + self.other_ = other + + def forward( self, - test_name: str, input_: torch.Tensor, other_: torch.Tensor, ): - test_data = (input_, other_) - self._test_lt_tosa_BI_pipeline( - common.get_u85_compile_spec(permute_memory_to_nhwc=True), - self.LessThan(), - test_data, - ) + return input_ < other_ + + def get_inputs(self): + return 
(self.input_, self.other_) + + +op_lt_rank1_ones = LessThan( + torch.ones(5), + torch.ones(5), +) +op_lt_rank2_rand = LessThan( + torch.rand(4, 5), + torch.rand(1, 5), +) +op_lt_rank3_randn = LessThan( + torch.randn(10, 5, 2), + torch.randn(10, 5, 2), +) +op_lt_rank4_randn = LessThan( + torch.randn(3, 2, 2, 2), + torch.randn(3, 2, 2, 2), +) + +test_data_common = { + "lt_rank1_ones": op_lt_rank1_ones, + "lt_rank2_rand": op_lt_rank2_rand, + "lt_rank3_randn": op_lt_rank3_randn, + "lt_rank4_randn": op_lt_rank4_randn, +} + + +@common.parametrize("test_module", test_data_common) +def test_lt_tosa_MI(test_module): + pipeline = TosaPipelineMI[input_t]( + test_module, test_module.get_inputs(), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_lt_tosa_BI(test_module): + pipeline = TosaPipelineBI[input_t]( + test_module, test_module.get_inputs(), aten_op, exir_op + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_lt_u55_BI(test_module): + # GREATER is not supported on U55. LT uses the GREATER Tosa operator. + pipeline = OpNotSupportedPipeline[input_t]( + test_module, + test_module.get_inputs(), + "TOSA-0.80+BI+u55", + {exir_op: 1}, + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +def test_lt_u85_BI(test_module): + pipeline = EthosU85PipelineBI[input_t]( + test_module, + test_module.get_inputs(), + aten_op, + exir_op, + run_on_fvp=False, + use_to_edge_transform_and_lower=True, + ) + pipeline.run() + + +@common.parametrize("test_module", test_data_common) +@pytest.mark.skip(reason="The same as test_lt_u55_BI") +def test_lt_u55_BI_on_fvp(test_module): + # GREATER is not supported on U55. LT uses the GREATER Tosa operator. + pipeline = OpNotSupportedPipeline[input_t]( + test_module, + test_module.get_inputs(), + "TOSA-0.80+BI+u55", + {exir_op: 1}, + ) + pipeline.run() + + +@common.parametrize( + "test_module", + test_data_common, + xfails={"lt_rank4_randn": "4D fails because boolean Tensors can't be subtracted"}, +) +@common.SkipIfNoCorstone320 +def test_lt_u85_BI_on_fvp(test_module): + pipeline = EthosU85PipelineBI[input_t]( + test_module, + test_module.get_inputs(), + aten_op, + exir_op, + run_on_fvp=True, + use_to_edge_transform_and_lower=True, + ) + pipeline.run()
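
Reviewer note: a minimal sketch (assumed helper name, simplified from the backend's real is_node_supported method) of what the tosa_supported_operators.py hunk means for Ethos-U55 partitioning, and what the OpNotSupportedPipeline tests above assert against it:

from executorch.exir.dialects._ops import ops as exir_ops

# Edge targets newly rejected for U55, mirroring the hunk above.
U55_UNSUPPORTED = {
    exir_ops.edge.aten.eq.Tensor,  # TOSA EQUAL
    exir_ops.edge.aten.ge.Tensor,  # TOSA GREATER_EQUAL
    exir_ops.edge.aten.gt.Tensor,  # TOSA GREATER
    exir_ops.edge.aten.le.Tensor,  # lowers via GREATER_EQUAL with swapped inputs
    exir_ops.edge.aten.lt.Tensor,  # lowers via GREATER with swapped inputs
}


def is_supported_on_u55(node) -> bool:  # illustrative name, not the real API
    # Rejected nodes stay out of the delegate, so each single-op test graph
    # keeps exactly one non-delegated edge op, i.e. the {exir_op: 1} check.
    return node.target not in U55_UNSUPPORTED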
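
Reviewer note: the U55 comments in test_le.py and test_lt.py rely on the operand-swap identities le(a, b) == ge(b, a) and lt(a, b) == gt(b, a), which is why LE and LT hit the unsupported GREATER_EQUAL and GREATER operators. A quick standalone check of those identities:

import torch

a, b = torch.randn(4, 5), torch.randn(4, 5)
# LE lowers to TOSA GREATER_EQUAL and LT to GREATER by swapping operands,
# so the eager-mode results must agree elementwise.
assert torch.equal(torch.le(a, b), torch.ge(b, a))
assert torch.equal(torch.lt(a, b), torch.gt(b, a))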