diff --git a/backends/apple/coreml/test/tester.py b/backends/apple/coreml/test/tester.py index f4a5f51ecbd..eee4c4e5893 100644 --- a/backends/apple/coreml/test/tester.py +++ b/backends/apple/coreml/test/tester.py @@ -4,23 +4,64 @@ # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. -from typing import Any, List, Optional, Tuple +from typing import Any, List, Optional, Sequence, Tuple +import coremltools as ct import executorch import executorch.backends.test.harness.stages as BaseStages - +import functools import torch + +from executorch.backends.apple.coreml.compiler import CoreMLBackend from executorch.backends.apple.coreml.partition import CoreMLPartitioner +from executorch.backends.apple.coreml.quantizer import CoreMLQuantizer from executorch.backends.test.harness import Tester as TesterBase from executorch.backends.test.harness.stages import StageType from executorch.exir import EdgeCompileConfig from executorch.exir.backend.partitioner import Partitioner +def _get_static_int8_qconfig(): + return ct.optimize.torch.quantization.LinearQuantizerConfig( + global_config=ct.optimize.torch.quantization.ModuleLinearQuantizerConfig( + quantization_scheme="symmetric", + activation_dtype=torch.quint8, + weight_dtype=torch.qint8, + weight_per_channel=True, + ) + ) + + +class Quantize(BaseStages.Quantize): + def __init__( + self, + quantizer: Optional[CoreMLQuantizer] = None, + quantization_config: Optional[Any] = None, + calibrate: bool = True, + calibration_samples: Optional[Sequence[Any]] = None, + is_qat: Optional[bool] = False, + ): + super().__init__( + quantizer=quantizer or CoreMLQuantizer(quantization_config or _get_static_int8_qconfig()), + calibrate=calibrate, + calibration_samples=calibration_samples, + is_qat=is_qat, + ) + + + class Partition(BaseStages.Partition): - def __init__(self, partitioner: Optional[Partitioner] = None): + def __init__( + self, + partitioner: Optional[Partitioner] = None, + minimum_deployment_target: Optional[Any] = ct.target.iOS15, + ): super().__init__( - partitioner=partitioner or CoreMLPartitioner, + partitioner=partitioner or CoreMLPartitioner( + compile_specs=CoreMLBackend.generate_compile_specs( + minimum_deployment_target=minimum_deployment_target + ) + ), ) @@ -29,9 +70,14 @@ def __init__( self, partitioners: Optional[List[Partitioner]] = None, edge_compile_config: Optional[EdgeCompileConfig] = None, + minimum_deployment_target: Optional[Any] = ct.target.iOS15, ): super().__init__( - default_partitioner_cls=CoreMLPartitioner, + default_partitioner_cls=lambda: CoreMLPartitioner( + compile_specs=CoreMLBackend.generate_compile_specs( + minimum_deployment_target=minimum_deployment_target + ) + ), partitioners=partitioners, edge_compile_config=edge_compile_config, ) @@ -43,13 +89,15 @@ def __init__( module: torch.nn.Module, example_inputs: Tuple[torch.Tensor], dynamic_shapes: Optional[Tuple[Any]] = None, + minimum_deployment_target: Optional[Any] = ct.target.iOS15, ): # Specialize for XNNPACK stage_classes = ( executorch.backends.test.harness.Tester.default_stage_classes() | { - StageType.PARTITION: Partition, - StageType.TO_EDGE_TRANSFORM_AND_LOWER: ToEdgeTransformAndLower, + StageType.QUANTIZE: Quantize, + StageType.PARTITION: functools.partial(Partition, minimum_deployment_target=minimum_deployment_target), + StageType.TO_EDGE_TRANSFORM_AND_LOWER: functools.partial(ToEdgeTransformAndLower, minimum_deployment_target=minimum_deployment_target), } ) diff --git 
a/backends/test/harness/stages/quantize.py b/backends/test/harness/stages/quantize.py index e03db058080..dd61d3acacb 100644 --- a/backends/test/harness/stages/quantize.py +++ b/backends/test/harness/stages/quantize.py @@ -31,7 +31,8 @@ def __init__( self.calibrate = calibrate self.calibration_samples = calibration_samples - self.quantizer.set_global(self.quantization_config) + if self.quantization_config is not None: + self.quantizer.set_global(self.quantization_config) self.converted_graph = None self.is_qat = is_qat diff --git a/backends/test/harness/tester.py b/backends/test/harness/tester.py index e418f795b35..06db1aae13d 100644 --- a/backends/test/harness/tester.py +++ b/backends/test/harness/tester.py @@ -1,6 +1,6 @@ import random from collections import Counter, OrderedDict -from typing import Any, Dict, List, Optional, Tuple, Type +from typing import Any, Callable, Dict, List, Optional, Tuple import torch @@ -33,7 +33,7 @@ def __init__( self, module: torch.nn.Module, example_inputs: Tuple[torch.Tensor], - stage_classes: Dict[StageType, Type], + stage_classes: Dict[StageType, Callable], dynamic_shapes: Optional[Tuple[Any]] = None, ): module.eval() @@ -81,7 +81,7 @@ def __init__( self.stage_output = None @staticmethod - def default_stage_classes() -> Dict[StageType, Type]: + def default_stage_classes() -> Dict[StageType, Callable]: """ Returns a map of StageType to default Stage implementation. """ diff --git a/backends/test/suite/__init__.py b/backends/test/suite/__init__.py index 86cb5a5716f..43d4e16818f 100644 --- a/backends/test/suite/__init__.py +++ b/backends/test/suite/__init__.py @@ -9,18 +9,11 @@ import logging import os -import unittest - -from enum import Enum -from typing import Callable import executorch.backends.test.suite.flow -import torch -from executorch.backends.test.suite.context import get_active_test_context, TestContext from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.reporting import log_test_summary -from executorch.backends.test.suite.runner import run_test, runner_main +from executorch.backends.test.suite.runner import runner_main logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) @@ -62,109 +55,6 @@ def get_test_flows() -> dict[str, TestFlow]: return _ALL_TEST_FLOWS -DTYPES = [ - # torch.int8, - # torch.uint8, - # torch.int16, - # torch.uint16, - # torch.int32, - # torch.uint32, - # torch.int64, - # torch.uint64, - # torch.float16, - torch.float32, - # torch.float64, -] - -FLOAT_DTYPES = [ - torch.float16, - torch.float32, - torch.float64, -] - - -# The type of test function. This controls the test generation and expected signature. -# Standard tests are run, as is. Dtype tests get a variant generated for each dtype and -# take an additional dtype parameter. -class TestType(Enum): - STANDARD = 1 - DTYPE = 2 - - -# Function annotation for dtype tests. This instructs the test framework to run the test -# for each supported dtype and to pass dtype as a test parameter. -def dtype_test(func): - func.test_type = TestType.DTYPE - return func - - -# Class annotation for operator tests. This triggers the test framework to register -# the tests. -def operator_test(cls): - _create_tests(cls) - return cls - - -# Generate test cases for each backend flow. -def _create_tests(cls): - for key in dir(cls): - if key.startswith("test_"): - _expand_test(cls, key) - - -# Expand a test into variants for each registered flow. 
-def _expand_test(cls, test_name: str):
-    test_func = getattr(cls, test_name)
-    for flow in get_test_flows().values():
-        _create_test_for_backend(cls, test_func, flow)
-    delattr(cls, test_name)
-
-
-def _make_wrapped_test(
-    test_func: Callable,
-    test_name: str,
-    flow: TestFlow,
-    params: dict | None = None,
-):
-    def wrapped_test(self):
-        with TestContext(test_name, flow.name, params):
-            test_kwargs = params or {}
-            test_kwargs["tester_factory"] = flow.tester_factory
-
-            test_func(self, **test_kwargs)
-
-    wrapped_test._name = test_name
-    wrapped_test._flow = flow
-
-    return wrapped_test
-
-
-def _create_test_for_backend(
-    cls,
-    test_func: Callable,
-    flow: TestFlow,
-):
-    test_type = getattr(test_func, "test_type", TestType.STANDARD)
-
-    if test_type == TestType.STANDARD:
-        wrapped_test = _make_wrapped_test(test_func, test_func.__name__, flow)
-        test_name = f"{test_func.__name__}_{flow.name}"
-        setattr(cls, test_name, wrapped_test)
-    elif test_type == TestType.DTYPE:
-        for dtype in DTYPES:
-            wrapped_test = _make_wrapped_test(
-                test_func,
-                test_func.__name__,
-                flow,
-                {"dtype": dtype},
-            )
-            dtype_name = str(dtype)[6:]  # strip "torch."
-            test_name = f"{test_func.__name__}_{dtype_name}_{flow.name}"
-            setattr(cls, test_name, wrapped_test)
-    else:
-        raise NotImplementedError(f"Unknown test type {test_type}.")
-
-
 def load_tests(loader, suite, pattern):
     package_dir = os.path.dirname(__file__)
     discovered_suite = loader.discover(
@@ -174,33 +64,5 @@ def load_tests(loader, suite, pattern):
     return suite
 
 
-class OperatorTest(unittest.TestCase):
-    def _test_op(self, model, inputs, tester_factory):
-        context = get_active_test_context()
-
-        # This should be set in the wrapped test. See _make_wrapped_test above.
-        assert context is not None, "Missing test context."
-
-        run_summary = run_test(
-            model,
-            inputs,
-            tester_factory,
-            context.test_name,
-            context.flow_name,
-            context.params,
-        )
-
-        log_test_summary(run_summary)
-
-        if not run_summary.result.is_success():
-            if run_summary.result.is_backend_failure():
-                raise RuntimeError("Test failure.") from run_summary.error
-            else:
-                # Non-backend failure indicates a bad test. Mark as skipped.
-                raise unittest.SkipTest(
-                    f"Test failed for reasons other than backend failure. Error: {run_summary.error}"
-                )
-
-
 if __name__ == "__main__":
     runner_main()
diff --git a/backends/test/suite/discovery.py b/backends/test/suite/discovery.py
index e7af0d0923d..f3ba26af69b 100644
--- a/backends/test/suite/discovery.py
+++ b/backends/test/suite/discovery.py
@@ -9,7 +9,9 @@
 import os
 import unittest
+from dataclasses import dataclass
 from types import ModuleType
+from typing import Pattern
 
 from executorch.backends.test.suite.flow import TestFlow
 
@@ -18,8 +20,19 @@
 #
 
+@dataclass
+class TestFilter:
+    """A set of filters for test discovery."""
+
+    backends: set[str] | None
+    """ The set of backends to include. If None, all backends are included. """
+
+    name_regex: Pattern[str] | None
+    """ A regular expression to filter test names. If None, all tests are included. """
+
+
 def discover_tests(
-    root_module: ModuleType, backends: set[str] | None
+    root_module: ModuleType, test_filter: TestFilter
 ) -> unittest.TestSuite:
     # Collect all tests using the unittest discovery mechanism then filter down.
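Note: the new TestFilter is constructed by the runner and threaded through discovery. As a minimal sketch of a call site (assuming a top-level suite package and plain unittest execution; the real wiring lives in runner.py, which this diff does not show):

    import re
    import unittest

    import executorch.backends.test.suite as test_suite
    from executorch.backends.test.suite.discovery import discover_tests, TestFilter

    # Keep only CoreML-backend tests whose test id mentions "add".
    # Either field may be None to disable that filter.
    test_filter = TestFilter(backends={"coreml"}, name_regex=re.compile("add"))
    filtered_suite = discover_tests(test_suite, test_filter)
    unittest.TextTestRunner().run(filtered_suite)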
@@ -32,20 +45,20 @@ def discover_tests(
     module_dir = os.path.dirname(module_file)
     suite = loader.discover(module_dir)
 
-    return _filter_tests(suite, backends)
+    return _filter_tests(suite, test_filter)
 
 
 def _filter_tests(
-    suite: unittest.TestSuite, backends: set[str] | None
+    suite: unittest.TestSuite, test_filter: TestFilter
 ) -> unittest.TestSuite:
     # Recursively traverse the test suite and add them to the filtered set.
     filtered_suite = unittest.TestSuite()
 
     for child in suite:
         if isinstance(child, unittest.TestSuite):
-            filtered_suite.addTest(_filter_tests(child, backends))
+            filtered_suite.addTest(_filter_tests(child, test_filter))
         elif isinstance(child, unittest.TestCase):
-            if _is_test_enabled(child, backends):
+            if _is_test_enabled(child, test_filter):
                 filtered_suite.addTest(child)
         else:
             raise RuntimeError(f"Unexpected test type: {type(child)}")
@@ -53,11 +66,27 @@ def _filter_tests(
     return filtered_suite
 
 
-def _is_test_enabled(test_case: unittest.TestCase, backends: set[str] | None) -> bool:
+def _is_test_enabled(test_case: unittest.TestCase, test_filter: TestFilter) -> bool:
     test_method = getattr(test_case, test_case._testMethodName)
-
-    if backends is not None:
-        flow: TestFlow = test_method._flow
-        return flow.backend in backends
-    else:
+
+    # Handle import / discovery failures - leave them enabled to report nicely at the
+    # top level. There might be a better way to do this. Internally, unittest seems to
+    # replace it with a stub method to report the failure.
+    if "testFailure" in str(test_method):
+        print(f"Warning: Test {test_case._testMethodName} failed to import.")
         return True
+
+    if not hasattr(test_method, "_flow"):
+        raise RuntimeError(f"Test missing flow: {test_case._testMethodName} {test_method}")
+
+    flow: TestFlow = test_method._flow
+
+    if test_filter.backends is not None and flow.backend not in test_filter.backends:
+        return False
+
+    if test_filter.name_regex is not None and not test_filter.name_regex.search(
+        test_case.id()
+    ):
+        return False
+
+    return True
diff --git a/backends/test/suite/flow.py b/backends/test/suite/flow.py
index bda85a76ffa..a9ddec22864 100644
--- a/backends/test/suite/flow.py
+++ b/backends/test/suite/flow.py
@@ -1,9 +1,10 @@
 import logging
 
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from typing import Callable
 
 from executorch.backends.test.harness import Tester
+from executorch.backends.test.harness.stages import Quantize
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)
@@ -21,42 +22,35 @@ class TestFlow:
 
     backend: str
     """ The name of the target backend. """
-
-    tester_factory: Callable[[], Tester]
+
+    tester_factory: Callable[..., Tester]
     """ A factory function that returns a Tester instance for this lowering flow. """
 
+    quantize: bool = field(default=False)
+    """ Whether the tester should run the quantize stage on the model. """
+
+    quantize_stage_factory: Callable[..., Quantize] | None = None
+    """ A factory function which instantiates a Quantize stage. Can be None to use the tester's default.
""" -def create_xnnpack_flow() -> TestFlow | None: +def all_flows() -> dict[str, TestFlow]: + flows = [] + try: - from executorch.backends.xnnpack.test.tester import Tester as XnnpackTester - - return TestFlow( - name="xnnpack", - backend="xnnpack", - tester_factory=XnnpackTester, - ) - except Exception: - logger.info("Skipping XNNPACK flow registration due to import failure.") - return None - + from executorch.backends.test.suite.flows.xnnpack import XNNPACK_TEST_FLOW, XNNPACK_STATIC_INT8_TEST_FLOW + flows += [ + XNNPACK_TEST_FLOW, + XNNPACK_STATIC_INT8_TEST_FLOW, + ] + except Exception as e: + logger.info(f"Skipping XNNPACK flow registration: {e}") -def create_coreml_flow() -> TestFlow | None: try: - from executorch.backends.apple.coreml.test.tester import CoreMLTester + from executorch.backends.test.suite.flows.coreml import COREML_TEST_FLOW, COREML_STATIC_INT8_TEST_FLOW + flows += [ + COREML_TEST_FLOW, + COREML_STATIC_INT8_TEST_FLOW, + ] + except Exception as e: + logger.info(f"Skipping Core ML flow registration: {e}") - return TestFlow( - name="coreml", - backend="coreml", - tester_factory=CoreMLTester, - ) - except Exception: - logger.info("Skipping Core ML flow registration due to import failure.") - return None - - -def all_flows() -> dict[str, TestFlow]: - flows = [ - create_xnnpack_flow(), - create_coreml_flow(), - ] return {f.name: f for f in flows if f is not None} diff --git a/backends/test/suite/flows/__init__.py b/backends/test/suite/flows/__init__.py new file mode 100644 index 00000000000..6ac1a72bde6 --- /dev/null +++ b/backends/test/suite/flows/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe diff --git a/backends/test/suite/flows/coreml.py b/backends/test/suite/flows/coreml.py new file mode 100644 index 00000000000..443457bd695 --- /dev/null +++ b/backends/test/suite/flows/coreml.py @@ -0,0 +1,24 @@ +import coremltools +import functools + +from executorch.backends.apple.coreml.test.tester import CoreMLTester +from executorch.backends.test.suite.flow import TestFlow +from typing import Any + +def _create_coreml_flow( + name: str, + quantize: bool = False, + minimum_deployment_target: Any = coremltools.target.iOS15 +) -> TestFlow: + return TestFlow( + name, + backend="coreml", + tester_factory=functools.partial(CoreMLTester, minimum_deployment_target=minimum_deployment_target), + quantize=quantize, + ) + +COREML_TEST_FLOW = _create_coreml_flow("coreml") +COREML_STATIC_INT8_TEST_FLOW = _create_coreml_flow( + "coreml_static_int8", + quantize=True, + minimum_deployment_target=coremltools.target.iOS17) diff --git a/backends/test/suite/flows/xnnpack.py b/backends/test/suite/flows/xnnpack.py new file mode 100644 index 00000000000..af079f83018 --- /dev/null +++ b/backends/test/suite/flows/xnnpack.py @@ -0,0 +1,36 @@ +from executorch.backends.test.harness.stages import Quantize +from executorch.backends.test.suite.flow import TestFlow +from executorch.backends.xnnpack.quantizer.xnnpack_quantizer import get_symmetric_quantization_config +from executorch.backends.xnnpack.test.tester import ( + Quantize as XnnpackQuantize, + Tester as XnnpackTester +) +from typing import Callable + +import logging + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +def _create_xnnpack_flow_base(name: str, quantize_stage_factory: Callable[..., Quantize] | None = None) -> TestFlow: + return TestFlow( + name, + backend="xnnpack", + tester_factory=XnnpackTester, + quantize=True, + quantize_stage_factory=quantize_stage_factory, + ) + +def _create_xnnpack_flow() -> TestFlow: + return _create_xnnpack_flow_base("xnnpack") + +def _create_xnnpack_static_int8_flow() -> TestFlow: + def create_quantize_stage() -> Quantize: + qparams = get_symmetric_quantization_config(is_per_channel=True) + return XnnpackQuantize( + quantization_config=qparams, + ) + return _create_xnnpack_flow_base("xnnpack_static_int8", create_quantize_stage) + +XNNPACK_TEST_FLOW = _create_xnnpack_flow() +XNNPACK_STATIC_INT8_TEST_FLOW = _create_xnnpack_static_int8_flow() diff --git a/backends/test/suite/models/__init__.py b/backends/test/suite/models/__init__.py new file mode 100644 index 00000000000..b33878995d7 --- /dev/null +++ b/backends/test/suite/models/__init__.py @@ -0,0 +1,135 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+
+# pyre-unsafe
+
+import itertools
+import os
+import unittest
+from typing import Any, Callable
+
+import torch
+from executorch.backends.test.harness import Tester
+from executorch.backends.test.suite import get_test_flows
+from executorch.backends.test.suite.context import get_active_test_context, TestContext
+from executorch.backends.test.suite.flow import TestFlow
+from executorch.backends.test.suite.reporting import log_test_summary
+from executorch.backends.test.suite.runner import run_test
+
+
+DTYPES: list[torch.dtype] = [
+    torch.float16,
+    torch.float32,
+    torch.float64,
+]
+
+
+def load_tests(loader, suite, pattern):
+    package_dir = os.path.dirname(__file__)
+    discovered_suite = loader.discover(
+        start_dir=package_dir, pattern=pattern or "test_*.py"
+    )
+    suite.addTests(discovered_suite)
+    return suite
+
+
+def _create_test(
+    cls,
+    test_func: Callable,
+    flow: TestFlow,
+    dtype: torch.dtype,
+    use_dynamic_shapes: bool,
+):
+    def wrapped_test(self):
+        params = {
+            "dtype": dtype,
+            "use_dynamic_shapes": use_dynamic_shapes,
+        }
+        with TestContext(test_name, flow.name, params):
+            test_func(self, flow, dtype, use_dynamic_shapes)
+
+    dtype_name = str(dtype)[6:]  # strip "torch."
+    test_name = f"{test_func.__name__}_{flow.name}_{dtype_name}"
+    if use_dynamic_shapes:
+        test_name += "_dynamic_shape"
+
+    wrapped_test._name = test_func.__name__  # type: ignore
+    wrapped_test._flow = flow  # type: ignore
+
+    setattr(cls, test_name, wrapped_test)
+
+
+# Expand a test into variants for each registered flow.
+def _expand_test(cls, test_name: str) -> None:
+    test_func = getattr(cls, test_name)
+    supports_dynamic_shapes = getattr(test_func, "supports_dynamic_shapes", True)
+    dynamic_shape_values = [True, False] if supports_dynamic_shapes else [False]
+    dtypes = getattr(test_func, "dtypes", DTYPES)
+
+    for flow, dtype, use_dynamic_shapes in itertools.product(
+        get_test_flows().values(), dtypes, dynamic_shape_values
+    ):
+        _create_test(cls, test_func, flow, dtype, use_dynamic_shapes)
+    delattr(cls, test_name)
+
+
+def model_test_cls(cls) -> Callable | None:
+    """Decorator for model tests. Handles generating test variants for each test flow and configuration."""
+    for key in dir(cls):
+        if key.startswith("test_"):
+            _expand_test(cls, key)
+    return cls
+
+
+def model_test_params(
+    supports_dynamic_shapes: bool = True,
+    dtypes: list[torch.dtype] | None = None,
+) -> Callable:
+    """Optional parameter decorator for model tests. Specifies test parameters. Only valid with a class decorated by model_test_cls."""
+
+    def inner_decorator(func: Callable) -> Callable:
+        func.supports_dynamic_shapes = supports_dynamic_shapes  # type: ignore
+
+        if dtypes is not None:
+            func.dtypes = dtypes  # type: ignore
+
+        return func
+
+    return inner_decorator
+
+
+def run_model_test(
+    model: torch.nn.Module,
+    inputs: tuple[Any],
+    flow: TestFlow,
+    dtype: torch.dtype,
+    dynamic_shapes: Any | None,
+):
+    model = model.to(dtype)
+    context = get_active_test_context()
+
+    # This should be set in the wrapped test. See _create_test above.
+    assert context is not None, "Missing test context."
+
+    run_summary = run_test(
+        model,
+        inputs,
+        flow,
+        context.test_name,
+        context.params,
+        dynamic_shapes=dynamic_shapes,
+    )
+
+    log_test_summary(run_summary)
+
+    if not run_summary.result.is_success():
+        if run_summary.result.is_backend_failure():
+            raise RuntimeError("Test failure.") from run_summary.error
+        else:
+            # Non-backend failure indicates a bad test. Mark as skipped.
+ raise unittest.SkipTest( + f"Test failed for reasons other than backend failure. Error: {run_summary.error}" + ) diff --git a/backends/test/suite/models/test_torchaudio.py b/backends/test/suite/models/test_torchaudio.py new file mode 100644 index 00000000000..11ea71b558d --- /dev/null +++ b/backends/test/suite/models/test_torchaudio.py @@ -0,0 +1,105 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import unittest +from typing import Callable, Tuple + +import torch +import torchaudio + +from executorch.backends.test.suite.flow import TestFlow +from executorch.backends.test.suite.models import ( + model_test_cls, + model_test_params, + run_model_test, +) +from torch.export import Dim + +# +# This file contains model integration tests for supported torchaudio models. +# + + +class PatchedConformer(torch.nn.Module): + """ + A lightly modified version of the top-level Conformer module, such that it can be exported. + Instead of taking lengths and computing the padding mask, it takes the padding mask directly. + See https://github.com/pytorch/audio/blob/main/src/torchaudio/models/conformer.py#L215 + """ + + def __init__(self, conformer): + super().__init__() + self.conformer = conformer + + def forward( + self, input: torch.Tensor, encoder_padding_mask: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + x = input.transpose(0, 1) + for layer in self.conformer.conformer_layers: + x = layer(x, encoder_padding_mask) + return x.transpose(0, 1) + + +@model_test_cls +class TorchAudio(unittest.TestCase): + @model_test_params(dtypes=[torch.float32], supports_dynamic_shapes=False) + def test_conformer( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + inner_model = torchaudio.models.Conformer( + input_dim=80, + num_heads=4, + ffn_dim=128, + num_layers=4, + depthwise_conv_kernel_size=31, + ) + model = PatchedConformer(inner_model) + lengths = torch.randint(1, 400, (10,)) + + encoder_padding_mask = torchaudio.models.conformer._lengths_to_padding_mask( + lengths + ) + inputs = ( + torch.rand(10, int(lengths.max()), 80), + encoder_padding_mask, + ) + + run_model_test(model, inputs, flow, dtype, None) + + @model_test_params(dtypes=[torch.float32]) + def test_wav2letter( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchaudio.models.Wav2Letter() + inputs = (torch.randn(1, 1, 1024, dtype=dtype),) + dynamic_shapes = ( + { + "x": { + 2: Dim("d", min=900, max=1024), + } + } + if use_dynamic_shapes + else None + ) + run_model_test(model, inputs, flow, dtype, dynamic_shapes) + + @unittest.skip("This model times out on all backends.") + def test_wavernn( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool, + ): + model = torchaudio.models.WaveRNN( + upsample_scales=[5, 5, 8], n_classes=512, hop_length=200 + ).eval() + + # See https://docs.pytorch.org/audio/stable/generated/torchaudio.models.WaveRNN.html#forward + inputs = ( + torch.randn(1, 1, (64 - 5 + 1) * 200), # waveform + torch.randn(1, 1, 128, 64), # specgram + ) + + run_model_test(model, inputs, flow, dtype, None) diff --git a/backends/test/suite/models/test_torchvision.py b/backends/test/suite/models/test_torchvision.py new file mode 100644 index 00000000000..fed4d31130e --- /dev/null +++ b/backends/test/suite/models/test_torchvision.py @@ -0,0 +1,172 @@ +# Copyright (c) Meta Platforms, 
Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import unittest + +import torch +import torchvision + +from executorch.backends.test.suite.flow import TestFlow +from executorch.backends.test.suite.models import ( + model_test_cls, + model_test_params, + run_model_test, +) +from torch.export import Dim + +# +# This file contains model integration tests for supported torchvision models. +# + + +@model_test_cls +class TorchVision(unittest.TestCase): + def _test_cv_model( + self, + model: torch.nn.Module, + flow: TestFlow, + dtype: torch.dtype, + use_dynamic_shapes: bool, + ): + # Test a CV model that follows the standard conventions. + inputs = (torch.randn(1, 3, 224, 224, dtype=dtype),) + + dynamic_shapes = ( + ( + { + 2: Dim("height", min=1, max=16) * 16, + 3: Dim("width", min=1, max=16) * 16, + }, + ) + if use_dynamic_shapes + else None + ) + + run_model_test(model, inputs, flow, dtype, dynamic_shapes) + + def test_alexnet( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.alexnet() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_convnext_small( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.convnext_small() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_densenet161( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.densenet161() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_efficientnet_b4( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.efficientnet_b4() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_efficientnet_v2_s( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.efficientnet_v2_s() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_googlenet( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.googlenet() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_inception_v3( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.inception_v3() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + @model_test_params(supports_dynamic_shapes=False) + def test_maxvit_t( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.maxvit_t() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_mnasnet1_0( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.mnasnet1_0() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_mobilenet_v2( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.mobilenet_v2() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_mobilenet_v3_small( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.mobilenet_v3_small() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_regnet_y_1_6gf( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.regnet_y_1_6gf() + 
self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_resnet50( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.resnet50() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_resnext50_32x4d( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.resnext50_32x4d() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_shufflenet_v2_x1_0( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.shufflenet_v2_x1_0() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_squeezenet1_1( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.squeezenet1_1() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_swin_v2_t( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.swin_v2_t() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_vgg11( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.vgg11() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + @model_test_params(supports_dynamic_shapes=False) + def test_vit_b_16( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.vit_b_16() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) + + def test_wide_resnet50_2( + self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool + ): + model = torchvision.models.wide_resnet50_2() + self._test_cv_model(model, flow, dtype, use_dynamic_shapes) diff --git a/backends/test/suite/operators/__init__.py b/backends/test/suite/operators/__init__.py index 0fb9ecd1dff..25f56fb05bc 100644 --- a/backends/test/suite/operators/__init__.py +++ b/backends/test/suite/operators/__init__.py @@ -7,7 +7,17 @@ # pyre-unsafe import os +import unittest +from enum import Enum +from typing import Callable + +import torch +from executorch.backends.test.suite import get_test_flows +from executorch.backends.test.suite.context import get_active_test_context, TestContext +from executorch.backends.test.suite.flow import TestFlow +from executorch.backends.test.suite.reporting import log_test_summary +from executorch.backends.test.suite.runner import run_test def load_tests(loader, suite, pattern): package_dir = os.path.dirname(__file__) @@ -16,3 +26,133 @@ def load_tests(loader, suite, pattern): ) suite.addTests(discovered_suite) return suite + + +DTYPES = [ + # torch.int8, + # torch.uint8, + # torch.int16, + # torch.uint16, + # torch.int32, + # torch.uint32, + # torch.int64, + # torch.uint64, + # torch.float16, + torch.float32, + # torch.float64, +] + +FLOAT_DTYPES = [ + torch.float16, + torch.float32, + torch.float64, +] + + +# The type of test function. This controls the test generation and expected signature. +# Standard tests are run, as is. Dtype tests get a variant generated for each dtype and +# take an additional dtype parameter. +class TestType(Enum): + STANDARD = 1 + DTYPE = 2 + + +# Function annotation for dtype tests. This instructs the test framework to run the test +# for each supported dtype and to pass dtype as a test parameter. +def dtype_test(func): + func.test_type = TestType.DTYPE + return func + + +# Class annotation for operator tests. This triggers the test framework to register +# the tests. 
+def operator_test(cls): + _create_tests(cls) + return cls + + +# Generate test cases for each backend flow. +def _create_tests(cls): + for key in dir(cls): + if key.startswith("test_"): + _expand_test(cls, key) + + +# Expand a test into variants for each registered flow. +def _expand_test(cls, test_name: str): + test_func = getattr(cls, test_name) + for flow in get_test_flows().values(): + _create_test_for_backend(cls, test_func, flow) + delattr(cls, test_name) + + +def _make_wrapped_test( + test_func: Callable, + test_name: str, + flow: TestFlow, + params: dict | None = None, +): + def wrapped_test(self): + with TestContext(test_name, flow.name, params): + test_kwargs = params or {} + test_kwargs["flow"] = flow + + test_func(self, **test_kwargs) + + wrapped_test._name = test_name + wrapped_test._flow = flow + + return wrapped_test + + +def _create_test_for_backend( + cls, + test_func: Callable, + flow: TestFlow, +): + test_type = getattr(test_func, "test_type", TestType.STANDARD) + + if test_type == TestType.STANDARD: + wrapped_test = _make_wrapped_test(test_func, test_func.__name__, flow) + test_name = f"{test_func.__name__}_{flow.name}" + setattr(cls, test_name, wrapped_test) + elif test_type == TestType.DTYPE: + for dtype in DTYPES: + wrapped_test = _make_wrapped_test( + test_func, + test_func.__name__, + flow, + {"dtype": dtype}, + ) + dtype_name = str(dtype)[6:] # strip "torch." + test_name = f"{test_func.__name__}_{dtype_name}_{flow.name}" + setattr(cls, test_name, wrapped_test) + else: + raise NotImplementedError(f"Unknown test type {test_type}.") + + +class OperatorTest(unittest.TestCase): + def _test_op(self, model, inputs, flow: TestFlow): + context = get_active_test_context() + + # This should be set in the wrapped test. See _make_wrapped_test above. + assert context is not None, "Missing test context." + + run_summary = run_test( + model, + inputs, + flow, + context.test_name, + context.params, + ) + + log_test_summary(run_summary) + + if not run_summary.result.is_success(): + if run_summary.result.is_backend_failure(): + raise RuntimeError("Test failure.") from run_summary.error + else: + # Non-backend failure indicates a bad test. Mark as skipped. + raise unittest.SkipTest( + f"Test failed for reasons other than backend failure. 
Error: {run_summary.error}" + ) diff --git a/backends/test/suite/operators/test_add.py b/backends/test/suite/operators/test_add.py index 970a4babbf0..decdbdd585e 100644 --- a/backends/test/suite/operators/test_add.py +++ b/backends/test/suite/operators/test_add.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch -from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -31,52 +30,52 @@ def forward(self, x, y): @operator_test class Add(OperatorTest): @dtype_test - def test_add_dtype(self, dtype, tester_factory: Callable) -> None: + def test_add_dtype(self, flow: TestFlow, dtype) -> None: self._test_op( Model(), ( (torch.rand(2, 10) * 100).to(dtype), (torch.rand(2, 10) * 100).to(dtype), ), - tester_factory, + flow, ) - def test_add_f32_bcast_first(self, tester_factory: Callable) -> None: + def test_add_f32_bcast_first(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(5), torch.randn(1, 5, 1, 5), ), - tester_factory, + flow, ) - def test_add_f32_bcast_second(self, tester_factory: Callable) -> None: + def test_add_f32_bcast_second(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(4, 4, 2, 7), torch.randn(2, 7), ), - tester_factory, + flow, ) - def test_add_f32_bcast_unary(self, tester_factory: Callable) -> None: + def test_add_f32_bcast_unary(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(5), torch.randn(1, 1, 5), ), - tester_factory, + flow, ) - def test_add_f32_alpha(self, tester_factory: Callable) -> None: + def test_add_f32_alpha(self, flow: TestFlow) -> None: self._test_op( ModelAlpha(alpha=2), ( torch.randn(1, 25), torch.randn(1, 25), ), - tester_factory, + flow, ) diff --git a/backends/test/suite/operators/test_div.py b/backends/test/suite/operators/test_div.py index 9e98775e855..1a84aaacb7a 100644 --- a/backends/test/suite/operators/test_div.py +++ b/backends/test/suite/operators/test_div.py @@ -7,11 +7,12 @@ # pyre-unsafe -from typing import Callable, Optional +from typing import Optional import torch -from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -31,7 +32,7 @@ def forward(self, x, y): @operator_test class Divide(OperatorTest): @dtype_test - def test_divide_dtype(self, dtype, tester_factory: Callable) -> None: + def test_divide_dtype(self, flow: TestFlow, dtype) -> None: self._test_op( Model(), ( @@ -40,10 +41,10 @@ def test_divide_dtype(self, dtype, tester_factory: Callable) -> None: dtype ), # Adding 0.1 to avoid division by zero ), - tester_factory, + flow, ) - def test_divide_f32_bcast_first(self, tester_factory: Callable) -> None: + def test_divide_f32_bcast_first(self, flow: TestFlow) -> None: self._test_op( Model(), ( @@ -51,10 +52,10 @@ def test_divide_f32_bcast_first(self, tester_factory: Callable) -> None: torch.randn(1, 5, 1, 5).abs() + 0.1, # Using abs and adding 0.1 to avoid division by zero ), - tester_factory, + flow, ) - def test_divide_f32_bcast_second(self, tester_factory: Callable) -> None: + def test_divide_f32_bcast_second(self, flow: TestFlow) -> None: self._test_op( Model(), ( @@ -62,10 +63,10 @@ def test_divide_f32_bcast_second(self, tester_factory: 
Callable) -> None: torch.randn(2, 7).abs() + 0.1, # Using abs and adding 0.1 to avoid division by zero ), - tester_factory, + flow, ) - def test_divide_f32_bcast_unary(self, tester_factory: Callable) -> None: + def test_divide_f32_bcast_unary(self, flow: TestFlow) -> None: self._test_op( Model(), ( @@ -73,10 +74,10 @@ def test_divide_f32_bcast_unary(self, tester_factory: Callable) -> None: torch.randn(1, 1, 5).abs() + 0.1, # Using abs and adding 0.1 to avoid division by zero ), - tester_factory, + flow, ) - def test_divide_f32_trunc(self, tester_factory: Callable) -> None: + def test_divide_f32_trunc(self, flow: TestFlow) -> None: self._test_op( ModelWithRounding(rounding_mode="trunc"), ( @@ -84,10 +85,10 @@ def test_divide_f32_trunc(self, tester_factory: Callable) -> None: torch.randn(3, 4).abs() + 0.1, # Using abs and adding 0.1 to avoid division by zero ), - tester_factory, + flow, ) - def test_divide_f32_floor(self, tester_factory: Callable) -> None: + def test_divide_f32_floor(self, flow: TestFlow) -> None: self._test_op( ModelWithRounding(rounding_mode="floor"), ( @@ -95,5 +96,5 @@ def test_divide_f32_floor(self, tester_factory: Callable) -> None: torch.randn(3, 4).abs() + 0.1, # Using abs and adding 0.1 to avoid division by zero ), - tester_factory, + flow, ) diff --git a/backends/test/suite/operators/test_elu.py b/backends/test/suite/operators/test_elu.py index 371a13aa26c..52f381994e8 100644 --- a/backends/test/suite/operators/test_elu.py +++ b/backends/test/suite/operators/test_elu.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch -from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -27,17 +26,17 @@ def forward(self, x): @operator_test class TestELU(OperatorTest): @dtype_test - def test_elu_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 100).to(dtype),), tester_factory) + def test_elu_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10) * 100).to(dtype),), flow) - def test_elu_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_elu_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_elu_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_elu_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_elu_f32_alpha(self, tester_factory: Callable) -> None: - self._test_op(Model(alpha=0.5), (torch.randn(3, 4, 5),), tester_factory) + def test_elu_f32_alpha(self, flow: TestFlow) -> None: + self._test_op(Model(alpha=0.5), (torch.randn(3, 4, 5),), flow) - def test_elu_f32_inplace(self, tester_factory: Callable) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory) + def test_elu_f32_inplace(self, flow: TestFlow) -> None: + self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) diff --git a/backends/test/suite/operators/test_gelu.py b/backends/test/suite/operators/test_gelu.py index 639b2fbb9b1..3132614aa25 100644 --- a/backends/test/suite/operators/test_gelu.py +++ b/backends/test/suite/operators/test_gelu.py @@ -7,11 +7,10 @@ # pyre-unsafe -from 
typing import Callable - import torch -from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -26,28 +25,28 @@ def forward(self, x): @operator_test class TestGELU(OperatorTest): @dtype_test - def test_gelu_dtype(self, dtype, tester_factory: Callable) -> None: + def test_gelu_dtype(self, flow: TestFlow, dtype) -> None: self._test_op( - Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), tester_factory + Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow ) - def test_gelu_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_gelu_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_gelu_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_gelu_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_gelu_f32_tanh_approximation(self, tester_factory: Callable) -> None: + def test_gelu_f32_tanh_approximation(self, flow: TestFlow) -> None: self._test_op( - Model(approximate="tanh"), (torch.randn(3, 4, 5),), tester_factory + Model(approximate="tanh"), (torch.randn(3, 4, 5),), flow ) - def test_gelu_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_gelu_f32_boundary_values(self, flow: TestFlow) -> None: # Test with specific values spanning negative and positive ranges x = torch.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) - def test_gelu_f32_tanh_boundary_values(self, tester_factory: Callable) -> None: + def test_gelu_f32_tanh_boundary_values(self, flow: TestFlow) -> None: # Test tanh approximation with specific values x = torch.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]) - self._test_op(Model(approximate="tanh"), (x,), tester_factory) + self._test_op(Model(approximate="tanh"), (x,), flow) diff --git a/backends/test/suite/operators/test_glu.py b/backends/test/suite/operators/test_glu.py index 74f46bb9532..82510f659af 100644 --- a/backends/test/suite/operators/test_glu.py +++ b/backends/test/suite/operators/test_glu.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch -from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -26,26 +25,26 @@ def forward(self, x): @operator_test class TestGLU(OperatorTest): @dtype_test - def test_glu_dtype(self, dtype, tester_factory: Callable) -> None: + def test_glu_dtype(self, flow: TestFlow, dtype) -> None: # Input must have even number of elements in the specified dimension self._test_op( - Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), tester_factory + Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow ) - def test_glu_f32_dim_last(self, tester_factory: Callable) -> None: + def test_glu_f32_dim_last(self, flow: TestFlow) -> None: # Default dim is -1 (last dimension) - self._test_op(Model(), (torch.randn(3, 4, 6),), tester_factory) + self._test_op(Model(), (torch.randn(3, 4, 6),), flow) - def test_glu_f32_dim_first(self, 
tester_factory: Callable) -> None: + def test_glu_f32_dim_first(self, flow: TestFlow) -> None: # Test with dim=0 (first dimension) - self._test_op(Model(dim=0), (torch.randn(4, 3, 5),), tester_factory) + self._test_op(Model(dim=0), (torch.randn(4, 3, 5),), flow) - def test_glu_f32_dim_middle(self, tester_factory: Callable) -> None: + def test_glu_f32_dim_middle(self, flow: TestFlow) -> None: # Test with dim=1 (middle dimension) - self._test_op(Model(dim=1), (torch.randn(3, 8, 5),), tester_factory) + self._test_op(Model(dim=1), (torch.randn(3, 8, 5),), flow) - def test_glu_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_glu_f32_boundary_values(self, flow: TestFlow) -> None: # Test with specific values spanning negative and positive ranges # Input must have even number of elements in the specified dimension x = torch.tensor([[-10.0, -5.0, -1.0, 0.0], [1.0, 5.0, 10.0, -2.0]]) - self._test_op(Model(dim=1), (x,), tester_factory) + self._test_op(Model(dim=1), (x,), flow) diff --git a/backends/test/suite/operators/test_hardsigmoid.py b/backends/test/suite/operators/test_hardsigmoid.py index f26877782db..4104d8b3f56 100644 --- a/backends/test/suite/operators/test_hardsigmoid.py +++ b/backends/test/suite/operators/test_hardsigmoid.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch -from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -26,19 +25,19 @@ def forward(self, x): @operator_test class TestHardsigmoid(OperatorTest): @dtype_test - def test_hardsigmoid_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op(Model(), ((torch.rand(2, 10)).to(dtype),), tester_factory) + def test_hardsigmoid_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10)).to(dtype),), flow) - def test_hardsigmoid_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_hardsigmoid_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_hardsigmoid_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_hardsigmoid_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_hardsigmoid_f32_inplace(self, tester_factory: Callable) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory) + def test_hardsigmoid_f32_inplace(self, flow: TestFlow) -> None: + self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) - def test_hardsigmoid_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_hardsigmoid_f32_boundary_values(self, flow: TestFlow) -> None: # Test with values that span the hardsigmoid's piecewise regions x = torch.tensor([-5.0, -3.0, -1.0, 0.0, 1.0, 3.0, 5.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_hardswish.py b/backends/test/suite/operators/test_hardswish.py index 0c2c6915760..0e6fb3b004d 100644 --- a/backends/test/suite/operators/test_hardswish.py +++ b/backends/test/suite/operators/test_hardswish.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch -from 
executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -26,19 +25,19 @@ def forward(self, x): @operator_test class TestHardswish(OperatorTest): @dtype_test - def test_hardswish_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op(Model(), ((torch.rand(2, 10)).to(dtype),), tester_factory) + def test_hardswish_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10)).to(dtype),), flow) - def test_hardswish_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_hardswish_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_hardswish_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_hardswish_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_hardswish_f32_inplace(self, tester_factory: Callable) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory) + def test_hardswish_f32_inplace(self, flow: TestFlow) -> None: + self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) - def test_hardswish_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_hardswish_f32_boundary_values(self, flow: TestFlow) -> None: # Test with values that span the hardswish's piecewise regions x = torch.tensor([-5.0, -3.0, -1.0, 0.0, 1.0, 3.0, 5.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_hardtanh.py b/backends/test/suite/operators/test_hardtanh.py index f74c52e93db..c72045a3a49 100644 --- a/backends/test/suite/operators/test_hardtanh.py +++ b/backends/test/suite/operators/test_hardtanh.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch -from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -30,24 +29,24 @@ def forward(self, x): @operator_test class TestHardtanh(OperatorTest): @dtype_test - def test_hardtanh_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 4 - 2).to(dtype),), tester_factory) + def test_hardtanh_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10) * 4 - 2).to(dtype),), flow) - def test_hardtanh_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_hardtanh_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_hardtanh_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_hardtanh_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_hardtanh_f32_custom_range(self, tester_factory: Callable) -> None: + def test_hardtanh_f32_custom_range(self, flow: TestFlow) -> None: self._test_op( - Model(min_val=-2.0, max_val=2.0), (torch.randn(3, 4, 5),), 
tester_factory + Model(min_val=-2.0, max_val=2.0), (torch.randn(3, 4, 5),), flow ) - def test_hardtanh_f32_inplace(self, tester_factory: Callable) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory) + def test_hardtanh_f32_inplace(self, flow: TestFlow) -> None: + self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) - def test_hardtanh_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_hardtanh_f32_boundary_values(self, flow: TestFlow) -> None: # Test with values that span the hardtanh's piecewise regions x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_leaky_relu.py b/backends/test/suite/operators/test_leaky_relu.py index 01d30e9c682..56c5fe463db 100644 --- a/backends/test/suite/operators/test_leaky_relu.py +++ b/backends/test/suite/operators/test_leaky_relu.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch -from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -29,24 +28,24 @@ def forward(self, x): @operator_test class TestLeakyReLU(OperatorTest): @dtype_test - def test_leaky_relu_dtype(self, dtype, tester_factory: Callable) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 2 - 1).to(dtype),), tester_factory) + def test_leaky_relu_dtype(self, flow: TestFlow, dtype) -> None: + self._test_op(Model(), ((torch.rand(2, 10) * 2 - 1).to(dtype),), flow) - def test_leaky_relu_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_leaky_relu_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_leaky_relu_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_leaky_relu_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_leaky_relu_f32_custom_slope(self, tester_factory: Callable) -> None: + def test_leaky_relu_f32_custom_slope(self, flow: TestFlow) -> None: self._test_op( - Model(negative_slope=0.1), (torch.randn(3, 4, 5),), tester_factory + Model(negative_slope=0.1), (torch.randn(3, 4, 5),), flow ) - def test_leaky_relu_f32_inplace(self, tester_factory: Callable) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory) + def test_leaky_relu_f32_inplace(self, flow: TestFlow) -> None: + self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) - def test_leaky_relu_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_leaky_relu_f32_boundary_values(self, flow: TestFlow) -> None: # Test with specific positive and negative values x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_logsigmoid.py b/backends/test/suite/operators/test_logsigmoid.py index ff6a2df83ae..5354e995149 100644 --- a/backends/test/suite/operators/test_logsigmoid.py +++ b/backends/test/suite/operators/test_logsigmoid.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch -from 
executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -22,18 +21,18 @@ def forward(self, x): @operator_test class TestLogSigmoid(OperatorTest): @dtype_test - def test_logsigmoid_dtype(self, dtype, tester_factory: Callable) -> None: + def test_logsigmoid_dtype(self, flow: TestFlow, dtype) -> None: self._test_op( - Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), tester_factory + Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow ) - def test_logsigmoid_f32_single_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(20),), tester_factory) + def test_logsigmoid_f32_single_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(20),), flow) - def test_logsigmoid_f32_multi_dim(self, tester_factory: Callable) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory) + def test_logsigmoid_f32_multi_dim(self, flow: TestFlow) -> None: + self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) - def test_logsigmoid_f32_boundary_values(self, tester_factory: Callable) -> None: + def test_logsigmoid_f32_boundary_values(self, flow: TestFlow) -> None: # Test with specific values spanning negative and positive ranges x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) - self._test_op(Model(), (x,), tester_factory) + self._test_op(Model(), (x,), flow) diff --git a/backends/test/suite/operators/test_mul.py b/backends/test/suite/operators/test_mul.py index 19d1c8e939d..bfda5b883a9 100644 --- a/backends/test/suite/operators/test_mul.py +++ b/backends/test/suite/operators/test_mul.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch -from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest +from executorch.backends.test.suite.flow import TestFlow class Model(torch.nn.Module): @@ -22,42 +21,42 @@ def forward(self, x, y): @operator_test class Multiply(OperatorTest): @dtype_test - def test_multiply_dtype(self, dtype, tester_factory: Callable) -> None: + def test_multiply_dtype(self, flow: TestFlow, dtype) -> None: self._test_op( Model(), ( (torch.rand(2, 10) * 100).to(dtype), (torch.rand(2, 10) * 100).to(dtype), ), - tester_factory, + flow, ) - def test_multiply_f32_bcast_first(self, tester_factory: Callable) -> None: + def test_multiply_f32_bcast_first(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(5), torch.randn(1, 5, 1, 5), ), - tester_factory, + flow, ) - def test_multiply_f32_bcast_second(self, tester_factory: Callable) -> None: + def test_multiply_f32_bcast_second(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(4, 4, 2, 7), torch.randn(2, 7), ), - tester_factory, + flow, ) - def test_multiply_f32_bcast_unary(self, tester_factory: Callable) -> None: + def test_multiply_f32_bcast_unary(self, flow: TestFlow) -> None: self._test_op( Model(), ( torch.randn(5), torch.randn(1, 1, 5), ), - tester_factory, + flow, ) diff --git a/backends/test/suite/operators/test_prelu.py b/backends/test/suite/operators/test_prelu.py index a9aee50bc18..75f4c1a63b7 100644 --- a/backends/test/suite/operators/test_prelu.py +++ b/backends/test/suite/operators/test_prelu.py @@ -7,11 +7,10 @@ # pyre-unsafe -from typing import Callable - import torch -from 
diff --git a/backends/test/suite/operators/test_prelu.py b/backends/test/suite/operators/test_prelu.py
index a9aee50bc18..75f4c1a63b7 100644
--- a/backends/test/suite/operators/test_prelu.py
+++ b/backends/test/suite/operators/test_prelu.py
@@ -7,11 +7,10 @@
 # pyre-unsafe
 
-from typing import Callable
-
 import torch
 
-from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.flow import TestFlow
 
 
 class Model(torch.nn.Module):
@@ -26,33 +25,33 @@
 @operator_test
 class TestPReLU(OperatorTest):
     @dtype_test
-    def test_prelu_dtype(self, dtype, tester_factory: Callable) -> None:
+    def test_prelu_dtype(self, flow: TestFlow, dtype) -> None:
         self._test_op(
-            Model().to(dtype), ((torch.rand(2, 10) * 2 - 1).to(dtype),), tester_factory
+            Model().to(dtype), ((torch.rand(2, 10) * 2 - 1).to(dtype),), flow
         )
 
-    def test_prelu_f32_single_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(20),), tester_factory)
+    def test_prelu_f32_single_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(20),), flow)
 
-    def test_prelu_f32_multi_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory)
+    def test_prelu_f32_multi_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow)
 
-    def test_prelu_f32_custom_init(self, tester_factory: Callable) -> None:
-        self._test_op(Model(init=0.1), (torch.randn(3, 4, 5),), tester_factory)
+    def test_prelu_f32_custom_init(self, flow: TestFlow) -> None:
+        self._test_op(Model(init=0.1), (torch.randn(3, 4, 5),), flow)
 
-    def test_prelu_f32_channel_shared(self, tester_factory: Callable) -> None:
+    def test_prelu_f32_channel_shared(self, flow: TestFlow) -> None:
         # Default num_parameters=1 means the parameter is shared across all channels
         self._test_op(
-            Model(num_parameters=1), (torch.randn(2, 3, 4, 5),), tester_factory
+            Model(num_parameters=1), (torch.randn(2, 3, 4, 5),), flow
         )
 
-    def test_prelu_f32_per_channel_parameter(self, tester_factory: Callable) -> None:
+    def test_prelu_f32_per_channel_parameter(self, flow: TestFlow) -> None:
         # num_parameters=3 means each channel has its own parameter (for dim=1)
         self._test_op(
-            Model(num_parameters=3), (torch.randn(2, 3, 4, 5),), tester_factory
+            Model(num_parameters=3), (torch.randn(2, 3, 4, 5),), flow
         )
 
-    def test_prelu_f32_boundary_values(self, tester_factory: Callable) -> None:
+    def test_prelu_f32_boundary_values(self, flow: TestFlow) -> None:
         # Test with specific positive and negative values
         x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0])
-        self._test_op(Model(), (x,), tester_factory)
+        self._test_op(Model(), (x,), flow)
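(For context on the `num_parameters` cases above: PyTorch's `nn.PReLU` keeps a single learnable slope when `num_parameters=1` and one slope per channel of dim 1 otherwise. A standalone check:)

```python
import torch

shared = torch.nn.PReLU(num_parameters=1)  # single slope shared across channels
per_channel = torch.nn.PReLU(num_parameters=3)  # one slope per channel (dim=1)

x = torch.randn(2, 3, 4, 5)
assert shared(x).shape == x.shape
assert per_channel(x).shape == x.shape
assert per_channel.weight.shape == torch.Size([3])
```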
diff --git a/backends/test/suite/operators/test_relu.py b/backends/test/suite/operators/test_relu.py
index ab6d93d6279..796395eaaf6 100644
--- a/backends/test/suite/operators/test_relu.py
+++ b/backends/test/suite/operators/test_relu.py
@@ -7,11 +7,10 @@
 # pyre-unsafe
 
-from typing import Callable
-
 import torch
 
-from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.flow import TestFlow
 
 
 class Model(torch.nn.Module):
@@ -26,14 +25,14 @@
 @operator_test
 class TestReLU(OperatorTest):
     @dtype_test
-    def test_relu_dtype(self, dtype, tester_factory: Callable) -> None:
-        self._test_op(Model(), ((torch.rand(2, 10) * 100).to(dtype),), tester_factory)
+    def test_relu_dtype(self, flow: TestFlow, dtype) -> None:
+        self._test_op(Model(), ((torch.rand(2, 10) * 100).to(dtype),), flow)
 
-    def test_relu_f32_single_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(20),), tester_factory)
+    def test_relu_f32_single_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(20),), flow)
 
-    def test_relu_f32_multi_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory)
+    def test_relu_f32_multi_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow)
 
-    def test_relu_f32_inplace(self, tester_factory: Callable) -> None:
-        self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory)
+    def test_relu_f32_inplace(self, flow: TestFlow) -> None:
+        self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow)
diff --git a/backends/test/suite/operators/test_sigmoid.py b/backends/test/suite/operators/test_sigmoid.py
index 7e70b30ff19..6623533dda5 100644
--- a/backends/test/suite/operators/test_sigmoid.py
+++ b/backends/test/suite/operators/test_sigmoid.py
@@ -7,11 +7,10 @@
 # pyre-unsafe
 
-from typing import Callable
-
 import torch
 
-from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.flow import TestFlow
 
 
 class Model(torch.nn.Module):
@@ -22,18 +21,18 @@
 @operator_test
 class TestSigmoid(OperatorTest):
     @dtype_test
-    def test_sigmoid_dtype(self, dtype, tester_factory: Callable) -> None:
+    def test_sigmoid_dtype(self, flow: TestFlow, dtype) -> None:
         self._test_op(
-            Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), tester_factory
+            Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow
         )
 
-    def test_sigmoid_f32_single_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(20),), tester_factory)
+    def test_sigmoid_f32_single_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(20),), flow)
 
-    def test_sigmoid_f32_multi_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory)
+    def test_sigmoid_f32_multi_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow)
 
-    def test_sigmoid_f32_boundary_values(self, tester_factory: Callable) -> None:
+    def test_sigmoid_f32_boundary_values(self, flow: TestFlow) -> None:
         # Test with specific values spanning negative and positive ranges
         x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0])
-        self._test_op(Model(), (x,), tester_factory)
+        self._test_op(Model(), (x,), flow)
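(The boundary-value inputs used by the sigmoid tests sit in the saturated tails of the curve, which is where reduced-precision backends are most likely to drift from the eager reference. A standalone illustration:)

```python
import torch

x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0])
y = torch.sigmoid(x)
# the tails saturate toward 0 and 1
assert y[0] < 1e-4
assert y[-1] > 1.0 - 1e-4
```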
diff --git a/backends/test/suite/operators/test_silu.py b/backends/test/suite/operators/test_silu.py
index a30b47a1c57..331e835433c 100644
--- a/backends/test/suite/operators/test_silu.py
+++ b/backends/test/suite/operators/test_silu.py
@@ -7,11 +7,10 @@
 # pyre-unsafe
 
-from typing import Callable
-
 import torch
 
-from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.flow import TestFlow
 
 
 class Model(torch.nn.Module):
@@ -26,19 +25,19 @@
 @operator_test
 class TestSiLU(OperatorTest):
     @dtype_test
-    def test_silu_dtype(self, dtype, tester_factory: Callable) -> None:
-        self._test_op(Model(), ((torch.randn(2, 10) * 100).to(dtype),), tester_factory)
+    def test_silu_dtype(self, flow: TestFlow, dtype) -> None:
+        self._test_op(Model(), ((torch.randn(2, 10) * 100).to(dtype),), flow)
 
-    def test_silu_f32_single_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(20),), tester_factory)
+    def test_silu_f32_single_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(20),), flow)
 
-    def test_silu_f32_multi_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory)
+    def test_silu_f32_multi_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow)
 
-    def test_silu_f32_inplace(self, tester_factory: Callable) -> None:
-        self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory)
+    def test_silu_f32_inplace(self, flow: TestFlow) -> None:
+        self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow)
 
-    def test_silu_f32_boundary_values(self, tester_factory: Callable) -> None:
+    def test_silu_f32_boundary_values(self, flow: TestFlow) -> None:
         # Test with specific values spanning negative and positive ranges
         x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0])
-        self._test_op(Model(), (x,), tester_factory)
+        self._test_op(Model(), (x,), flow)
diff --git a/backends/test/suite/operators/test_sub.py b/backends/test/suite/operators/test_sub.py
index 19884419637..fad64e7f000 100644
--- a/backends/test/suite/operators/test_sub.py
+++ b/backends/test/suite/operators/test_sub.py
@@ -7,11 +7,10 @@
 # pyre-unsafe
 
-from typing import Callable
-
 import torch
 
-from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.flow import TestFlow
 
 
 class Model(torch.nn.Module):
@@ -31,52 +30,52 @@
 @operator_test
 class Subtract(OperatorTest):
     @dtype_test
-    def test_subtract_dtype(self, dtype, tester_factory: Callable) -> None:
+    def test_subtract_dtype(self, flow: TestFlow, dtype) -> None:
         self._test_op(
             Model(),
             (
                 (torch.rand(2, 10) * 100).to(dtype),
                 (torch.rand(2, 10) * 100).to(dtype),
             ),
-            tester_factory,
+            flow,
         )
 
-    def test_subtract_f32_bcast_first(self, tester_factory: Callable) -> None:
+    def test_subtract_f32_bcast_first(self, flow: TestFlow) -> None:
         self._test_op(
             Model(),
             (
                 torch.randn(5),
                 torch.randn(1, 5, 1, 5),
             ),
-            tester_factory,
+            flow,
         )
 
-    def test_subtract_f32_bcast_second(self, tester_factory: Callable) -> None:
+    def test_subtract_f32_bcast_second(self, flow: TestFlow) -> None:
         self._test_op(
             Model(),
             (
                 torch.randn(4, 4, 2, 7),
                 torch.randn(2, 7),
             ),
-            tester_factory,
+            flow,
         )
 
-    def test_subtract_f32_bcast_unary(self, tester_factory: Callable) -> None:
+    def test_subtract_f32_bcast_unary(self, flow: TestFlow) -> None:
         self._test_op(
             Model(),
             (
                 torch.randn(5),
                 torch.randn(1, 1, 5),
             ),
-            tester_factory,
+            flow,
         )
 
-    def test_subtract_f32_alpha(self, tester_factory: Callable) -> None:
+    def test_subtract_f32_alpha(self, flow: TestFlow) -> None:
         self._test_op(
             ModelAlpha(alpha=2),
             (
                 torch.randn(1, 25),
                 torch.randn(1, 25),
             ),
-            tester_factory,
+            flow,
         )
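(For the `alpha` case above: `torch.sub`'s `alpha` argument scales the second operand before subtraction, which is presumably what `ModelAlpha(alpha=2)` exercises. A standalone check:)

```python
import torch

x, y = torch.randn(1, 25), torch.randn(1, 25)
# alpha scales the subtrahend: sub(x, y, alpha=2) == x - 2 * y
assert torch.allclose(torch.sub(x, y, alpha=2), x - 2 * y)
```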
diff --git a/backends/test/suite/operators/test_tanh.py b/backends/test/suite/operators/test_tanh.py
index 1d7889a95da..b911fcfd1a0 100644
--- a/backends/test/suite/operators/test_tanh.py
+++ b/backends/test/suite/operators/test_tanh.py
@@ -7,11 +7,10 @@
 # pyre-unsafe
 
-from typing import Callable
-
 import torch
 
-from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.flow import TestFlow
 
 
 class Model(torch.nn.Module):
@@ -22,18 +21,18 @@
 @operator_test
 class TestTanh(OperatorTest):
     @dtype_test
-    def test_tanh_dtype(self, dtype, tester_factory: Callable) -> None:
+    def test_tanh_dtype(self, flow: TestFlow, dtype) -> None:
         self._test_op(
-            Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), tester_factory
+            Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow
         )
 
-    def test_tanh_f32_single_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(20),), tester_factory)
+    def test_tanh_f32_single_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(20),), flow)
 
-    def test_tanh_f32_multi_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory)
+    def test_tanh_f32_multi_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow)
 
-    def test_tanh_f32_boundary_values(self, tester_factory: Callable) -> None:
+    def test_tanh_f32_boundary_values(self, flow: TestFlow) -> None:
         # Test with specific values spanning negative and positive ranges
         x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0])
-        self._test_op(Model(), (x,), tester_factory)
+        self._test_op(Model(), (x,), flow)
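(Each of these small modules is ultimately captured by the harness's export stage; a simplified standalone equivalent using the public `torch.export` API, shown only to illustrate what that stage wraps, not the harness's exact code path:)

```python
import torch


class Model(torch.nn.Module):
    def forward(self, x):
        return torch.tanh(x)


# roughly what the EXPORT stage produces before partitioning and lowering
ep = torch.export.export(Model().eval(), (torch.randn(2, 10),))
print(ep.graph_module.graph)
```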
diff --git a/backends/test/suite/operators/test_threshold.py b/backends/test/suite/operators/test_threshold.py
index 97c84c58404..6708fd69971 100644
--- a/backends/test/suite/operators/test_threshold.py
+++ b/backends/test/suite/operators/test_threshold.py
@@ -7,11 +7,10 @@
 # pyre-unsafe
 
-from typing import Callable
-
 import torch
 
-from executorch.backends.test.suite import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.operators import dtype_test, operator_test, OperatorTest
+from executorch.backends.test.suite.flow import TestFlow
 
 
 class Model(torch.nn.Module):
@@ -30,42 +29,42 @@
 @operator_test
 class TestThreshold(OperatorTest):
     @dtype_test
-    def test_threshold_dtype(self, dtype, tester_factory: Callable) -> None:
+    def test_threshold_dtype(self, flow: TestFlow, dtype) -> None:
         self._test_op(
-            Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), tester_factory
+            Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow
         )
 
-    def test_threshold_f32_single_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(20),), tester_factory)
+    def test_threshold_f32_single_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(20),), flow)
 
-    def test_threshold_f32_multi_dim(self, tester_factory: Callable) -> None:
-        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), tester_factory)
+    def test_threshold_f32_multi_dim(self, flow: TestFlow) -> None:
+        self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow)
 
-    def test_threshold_f32_custom_threshold(self, tester_factory: Callable) -> None:
-        self._test_op(Model(threshold=1.0), (torch.randn(3, 4, 5),), tester_factory)
+    def test_threshold_f32_custom_threshold(self, flow: TestFlow) -> None:
+        self._test_op(Model(threshold=1.0), (torch.randn(3, 4, 5),), flow)
 
-    def test_threshold_f32_custom_value(self, tester_factory: Callable) -> None:
-        self._test_op(Model(value=2.0), (torch.randn(3, 4, 5),), tester_factory)
+    def test_threshold_f32_custom_value(self, flow: TestFlow) -> None:
+        self._test_op(Model(value=2.0), (torch.randn(3, 4, 5),), flow)
 
     def test_threshold_f32_custom_threshold_value(
-        self, tester_factory: Callable
+        self, flow: TestFlow
     ) -> None:
         self._test_op(
-            Model(threshold=0.5, value=1.0), (torch.randn(3, 4, 5),), tester_factory
+            Model(threshold=0.5, value=1.0), (torch.randn(3, 4, 5),), flow
         )
 
-    def test_threshold_f32_inplace(self, tester_factory: Callable) -> None:
-        self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), tester_factory)
+    def test_threshold_f32_inplace(self, flow: TestFlow) -> None:
+        self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow)
 
-    def test_threshold_f32_boundary_values(self, tester_factory: Callable) -> None:
+    def test_threshold_f32_boundary_values(self, flow: TestFlow) -> None:
         # Test with specific values around the threshold
         x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0])
-        self._test_op(Model(), (x,), tester_factory)
+        self._test_op(Model(), (x,), flow)
 
-    def test_threshold_f32_all_params(self, tester_factory: Callable) -> None:
+    def test_threshold_f32_all_params(self, flow: TestFlow) -> None:
         # Test with all parameters customized
         self._test_op(
             Model(threshold=0.5, value=3.0, inplace=True),
             (torch.randn(3, 4, 5),),
-            tester_factory,
+            flow,
         )
diff --git a/backends/test/suite/reporting.py b/backends/test/suite/reporting.py
index d7181300873..b5a4609447e 100644
--- a/backends/test/suite/reporting.py
+++ b/backends/test/suite/reporting.py
@@ -14,23 +14,26 @@ class TestResult(IntEnum):
     EAGER_FAIL = 2
     """ The test failed due to the model failing to run in eager mode. """
+
+    QUANTIZE_FAIL = 3
+    """ The test failed due to the quantization stage failing. """
 
-    EXPORT_FAIL = 3
+    EXPORT_FAIL = 4
     """ The test failed due to the model failing to export. """
 
-    LOWER_FAIL = 4
+    LOWER_FAIL = 5
     """ The test failed due to a failure in partitioning or lowering. """
 
-    PTE_LOAD_FAIL = 5
+    PTE_LOAD_FAIL = 6
     """ The test failed due to the resulting PTE failing to load. """
 
-    PTE_RUN_FAIL = 6
+    PTE_RUN_FAIL = 7
     """ The test failed due to the resulting PTE failing to run. """
 
-    OUTPUT_MISMATCH_FAIL = 7
+    OUTPUT_MISMATCH_FAIL = 8
     """ The test failed due to a mismatch between runtime and reference outputs. """
 
-    UNKNOWN_FAIL = 8
+    UNKNOWN_FAIL = 9
     """ The test failed in an unknown or unexpected manner. """
 
     def is_success(self):
@@ -49,6 +52,8 @@ def display_name(self):
             return "Success (Undelegated)"
         elif self == TestResult.EAGER_FAIL:
             return "Fail (Eager)"
+        elif self == TestResult.QUANTIZE_FAIL:
+            return "Fail (Quantize)"
         elif self == TestResult.EXPORT_FAIL:
             return "Fail (Export)"
         elif self == TestResult.LOWER_FAIL:
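(A minimal sketch of how the renumbered enum is consumed; the sample outcomes below are made up, but `display_name` and the `Counter`-style aggregation mirror the code in this diff:)

```python
from collections import Counter

from executorch.backends.test.suite.reporting import TestResult

# hypothetical per-case outcomes from a test session
outcomes = [TestResult.QUANTIZE_FAIL, TestResult.EAGER_FAIL, TestResult.QUANTIZE_FAIL]
aggregated = Counter(outcomes)

print(f"{aggregated.get(TestResult.QUANTIZE_FAIL, 0):>5} Quantization Fail")
print(TestResult.QUANTIZE_FAIL.display_name())  # "Fail (Quantize)"
```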
diff --git a/backends/test/suite/runner.py b/backends/test/suite/runner.py
index 34a860e8f0b..5e019400131 100644
--- a/backends/test/suite/runner.py
+++ b/backends/test/suite/runner.py
@@ -1,13 +1,16 @@
 import argparse
 import importlib
+import re
 import unittest
 
-from typing import Callable
+from typing import Any, Callable
 
 import torch
 
 from executorch.backends.test.harness import Tester
-from executorch.backends.test.suite.discovery import discover_tests
+from executorch.backends.test.harness.stages import StageType
+from executorch.backends.test.suite.discovery import discover_tests, TestFilter
+from executorch.backends.test.suite.flow import TestFlow
 from executorch.backends.test.suite.reporting import (
     begin_test_session,
     complete_test_session,
@@ -19,17 +22,18 @@
 # A list of all runnable test suites and the corresponding python package.
 NAMED_SUITES = {
+    "models": "executorch.backends.test.suite.models",
     "operators": "executorch.backends.test.suite.operators",
 }
 
 
 def run_test(  # noqa: C901
     model: torch.nn.Module,
-    inputs: any,
-    tester_factory: Callable[[], Tester],
+    inputs: Any,
+    flow: TestFlow,
     test_name: str,
-    flow_name: str,
     params: dict | None,
+    dynamic_shapes: Any | None = None,
 ) -> TestCaseSummary:
     """
     Top-level test run function for a model, input set, and tester. Handles test execution
@@ -42,11 +46,13 @@ def build_result(
     ) -> TestCaseSummary:
         return TestCaseSummary(
             name=test_name,
-            flow=flow_name,
+            flow=flow.name,
             params=params,
             result=result,
            error=error,
         )
+
+    model.eval()
 
     # Ensure the model can run in eager mode.
     try:
@@ -55,12 +61,21 @@
         return build_result(TestResult.EAGER_FAIL, e)
 
     try:
-        tester = tester_factory(model, inputs)
+        tester = flow.tester_factory(model, inputs)
     except Exception as e:
         return build_result(TestResult.UNKNOWN_FAIL, e)
-
+
+    if flow.quantize:
+        try:
+            tester.quantize(flow.quantize_stage_factory() if flow.quantize_stage_factory else None)
+        except Exception as e:
+            return build_result(TestResult.QUANTIZE_FAIL, e)
+
     try:
-        tester.export()
+        # TODO Use Tester dynamic_shapes parameter once input generation can properly handle derived dims.
+        tester.export(
+            tester._get_default_stage(StageType.EXPORT, dynamic_shapes=dynamic_shapes),
+        )
     except Exception as e:
         return build_result(TestResult.EXPORT_FAIL, e)
 
@@ -117,6 +132,9 @@ def print_summary(summary: RunSummary):
     print()
     print("[Failure]")
+    print(
+        f"{summary.aggregated_results.get(TestResult.QUANTIZE_FAIL, 0):>5} Quantization Fail"
+    )
     print(
         f"{summary.aggregated_results.get(TestResult.LOWER_FAIL, 0):>5} Lowering Fail"
     )
@@ -148,18 +166,17 @@ def parse_args():
     parser.add_argument(
         "-b", "--backend", nargs="*", help="The backend or backends to test."
     )
+    parser.add_argument(
+        "-f", "--filter", nargs="?", help="A regular expression filter for test names."
+    )
     return parser.parse_args()
 
 
-def test(suite):
-    if isinstance(suite, unittest.TestSuite):
-        print(f"Suite: {suite}")
-        for t in suite:
-            test(t)
-    else:
-        print(f"Leaf: {type(suite)} {suite}")
-        print(f"  {suite.__name__}")
-        print(f"  {callable(suite)}")
+def build_test_filter(args: argparse.Namespace) -> TestFilter:
+    return TestFilter(
+        backends=set(args.backend) if args.backend is not None else None,
+        name_regex=re.compile(args.filter) if args.filter is not None else None,
+    )
 
 
 def runner_main():
@@ -172,7 +189,9 @@
     test_path = NAMED_SUITES[args.suite[0]]
     test_root = importlib.import_module(test_path)
 
-    suite = discover_tests(test_root, args.backend)
+    test_filter = build_test_filter(args)
+
+    suite = discover_tests(test_root, test_filter)
 
     unittest.TextTestRunner(verbosity=2).run(suite)
 
     summary = complete_test_session()
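(A sketch of the new name filtering end to end, using only the stdlib pieces visible in this diff; the `Namespace` values stand in for parsed CLI arguments such as `-b xnnpack -f "relu|tanh"`:)

```python
import argparse
import re

# stand-in for parse_args() output
args = argparse.Namespace(backend=["xnnpack"], filter="relu|tanh")

# mirrors build_test_filter above
backends = set(args.backend) if args.backend is not None else None
name_regex = re.compile(args.filter) if args.filter is not None else None

# discovery would keep only tests whose names match the regex
assert name_regex.search("test_relu_f32_single_dim")
assert not name_regex.search("test_sigmoid_f32_multi_dim")
```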