From a6c9a306b328d9fd208744a0d86a8807e89a592a Mon Sep 17 00:00:00 2001 From: Gregory Comer Date: Sun, 21 Sep 2025 21:08:37 -0700 Subject: [PATCH 1/9] Update [ghstack-poisoned] --- backends/test/suite/__init__.py | 6 + backends/test/suite/conftest.py | 109 ++++++++ backends/test/suite/flow.py | 3 + .../suite/generate_markdown_summary_json.py | 251 ++++++++++++++++++ backends/test/suite/models/__init__.py | 127 +++------ backends/test/suite/models/test_torchaudio.py | 127 ++++----- backends/test/suite/operators/__init__.py | 128 +++------ .../suite/operators/test_add_pytestified.py | 83 ++++++ backends/test/suite/operators/test_sub.py | 5 + backends/test/suite/runner.py | 1 + 10 files changed, 588 insertions(+), 252 deletions(-) create mode 100644 backends/test/suite/conftest.py create mode 100644 backends/test/suite/generate_markdown_summary_json.py create mode 100644 backends/test/suite/operators/test_add_pytestified.py diff --git a/backends/test/suite/__init__.py b/backends/test/suite/__init__.py index 43d4e16818f..734a6690fd2 100644 --- a/backends/test/suite/__init__.py +++ b/backends/test/suite/__init__.py @@ -11,6 +11,7 @@ import os import executorch.backends.test.suite.flow +import torch from executorch.backends.test.suite.flow import TestFlow from executorch.backends.test.suite.runner import runner_main @@ -55,6 +56,11 @@ def get_test_flows() -> dict[str, TestFlow]: return _ALL_TEST_FLOWS +def dtype_to_str(dtype: torch.dtype) -> str: + # Strip off "torch." + return str(dtype)[6:] + + def load_tests(loader, suite, pattern): package_dir = os.path.dirname(__file__) discovered_suite = loader.discover( diff --git a/backends/test/suite/conftest.py b/backends/test/suite/conftest.py new file mode 100644 index 00000000000..797e61f8785 --- /dev/null +++ b/backends/test/suite/conftest.py @@ -0,0 +1,109 @@ +import pytest +import torch + +from executorch.backends.test.suite.flow import TestFlow, all_flows +from executorch.backends.test.suite.reporting import _sum_op_counts +from executorch.backends.test.suite.runner import run_test + +from typing import Any + +BACKENDS = ["xnnpack", "coreml", "vulkan", "qnn", "arm"] + +def pytest_configure(config): + for backend in BACKENDS: + config.addinivalue_line("markers", f"backend_{backend}: mark a test as testing the {backend} backend") + +class TestRunner: + def __init__(self, flow, test_name, test_base_name): + self._flow = flow + self._test_name = test_name + self._test_base_name = test_base_name + self._subtest = 0 + self._results = [] + + def lower_and_run_model(self, model: torch.nn.Module, inputs: Any, generate_random_test_inputs=True): + run_summary = run_test( + model, + inputs, + self._flow, + self._test_name, + self._test_base_name, + self._subtest, + None, + generate_random_test_inputs=generate_random_test_inputs, + ) + + self._subtest += 1 + self._results.append(run_summary) + + if not run_summary.result.is_success(): + raise RuntimeError("Test failure.") from run_summary.error + if run_summary.result.is_backend_failure(): + raise RuntimeError("Test failure.") from run_summary.error + else: + # Non-backend failure indicates a bad test. Mark as skipped. + pytest.skip( + f"Test failed for reasons other than backend failure. 
Error: {run_summary.error}" + ) + +@pytest.fixture(params=all_flows().values(), ids=str) +def test_runner(request): + return TestRunner(request.param, request.node.name, request.node.originalname) + +@pytest.hookimpl(optionalhook=True) +def pytest_json_runtest_metadata(item, call): + metadata = { + "subtests": [] + } + + if hasattr(item, "funcargs") and "test_runner" in item.funcargs: + runner_instance = item.funcargs["test_runner"] + + for record in runner_instance._results: + subtest_metadata = {} + + error_message = "" + if record.error is not None: + error_str = str(record.error) + if len(error_str) > 400: + error_message = error_str[:200] + "..." + error_str[-200:] + else: + error_message = error_str + + subtest_metadata["Test ID"] = record.name + subtest_metadata["Test Case"] = record.base_name + subtest_metadata["Subtest"] = record.subtest_index + subtest_metadata["Flow"] = record.flow + subtest_metadata["Params"] = record.params + subtest_metadata["Result"] = record.result.to_short_str() + subtest_metadata["Result Detail"] = record.result.to_detail_str() + subtest_metadata["Error"] = error_message + subtest_metadata["Delegated"] = "True" if record.is_delegated() else "False" + subtest_metadata["Quantize Time (s)"] = ( + f"{record.quantize_time.total_seconds():.3f}" + if record.quantize_time + else None + ) + subtest_metadata["Lower Time (s)"] = ( + f"{record.lower_time.total_seconds():.3f}" if record.lower_time else None + ) + + for output_idx, error_stats in enumerate(record.tensor_error_statistics): + subtest_metadata[f"Output {output_idx} Error Max"] = f"{error_stats.error_max:.3f}" + subtest_metadata[f"Output {output_idx} Error MAE"] = f"{error_stats.error_mae:.3f}" + subtest_metadata[f"Output {output_idx} SNR"] = f"{error_stats.sqnr:.3f}" + + subtest_metadata["Delegated Nodes"] = _sum_op_counts(record.delegated_op_counts) + subtest_metadata["Undelegated Nodes"] = _sum_op_counts(record.undelegated_op_counts) + if record.delegated_op_counts: + subtest_metadata["Delegated Ops"] = dict(record.delegated_op_counts) + if record.undelegated_op_counts: + subtest_metadata["Undelegated Ops"] = dict(record.undelegated_op_counts) + subtest_metadata["PTE Size (Kb)"] = ( + f"{record.pte_size_bytes / 1000.0:.3f}" if record.pte_size_bytes else "" + ) + + metadata["subtests"].append(subtest_metadata) + + + return metadata diff --git a/backends/test/suite/flow.py b/backends/test/suite/flow.py index a4b34fee98d..05fc760683d 100644 --- a/backends/test/suite/flow.py +++ b/backends/test/suite/flow.py @@ -44,6 +44,9 @@ class TestFlow: def should_skip_test(self, test_name: str) -> bool: return any(pattern in test_name for pattern in self.skip_patterns) + def __str__(self): + return self.name + def all_flows() -> dict[str, TestFlow]: flows = [] diff --git a/backends/test/suite/generate_markdown_summary_json.py b/backends/test/suite/generate_markdown_summary_json.py new file mode 100644 index 00000000000..85b6b6d3803 --- /dev/null +++ b/backends/test/suite/generate_markdown_summary_json.py @@ -0,0 +1,251 @@ +import argparse +import csv +import functools +import json +import sys + +from dataclasses import dataclass, field + + +@dataclass +class ResultCounts: + """ + Represents aggregated result counts for each status. + """ + + total: int = 0 + passes: int = 0 + fails: int = 0 + skips: int = 0 + by_detail: dict[str, int] = field(default_factory=lambda: {}) + + def add_row(self, result_value: str, result_detail: str) -> None: + """ + Update the result counts for the specified row. 
+ """ + + self.total += 1 + + if result_value == "Pass": + self.passes += 1 + elif result_value == "Fail": + self.fails += 1 + elif result_value == "Skip": + self.skips += 1 + else: + raise RuntimeError(f"Unknown result value {result_value}") + + if result_detail: + if result_detail not in self.by_detail: + self.by_detail[result_detail] = 0 + + self.by_detail[result_detail] += 1 + + +@dataclass +class AggregatedSummary: + """ + Represents aggegrated summary data for the test run. + """ + + counts: ResultCounts + counts_by_params: dict[str, ResultCounts] + failed_tests: list[list[str]] + + +# +# A standalone script to generate a Markdown representation of a test report. +# This is primarily intended to be used with GitHub actions to generate a nice +# representation of the test results when looking at the action run. +# +# Usage: python executorch/backends/test/suite/generate_markdown_summary.py +# Markdown is written to stdout. +# + + +def aggregate_results(json_path: str) -> AggregatedSummary: + with open(json_path) as f: + data = json.load(f) + + # Count results and prepare data + counts = ResultCounts() + failed_tests = [] + counts_by_param = {} + + for test_data in data["tests"]: + result_meta = test_data.get("metadata") + if result_meta: + for subtest_meta in result_meta["subtests"]: + result = subtest_meta["Result"] + result_detail = subtest_meta.get("Result Detail") or "" + + counts.add_row(result, result_detail) + + params = subtest_meta["Params"] + if params: + if params not in counts_by_param: + counts_by_param[params] = ResultCounts() + counts_by_param[params].add_row(result, result_detail) + + if result.lower() == "fail": + failed_tests.append(subtest_meta) + + return AggregatedSummary( + counts=counts, + failed_tests=failed_tests, + counts_by_params=counts_by_param, + ) + + +def escape_for_markdown(text: str) -> str: + """ + Modify a string to properly display in a markdown table cell. + """ + if not text: + return text + + # Replace newlines with
<br /> tags
+    escaped = text.replace("\n", "<br />
") + + # Escape backslashes. + escaped = escaped.replace("\\", "\\\\") + + # Escape pipe characters that would break table structure + escaped = escaped.replace("|", "\\|") + + return escaped + + +def generate_markdown(json_path: str, exit_code: int = 0): # noqa (C901) + # Print warning if exit code is non-zero + if exit_code != 0: + print("> [!WARNING]") + print( + f"> Exit code {exit_code} was non-zero. Test process may have crashed. Check the job logs for more information.\n" + ) + + results = aggregate_results(json_path) + + # Generate Summary section + print("# Summary\n") + total_excluding_skips = results.counts.passes + results.counts.fails + pass_fraction = results.counts.passes / total_excluding_skips + fail_fraction = results.counts.fails / total_excluding_skips + print( + f"- **Pass**: {results.counts.passes}/{total_excluding_skips} ({pass_fraction*100:.2f}%)" + ) + print( + f"- **Fail**: {results.counts.fails}/{total_excluding_skips} ({fail_fraction*100:.2f}%)" + ) + print(f"- **Skip**: {results.counts.skips}") + + if results.counts_by_params: + print("\n## Results by Parameters\n") + + # Extract all unique parameter keys from the JSON strings + all_param_keys = set() + parsed_params = {} + + for params_str in results.counts_by_params.keys(): + # Parse the JSON string (it's a string representation of a dict) + params_dict = json.loads(params_str) + parsed_params[params_str] = params_dict + all_param_keys.update(params_dict.keys()) + + if parsed_params and len(parsed_params) > 1: + # Sort parameter keys for consistent column ordering + sorted_param_keys = sorted(all_param_keys) + + # Create table header + header_cols = sorted_param_keys + ["Pass", "Fail", "Skip", "Pass %"] + print("| " + " | ".join(header_cols) + " |") + print("|" + "|".join(["---"] * len(header_cols)) + "|") + + # Create table rows + for params_str, counts in results.counts_by_params.items(): + if params_str in parsed_params: + params_dict = parsed_params[params_str] + row_values = [] + + # Add parameter values + for key in sorted_param_keys: + value = params_dict.get(key, "") + row_values.append(str(value)) + + pass_fraction = counts.passes / (counts.passes + counts.fails) + + # Add count values + row_values.extend( + [ + str(counts.passes), + str(counts.fails), + str(counts.skips), + f"{pass_fraction*100:.2f}%", + ] + ) + + print("| " + " | ".join(row_values) + " |") + + print() + + print("## Failure Breakdown:") + total_rows_with_result_detail = sum(results.counts.by_detail.values()) + for detail, count in sorted(results.counts.by_detail.items()): + print(f"- **{detail}**: {count}/{total_rows_with_result_detail}") + + # Generate Failed Tests section + print("# Failed Tests\n") + if results.failed_tests: + header = build_header(results.failed_tests) + + escaped_header = [escape_for_markdown(col) for col in header.keys()] + print("| " + " | ".join(escaped_header) + " |") + print("|" + "|".join(["---"] * len(escaped_header)) + "|") + for rec in results.failed_tests: + row = build_row(rec, header) + print("| " + " | ".join(row) + " |") + else: + print("No failed tests.\n") + + +def build_header(data) -> dict[str, int]: + """ + Find the union of all keys and return a dict of header keys and indices. Try to preserve + ordering as much as possible. 
+ """ + + keys = max(data, key=len) + + header = { + k:i for (i,k) in enumerate(keys) + } + + for rec in data: + keys = set(rec.keys()) + for k in keys: + if k not in header: + header[k] = len(header) + + return header + +def build_row(rec, header: dict[str, int]) -> list[str]: + row = [""] * len(header) + for k, v in rec.items(): + row[header[k]] = escape_for_markdown(str(v)) + return row + + +def main(): + parser = argparse.ArgumentParser( + description="Generate a Markdown representation of a test report." + ) + parser.add_argument("csv_path", help="Path to the test report CSV file.") + parser.add_argument( + "--exit-code", type=int, default=0, help="Exit code from the test process." + ) + args = parser.parse_args() + generate_markdown(args.csv_path, args.exit_code) + + +if __name__ == "__main__": + main() diff --git a/backends/test/suite/models/__init__.py b/backends/test/suite/models/__init__.py index ea44275a463..15741445ea3 100644 --- a/backends/test/suite/models/__init__.py +++ b/backends/test/suite/models/__init__.py @@ -25,66 +25,37 @@ ] -def load_tests(loader, suite, pattern): - package_dir = os.path.dirname(__file__) - discovered_suite = loader.discover( - start_dir=package_dir, pattern=pattern or "test_*.py" - ) - suite.addTests(discovered_suite) - return suite - - -def _create_test( - cls, - test_func: Callable, - flow: TestFlow, - dtype: torch.dtype, - use_dynamic_shapes: bool, -): - dtype_name = str(dtype)[6:] # strip "torch." - test_name = f"{test_func.__name__}_{flow.name}_{dtype_name}" - if use_dynamic_shapes: - test_name += "_dynamic_shape" - - def wrapped_test(self): - params = { - "dtype": dtype, - "use_dynamic_shapes": use_dynamic_shapes, - } - with TestContext(test_name, test_func.__name__, flow.name, params): - if flow.should_skip_test(test_name): - raise unittest.SkipTest( - f"Skipping test due to matching flow {flow.name} skip patterns" - ) - - test_func(self, flow, dtype, use_dynamic_shapes) - - wrapped_test._name = test_func.__name__ # type: ignore - wrapped_test._flow = flow # type: ignore - - setattr(cls, test_name, wrapped_test) - - -# Expand a test into variants for each registered flow. -def _expand_test(cls, test_name: str) -> None: - test_func = getattr(cls, test_name) - supports_dynamic_shapes = getattr(test_func, "supports_dynamic_shapes", True) - dynamic_shape_values = [True, False] if supports_dynamic_shapes else [False] - dtypes = getattr(test_func, "dtypes", DTYPES) - - for flow, dtype, use_dynamic_shapes in itertools.product( - get_test_flows().values(), dtypes, dynamic_shape_values - ): - _create_test(cls, test_func, flow, dtype, use_dynamic_shapes) - delattr(cls, test_name) - - -def model_test_cls(cls) -> Callable | None: - """Decorator for model tests. 
Handles generating test variants for each test flow and configuration.""" - for key in dir(cls): - if key.startswith("test_"): - _expand_test(cls, key) - return cls +class ModelTest(unittest.TestCase): + pass + + +class TestCaseShim: + def __init__(self, test_runner): + self._test_runner = test_runner + + def _test_op(self, model, args, flow, generate_random_test_inputs=True): + self._test_runner.lower_and_run_model(model, args) + + +def wrap_test(original_func, test_type): + def wrapped_func(test_runner): + shim = TestCaseShim(test_runner) + original_func(shim, test_runner._flow) + + return wrapped_func + + +def model_test_cls(cls): + parent_module = sys.modules[cls.__module__] + + for func_name in dir(cls): + if func_name.startswith("test"): + original_func = getattr(cls, func_name) + test_type = getattr(original_func, "test_type", TestType.STANDARD) + wrapped_func = wrap_test(original_func, test_type) + setattr(parent_module, func_name, wrapped_func) + + return None def model_test_params( @@ -102,39 +73,3 @@ def inner_decorator(func: Callable) -> Callable: return func return inner_decorator - - -def run_model_test( - model: torch.nn.Module, - inputs: tuple[Any], - flow: TestFlow, - dtype: torch.dtype, - dynamic_shapes: Any | None, -): - model = model.to(dtype) - context = get_active_test_context() - - # This should be set in the wrapped test. See _create_test above. - assert context is not None, "Missing test context." - - run_summary = run_test( - model, - inputs, - flow, - context.test_name, - context.test_base_name, - 0, # subtest_index - currently unused for model tests - context.params, - dynamic_shapes=dynamic_shapes, - ) - - log_test_summary(run_summary) - - if not run_summary.result.is_success(): - if run_summary.result.is_backend_failure(): - raise RuntimeError("Test failure.") from run_summary.error - else: - # Non-backend failure indicates a bad test. Mark as skipped. - raise unittest.SkipTest( - f"Test failed for reasons other than backend failure. 
Error: {run_summary.error}" - ) diff --git a/backends/test/suite/models/test_torchaudio.py b/backends/test/suite/models/test_torchaudio.py index 69f6de4684f..a6b9a62588a 100644 --- a/backends/test/suite/models/test_torchaudio.py +++ b/backends/test/suite/models/test_torchaudio.py @@ -9,15 +9,12 @@ import unittest from typing import Tuple +import pytest import torch import torchaudio +from executorch.backends.test.suite import dtype_to_str from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.models import ( - model_test_cls, - model_test_params, - run_model_test, -) from torch.export import Dim # @@ -47,64 +44,72 @@ def forward( return x.transpose(0, 1) -@model_test_cls -class TorchAudio(unittest.TestCase): - @model_test_params(dtypes=[torch.float32], supports_dynamic_shapes=False) - def test_conformer( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - inner_model = torchaudio.models.Conformer( - input_dim=80, - num_heads=4, - ffn_dim=128, - num_layers=4, - depthwise_conv_kernel_size=31, - ) - model = PatchedConformer(inner_model) - lengths = torch.randint(1, 400, (10,)) +@pytest.mark.parametrize("dtype", [torch.float32], ids=dtype_to_str) +@pytest.mark.parametrize( + "use_dynamic_shapes", [False, True], ids=["static_shapes", "dynamic_shapes"] +) +def test_conformer(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + inner_model = torchaudio.models.Conformer( + input_dim=80, + num_heads=4, + ffn_dim=128, + num_layers=4, + depthwise_conv_kernel_size=31, + ) + model = PatchedConformer(inner_model).eval().to(dtype) + lengths = torch.randint(1, 400, (10,)) + + encoder_padding_mask = torchaudio.models.conformer._lengths_to_padding_mask(lengths) + inputs = ( + torch.rand(10, int(lengths.max()), 80), + encoder_padding_mask, + ) + + test_runner.lower_and_run_model(model, inputs) + + +@pytest.mark.parametrize("dtype", [torch.float32], ids=dtype_to_str) +@pytest.mark.parametrize( + "use_dynamic_shapes", [False, True], ids=["static_shapes", "dynamic_shapes"] +) +def test_wav2letter(flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchaudio.models.Wav2Letter().to(dtype) + inputs = (torch.randn(1, 1, 1024, dtype=dtype),) + dynamic_shapes = ( + { + "x": { + 2: Dim("d", min=900, max=1024), + } + } + if use_dynamic_shapes + else None + ) - encoder_padding_mask = torchaudio.models.conformer._lengths_to_padding_mask( - lengths - ) - inputs = ( - torch.rand(10, int(lengths.max()), 80), - encoder_padding_mask, - ) + test_runner.lower_and_run_model(model, inputs) - run_model_test(model, inputs, flow, dtype, None) - - @model_test_params(dtypes=[torch.float32]) - def test_wav2letter( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchaudio.models.Wav2Letter() - inputs = (torch.randn(1, 1, 1024, dtype=dtype),) - dynamic_shapes = ( - { - "x": { - 2: Dim("d", min=900, max=1024), - } - } - if use_dynamic_shapes - else None - ) - run_model_test(model, inputs, flow, dtype, dynamic_shapes) - - @unittest.skip("This model times out on all backends.") - def test_wavernn( - self, - flow: TestFlow, - dtype: torch.dtype, - use_dynamic_shapes: bool, - ): - model = torchaudio.models.WaveRNN( - upsample_scales=[5, 5, 8], n_classes=512, hop_length=200 - ).eval() - # See https://docs.pytorch.org/audio/stable/generated/torchaudio.models.WaveRNN.html#forward - inputs = ( - torch.randn(1, 1, (64 - 5 + 1) * 200), # waveform - torch.randn(1, 1, 128, 64), # specgram 
+@pytest.mark.parametrize("dtype", [torch.float32], ids=dtype_to_str) +@pytest.mark.parametrize( + "use_dynamic_shapes", [False, True], ids=["static_shapes", "dynamic_shapes"] +) +@unittest.skip("This model times out on all backends.") +def test_wavernn( + test_runner, + dtype: torch.dtype, + use_dynamic_shapes: bool, +): + model = ( + torchaudio.models.WaveRNN( + upsample_scales=[5, 5, 8], n_classes=512, hop_length=200 ) + .eval() + .to(dtype) + ) + + # See https://docs.pytorch.org/audio/stable/generated/torchaudio.models.WaveRNN.html#forward + inputs = ( + torch.randn(1, 1, (64 - 5 + 1) * 200), # waveform + torch.randn(1, 1, 128, 64), # specgram + ).to(dtype) - run_model_test(model, inputs, flow, dtype, None) + test_runner.lower_and_run_model(model, inputs) diff --git a/backends/test/suite/operators/__init__.py b/backends/test/suite/operators/__init__.py index 9c550b3a49c..a55e11efd2b 100644 --- a/backends/test/suite/operators/__init__.py +++ b/backends/test/suite/operators/__init__.py @@ -8,11 +8,13 @@ import copy import os +import sys import unittest from enum import Enum from typing import Callable +import pytest import torch from executorch.backends.test.suite import get_test_flows from executorch.backends.test.suite.context import get_active_test_context, TestContext @@ -66,112 +68,48 @@ def dtype_test(func): return func -# Class annotation for operator tests. This triggers the test framework to register -# the tests. -def operator_test(cls): - _create_tests(cls) - return cls - - -# Generate test cases for each backend flow. -def _create_tests(cls): - for key in dir(cls): - if key.startswith("test_"): - _expand_test(cls, key) - - -# Expand a test into variants for each registered flow. -def _expand_test(cls, test_name: str): - test_func = getattr(cls, test_name) - for flow in get_test_flows().values(): - _create_test_for_backend(cls, test_func, flow) - delattr(cls, test_name) - +class OperatorTest(unittest.TestCase): + pass -def _make_wrapped_test( - test_func: Callable, - test_name: str, - test_base_name: str, - flow: TestFlow, - params: dict | None = None, -): - def wrapped_test(self): - with TestContext(test_name, test_base_name, flow.name, params): - if flow.should_skip_test(test_name): - raise unittest.SkipTest( - f"Skipping test due to matching flow {flow.name} skip patterns" - ) - test_kwargs = copy.copy(params) or {} - test_kwargs["flow"] = flow +class TestCaseShim: + def __init__(self, test_runner): + self._test_runner = test_runner - test_func(self, **test_kwargs) + def _test_op(self, model, args, flow, generate_random_test_inputs=True): + self._test_runner.lower_and_run_model(model, args) - wrapped_test._name = test_name - wrapped_test._flow = flow - return wrapped_test +def wrap_test(original_func, test_type): + if test_type == TestType.STANDARD: + def wrapped_func(test_runner): + shim = TestCaseShim(test_runner) + original_func(shim, test_runner._flow) -def _create_test_for_backend( - cls, - test_func: Callable, - flow: TestFlow, -): - test_type = getattr(test_func, "test_type", TestType.STANDARD) + return wrapped_func + elif test_type == TestType.DTYPE: - if test_type == TestType.STANDARD: - test_name = f"{test_func.__name__}_{flow.name}" - wrapped_test = _make_wrapped_test( - test_func, test_name, test_func.__name__, flow + @pytest.mark.parametrize( + "dtype", [torch.float16, torch.float32], ids=lambda s: str(s)[6:] ) - setattr(cls, test_name, wrapped_test) - elif test_type == TestType.DTYPE: - for dtype in DTYPES: - dtype_name = str(dtype)[6:] # strip "torch." 
- test_name = f"{test_func.__name__}_{dtype_name}_{flow.name}" - wrapped_test = _make_wrapped_test( - test_func, - test_name, - test_func.__name__, - flow, - {"dtype": dtype}, - ) - setattr(cls, test_name, wrapped_test) - else: - raise NotImplementedError(f"Unknown test type {test_type}.") + def wrapped_func(test_runner, dtype): + shim = TestCaseShim(test_runner) + original_func(shim, test_runner._flow, dtype) + return wrapped_func + else: + raise ValueError() -class OperatorTest(unittest.TestCase): - def _test_op( - self, model, inputs, flow: TestFlow, generate_random_test_inputs: bool = True - ): - context = get_active_test_context() - - # This should be set in the wrapped test. See _make_wrapped_test above. - assert context is not None, "Missing test context." - - run_summary = run_test( - model, - inputs, - flow, - context.test_name, - context.test_base_name, - context.subtest_index, - context.params, - generate_random_test_inputs=generate_random_test_inputs, - ) - log_test_summary(run_summary) +def operator_test(cls): + parent_module = sys.modules[cls.__module__] - # This is reset when a new test is started - it creates the context per-test. - context.subtest_index = context.subtest_index + 1 + for func_name in dir(cls): + if func_name.startswith("test"): + original_func = getattr(cls, func_name) + test_type = getattr(original_func, "test_type", TestType.STANDARD) + wrapped_func = wrap_test(original_func, test_type) + setattr(parent_module, func_name, wrapped_func) - if not run_summary.result.is_success(): - if run_summary.result.is_backend_failure(): - raise RuntimeError("Test failure.") from run_summary.error - else: - # Non-backend failure indicates a bad test. Mark as skipped. - raise unittest.SkipTest( - f"Test failed for reasons other than backend failure. Error: {run_summary.error}" - ) + return None diff --git a/backends/test/suite/operators/test_add_pytestified.py b/backends/test/suite/operators/test_add_pytestified.py new file mode 100644 index 00000000000..56a57fec97b --- /dev/null +++ b/backends/test/suite/operators/test_add_pytestified.py @@ -0,0 +1,83 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + + +import pytest +import torch +from executorch.backends.test.suite.flow import TestFlow + +from executorch.backends.test.suite.operators import ( + dtype_test, + operator_test, + OperatorTest, +) + + +class Model(torch.nn.Module): + def forward(self, x, y): + return x + y + + +class ModelAlpha(torch.nn.Module): + def __init__(self, alpha): + super().__init__() + self.alpha = alpha + + def forward(self, x, y): + return torch.add(x, y, alpha=self.alpha) + + +@pytest.mark.parametrize( + "dtype", + [torch.float16, torch.float32], + ids=lambda s: str(s)[6:] +) +def test_add_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model(), + ( + (torch.rand(2, 10) * 100).to(dtype), + (torch.rand(2, 10) * 100).to(dtype), + ), + ) + +def test_add_f32_bcast_first(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(5), + torch.randn(1, 5, 1, 5), + ), + ) + +def test_add_f32_bcast_second(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(4, 4, 2, 7), + torch.randn(2, 7), + ), + ) + +def test_add_f32_bcast_unary(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(5), + torch.randn(1, 1, 5), + ), + ) + +def test_add_f32_alpha(test_runner) -> None: + test_runner.lower_and_run_model( + ModelAlpha(alpha=2), + ( + torch.randn(1, 25), + torch.randn(1, 25), + ), + ) diff --git a/backends/test/suite/operators/test_sub.py b/backends/test/suite/operators/test_sub.py index be7b871fdad..839c28bc2c4 100644 --- a/backends/test/suite/operators/test_sub.py +++ b/backends/test/suite/operators/test_sub.py @@ -7,6 +7,10 @@ # pyre-unsafe +import sys +import unittest + +import pytest import torch from executorch.backends.test.suite.flow import TestFlow @@ -14,6 +18,7 @@ dtype_test, operator_test, OperatorTest, + TestType, ) diff --git a/backends/test/suite/runner.py b/backends/test/suite/runner.py index a6d7d07bce0..ed1e091e894 100644 --- a/backends/test/suite/runner.py +++ b/backends/test/suite/runner.py @@ -122,6 +122,7 @@ def build_result( # Ensure the model can run in eager mode. 
try: + print(f"Running model with flow {flow}") model(*inputs) except Exception as e: return build_result(TestResult.SKIPPED, e) From ee307af9bb80dc1f833c35236566cd2c5091b22b Mon Sep 17 00:00:00 2001 From: Gregory James Comer Date: Mon, 22 Sep 2025 17:04:42 -0700 Subject: [PATCH 2/9] Update [ghstack-poisoned] --- .ci/scripts/test_backend_linux.sh | 4 +- .ci/scripts/test_backend_macos.sh | 4 +- backends/test/suite/conftest.py | 79 +++-- .../suite/generate_markdown_summary_json.py | 10 +- backends/test/suite/models/__init__.py | 68 ---- backends/test/suite/models/test_torchaudio.py | 19 +- .../test/suite/models/test_torchvision.py | 320 ++++++++++-------- backends/test/suite/operators/__init__.py | 7 - backends/test/suite/operators/test_add.py | 111 +++--- .../suite/operators/test_add_pytestified.py | 83 ----- backends/test/suite/operators/test_sub.py | 6 - 11 files changed, 296 insertions(+), 415 deletions(-) delete mode 100644 backends/test/suite/operators/test_add_pytestified.py diff --git a/.ci/scripts/test_backend_linux.sh b/.ci/scripts/test_backend_linux.sh index d230860875d..eec8bde41bd 100755 --- a/.ci/scripts/test_backend_linux.sh +++ b/.ci/scripts/test_backend_linux.sh @@ -54,7 +54,7 @@ fi PYTHON_EXECUTABLE=python CMAKE_ARGS="$EXTRA_BUILD_ARGS" .ci/scripts/setup-linux.sh --build-tool cmake --build-mode Release --editable true EXIT_CODE=0 -python -m executorch.backends.test.suite.runner $SUITE --flow $FLOW --report "$REPORT_FILE" || EXIT_CODE=$? +pytest -c /dev/nul backends/test/suite/$SUITE/ -m flow_$FLOW --json-report --json-report-file "$REPORT_FILE" || EXIT_CODE=$? # Generate markdown summary. -python -m executorch.backends.test.suite.generate_markdown_summary "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE +python -m executorch.backends.test.suite.generate_markdown_summary_json "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE diff --git a/.ci/scripts/test_backend_macos.sh b/.ci/scripts/test_backend_macos.sh index c31fd504b03..a5c91b34e6a 100755 --- a/.ci/scripts/test_backend_macos.sh +++ b/.ci/scripts/test_backend_macos.sh @@ -24,7 +24,7 @@ PYTHON_EXECUTABLE=python ${CONDA_RUN} --no-capture-output .ci/scripts/setup-macos.sh --build-tool cmake --build-mode Release EXIT_CODE=0 -${CONDA_RUN} --no-capture-output python -m executorch.backends.test.suite.runner $SUITE --flow $FLOW --report "$REPORT_FILE" || EXIT_CODE=$? +pytest -c /dev/nul backends/test/suite/$SUITE/ -m flow_$FLOW --json-report --json-report-file "$REPORT_FILE" || EXIT_CODE=$? # Generate markdown summary. 
-${CONDA_RUN} --no-capture-output python -m executorch.backends.test.suite.generate_markdown_summary "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE +python -m executorch.backends.test.suite.generate_markdown_summary_json "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE diff --git a/backends/test/suite/conftest.py b/backends/test/suite/conftest.py index 797e61f8785..88cad8b3b5d 100644 --- a/backends/test/suite/conftest.py +++ b/backends/test/suite/conftest.py @@ -1,18 +1,30 @@ +from typing import Any + import pytest import torch -from executorch.backends.test.suite.flow import TestFlow, all_flows +from executorch.backends.test.suite.flow import all_flows from executorch.backends.test.suite.reporting import _sum_op_counts from executorch.backends.test.suite.runner import run_test -from typing import Any - -BACKENDS = ["xnnpack", "coreml", "vulkan", "qnn", "arm"] def pytest_configure(config): - for backend in BACKENDS: - config.addinivalue_line("markers", f"backend_{backend}: mark a test as testing the {backend} backend") - + backends = set() + + for flow in all_flows().values(): + config.addinivalue_line( + "markers", + f"flow_{flow.name}: mark a test as testing the {flow.name} flow", + ) + + if flow.backend not in backends: + config.addinivalue_line( + "markers", + f"backend_{flow.backend}: mark a test as testing the {flow.backend} backend", + ) + backends.add(flow.backend) + + class TestRunner: def __init__(self, flow, test_name, test_base_name): self._flow = flow @@ -21,7 +33,13 @@ def __init__(self, flow, test_name, test_base_name): self._subtest = 0 self._results = [] - def lower_and_run_model(self, model: torch.nn.Module, inputs: Any, generate_random_test_inputs=True): + def lower_and_run_model( + self, + model: torch.nn.Module, + inputs: Any, + generate_random_test_inputs=True, + dynamic_shapes=None, + ): run_summary = run_test( model, inputs, @@ -31,13 +49,13 @@ def lower_and_run_model(self, model: torch.nn.Module, inputs: Any, generate_rand self._subtest, None, generate_random_test_inputs=generate_random_test_inputs, + dynamic_shapes=dynamic_shapes, ) self._subtest += 1 self._results.append(run_summary) if not run_summary.result.is_success(): - raise RuntimeError("Test failure.") from run_summary.error if run_summary.result.is_backend_failure(): raise RuntimeError("Test failure.") from run_summary.error else: @@ -46,15 +64,27 @@ def lower_and_run_model(self, model: torch.nn.Module, inputs: Any, generate_rand f"Test failed for reasons other than backend failure. 
Error: {run_summary.error}" ) -@pytest.fixture(params=all_flows().values(), ids=str) + +@pytest.fixture( + params=[ + pytest.param( + f, + marks=[ + getattr(pytest.mark, f"flow_{f.name}"), + getattr(pytest.mark, f"backend_{f.backend}"), + ], + ) + for f in all_flows().values() + ], + ids=str, +) def test_runner(request): return TestRunner(request.param, request.node.name, request.node.originalname) + @pytest.hookimpl(optionalhook=True) def pytest_json_runtest_metadata(item, call): - metadata = { - "subtests": [] - } + metadata = {"subtests": []} if hasattr(item, "funcargs") and "test_runner" in item.funcargs: runner_instance = item.funcargs["test_runner"] @@ -85,16 +115,26 @@ def pytest_json_runtest_metadata(item, call): else None ) subtest_metadata["Lower Time (s)"] = ( - f"{record.lower_time.total_seconds():.3f}" if record.lower_time else None + f"{record.lower_time.total_seconds():.3f}" + if record.lower_time + else None ) for output_idx, error_stats in enumerate(record.tensor_error_statistics): - subtest_metadata[f"Output {output_idx} Error Max"] = f"{error_stats.error_max:.3f}" - subtest_metadata[f"Output {output_idx} Error MAE"] = f"{error_stats.error_mae:.3f}" + subtest_metadata[f"Output {output_idx} Error Max"] = ( + f"{error_stats.error_max:.3f}" + ) + subtest_metadata[f"Output {output_idx} Error MAE"] = ( + f"{error_stats.error_mae:.3f}" + ) subtest_metadata[f"Output {output_idx} SNR"] = f"{error_stats.sqnr:.3f}" - subtest_metadata["Delegated Nodes"] = _sum_op_counts(record.delegated_op_counts) - subtest_metadata["Undelegated Nodes"] = _sum_op_counts(record.undelegated_op_counts) + subtest_metadata["Delegated Nodes"] = _sum_op_counts( + record.delegated_op_counts + ) + subtest_metadata["Undelegated Nodes"] = _sum_op_counts( + record.undelegated_op_counts + ) if record.delegated_op_counts: subtest_metadata["Delegated Ops"] = dict(record.delegated_op_counts) if record.undelegated_op_counts: @@ -104,6 +144,5 @@ def pytest_json_runtest_metadata(item, call): ) metadata["subtests"].append(subtest_metadata) - - + return metadata diff --git a/backends/test/suite/generate_markdown_summary_json.py b/backends/test/suite/generate_markdown_summary_json.py index 85b6b6d3803..f0ac16d27fc 100644 --- a/backends/test/suite/generate_markdown_summary_json.py +++ b/backends/test/suite/generate_markdown_summary_json.py @@ -1,8 +1,5 @@ import argparse -import csv -import functools import json -import sys from dataclasses import dataclass, field @@ -216,18 +213,17 @@ def build_header(data) -> dict[str, int]: keys = max(data, key=len) - header = { - k:i for (i,k) in enumerate(keys) - } + header = {k: i for (i, k) in enumerate(keys)} for rec in data: keys = set(rec.keys()) for k in keys: if k not in header: header[k] = len(header) - + return header + def build_row(rec, header: dict[str, int]) -> list[str]: row = [""] * len(header) for k, v in rec.items(): diff --git a/backends/test/suite/models/__init__.py b/backends/test/suite/models/__init__.py index 15741445ea3..6ac1a72bde6 100644 --- a/backends/test/suite/models/__init__.py +++ b/backends/test/suite/models/__init__.py @@ -5,71 +5,3 @@ # LICENSE file in the root directory of this source tree. 
# pyre-unsafe - -import itertools -import os -import unittest -from typing import Any, Callable - -import torch -from executorch.backends.test.suite import get_test_flows -from executorch.backends.test.suite.context import get_active_test_context, TestContext -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.reporting import log_test_summary -from executorch.backends.test.suite.runner import run_test - - -DTYPES: list[torch.dtype] = [ - torch.float16, - torch.float32, -] - - -class ModelTest(unittest.TestCase): - pass - - -class TestCaseShim: - def __init__(self, test_runner): - self._test_runner = test_runner - - def _test_op(self, model, args, flow, generate_random_test_inputs=True): - self._test_runner.lower_and_run_model(model, args) - - -def wrap_test(original_func, test_type): - def wrapped_func(test_runner): - shim = TestCaseShim(test_runner) - original_func(shim, test_runner._flow) - - return wrapped_func - - -def model_test_cls(cls): - parent_module = sys.modules[cls.__module__] - - for func_name in dir(cls): - if func_name.startswith("test"): - original_func = getattr(cls, func_name) - test_type = getattr(original_func, "test_type", TestType.STANDARD) - wrapped_func = wrap_test(original_func, test_type) - setattr(parent_module, func_name, wrapped_func) - - return None - - -def model_test_params( - supports_dynamic_shapes: bool = True, - dtypes: list[torch.dtype] | None = None, -) -> Callable: - """Optional parameter decorator for model tests. Specifies test pararameters. Only valid with a class decorated by model_test_cls.""" - - def inner_decorator(func: Callable) -> Callable: - func.supports_dynamic_shapes = supports_dynamic_shapes # type: ignore - - if dtypes is not None: - func.dtypes = dtypes # type: ignore - - return func - - return inner_decorator diff --git a/backends/test/suite/models/test_torchaudio.py b/backends/test/suite/models/test_torchaudio.py index a6b9a62588a..2287b226c37 100644 --- a/backends/test/suite/models/test_torchaudio.py +++ b/backends/test/suite/models/test_torchaudio.py @@ -14,7 +14,6 @@ import torchaudio from executorch.backends.test.suite import dtype_to_str -from executorch.backends.test.suite.flow import TestFlow from torch.export import Dim # @@ -45,9 +44,7 @@ def forward( @pytest.mark.parametrize("dtype", [torch.float32], ids=dtype_to_str) -@pytest.mark.parametrize( - "use_dynamic_shapes", [False, True], ids=["static_shapes", "dynamic_shapes"] -) +@pytest.mark.parametrize("use_dynamic_shapes", [False], ids=["static_shapes"]) def test_conformer(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): inner_model = torchaudio.models.Conformer( input_dim=80, @@ -72,7 +69,7 @@ def test_conformer(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): @pytest.mark.parametrize( "use_dynamic_shapes", [False, True], ids=["static_shapes", "dynamic_shapes"] ) -def test_wav2letter(flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool): +def test_wav2letter(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): model = torchaudio.models.Wav2Letter().to(dtype) inputs = (torch.randn(1, 1, 1024, dtype=dtype),) dynamic_shapes = ( @@ -85,13 +82,11 @@ def test_wav2letter(flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool else None ) - test_runner.lower_and_run_model(model, inputs) + test_runner.lower_and_run_model(model, inputs, dynamic_shapes=dynamic_shapes) @pytest.mark.parametrize("dtype", [torch.float32], ids=dtype_to_str) -@pytest.mark.parametrize( - "use_dynamic_shapes", [False, 
True], ids=["static_shapes", "dynamic_shapes"] -) +@pytest.mark.parametrize("use_dynamic_shapes", [False], ids=["static_shapes"]) @unittest.skip("This model times out on all backends.") def test_wavernn( test_runner, @@ -108,8 +103,8 @@ def test_wavernn( # See https://docs.pytorch.org/audio/stable/generated/torchaudio.models.WaveRNN.html#forward inputs = ( - torch.randn(1, 1, (64 - 5 + 1) * 200), # waveform - torch.randn(1, 1, 128, 64), # specgram - ).to(dtype) + torch.randn(1, 1, (64 - 5 + 1) * 200).to(dtype), # waveform + torch.randn(1, 1, 128, 64).to(dtype), # specgram + ) test_runner.lower_and_run_model(model, inputs) diff --git a/backends/test/suite/models/test_torchvision.py b/backends/test/suite/models/test_torchvision.py index e69de80a871..58cf6a990d4 100644 --- a/backends/test/suite/models/test_torchvision.py +++ b/backends/test/suite/models/test_torchvision.py @@ -6,17 +6,12 @@ # pyre-unsafe -import unittest +import pytest import torch import torchvision +from executorch.backends.test.suite import dtype_to_str -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.models import ( - model_test_cls, - model_test_params, - run_model_test, -) from torch.export import Dim # @@ -25,148 +20,175 @@ # multiple size variants, one small or medium variant is used. # +PARAMETERIZE_DTYPE = pytest.mark.parametrize("dtype", [torch.float32], ids=dtype_to_str) +PARAMETERIZE_DYNAMIC_SHAPES = pytest.mark.parametrize( + "use_dynamic_shapes", [False, True], ids=["static_shapes", "dynamic_shapes"] +) +PARAMETERIZE_STATIC_ONLY = pytest.mark.parametrize( + "use_dynamic_shapes", [False], ids=["static_shapes"] +) + + +def _test_cv_model( + model: torch.nn.Module, + test_runner, + dtype: torch.dtype, + use_dynamic_shapes: bool, +): + model = model.eval().to(dtype) + + # Test a CV model that follows the standard conventions. + inputs = (torch.randn(1, 3, 224, 224, dtype=dtype),) -@model_test_cls -class TorchVision(unittest.TestCase): - def _test_cv_model( - self, - model: torch.nn.Module, - flow: TestFlow, - dtype: torch.dtype, - use_dynamic_shapes: bool, - ): - # Test a CV model that follows the standard conventions. 
- inputs = (torch.randn(1, 3, 224, 224, dtype=dtype),) - - dynamic_shapes = ( - ( - { - 2: Dim("height", min=1, max=16) * 16, - 3: Dim("width", min=1, max=16) * 16, - }, - ) - if use_dynamic_shapes - else None + dynamic_shapes = ( + ( + { + 2: Dim("height", min=1, max=16) * 16, + 3: Dim("width", min=1, max=16) * 16, + }, ) + if use_dynamic_shapes + else None + ) + + test_runner.lower_and_run_model(model, inputs, dynamic_shapes=dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_alexnet(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.alexnet() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_convnext_small(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.convnext_small() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_densenet161(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.densenet161() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_efficientnet_b4(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.efficientnet_b4() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_efficientnet_v2_s(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.efficientnet_v2_s() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_googlenet(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.googlenet() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_inception_v3(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.inception_v3() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_STATIC_ONLY +def test_maxvit_t(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.maxvit_t() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_mnasnet1_0(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.mnasnet1_0() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_mobilenet_v2(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.mobilenet_v2() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_mobilenet_v3_small(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.mobilenet_v3_small() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_regnet_y_1_6gf(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.regnet_y_1_6gf() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_resnet50(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = 
torchvision.models.resnet50() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_resnext50_32x4d(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.resnext50_32x4d() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_shufflenet_v2_x1_0(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.shufflenet_v2_x1_0() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_squeezenet1_1(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.squeezenet1_1() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_swin_v2_t(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.swin_v2_t() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_vgg11(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.vgg11() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + + +@PARAMETERIZE_DTYPE +@PARAMETERIZE_STATIC_ONLY +def test_vit_b_16(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.vit_b_16() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) + - run_model_test(model, inputs, flow, dtype, dynamic_shapes) - - def test_alexnet( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.alexnet() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_convnext_small( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.convnext_small() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_densenet161( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.densenet161() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_efficientnet_b4( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.efficientnet_b4() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_efficientnet_v2_s( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.efficientnet_v2_s() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_googlenet( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.googlenet() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_inception_v3( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.inception_v3() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - @model_test_params(supports_dynamic_shapes=False) - def test_maxvit_t( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.maxvit_t() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_mnasnet1_0( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.mnasnet1_0() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_mobilenet_v2( 
- self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.mobilenet_v2() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_mobilenet_v3_small( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.mobilenet_v3_small() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_regnet_y_1_6gf( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.regnet_y_1_6gf() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_resnet50( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.resnet50() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_resnext50_32x4d( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.resnext50_32x4d() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_shufflenet_v2_x1_0( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.shufflenet_v2_x1_0() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_squeezenet1_1( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.squeezenet1_1() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_swin_v2_t( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.swin_v2_t() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_vgg11(self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool): - model = torchvision.models.vgg11() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - @model_test_params(supports_dynamic_shapes=False) - def test_vit_b_16( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.vit_b_16() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) - - def test_wide_resnet50_2( - self, flow: TestFlow, dtype: torch.dtype, use_dynamic_shapes: bool - ): - model = torchvision.models.wide_resnet50_2() - self._test_cv_model(model, flow, dtype, use_dynamic_shapes) +@PARAMETERIZE_DTYPE +@PARAMETERIZE_DYNAMIC_SHAPES +def test_wide_resnet50_2(test_runner, dtype: torch.dtype, use_dynamic_shapes: bool): + model = torchvision.models.wide_resnet50_2() + _test_cv_model(model, test_runner, dtype, use_dynamic_shapes) diff --git a/backends/test/suite/operators/__init__.py b/backends/test/suite/operators/__init__.py index a55e11efd2b..8da208879ec 100644 --- a/backends/test/suite/operators/__init__.py +++ b/backends/test/suite/operators/__init__.py @@ -6,21 +6,14 @@ # pyre-unsafe -import copy import os import sys import unittest from enum import Enum -from typing import Callable import pytest import torch -from executorch.backends.test.suite import get_test_flows -from executorch.backends.test.suite.context import get_active_test_context, TestContext -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.reporting import log_test_summary -from executorch.backends.test.suite.runner import run_test def load_tests(loader, suite, pattern): diff --git a/backends/test/suite/operators/test_add.py b/backends/test/suite/operators/test_add.py index 6b21c3bf985..15a8349cb97 100644 --- a/backends/test/suite/operators/test_add.py +++ b/backends/test/suite/operators/test_add.py 
@@ -7,14 +7,8 @@ # pyre-unsafe +import pytest import torch -from executorch.backends.test.suite.flow import TestFlow - -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) class Model(torch.nn.Module): @@ -31,55 +25,54 @@ def forward(self, x, y): return torch.add(x, y, alpha=self.alpha) -@operator_test -class Add(OperatorTest): - @dtype_test - def test_add_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model(), - ( - (torch.rand(2, 10) * 100).to(dtype), - (torch.rand(2, 10) * 100).to(dtype), - ), - flow, - ) - - def test_add_f32_bcast_first(self, flow: TestFlow) -> None: - self._test_op( - Model(), - ( - torch.randn(5), - torch.randn(1, 5, 1, 5), - ), - flow, - ) - - def test_add_f32_bcast_second(self, flow: TestFlow) -> None: - self._test_op( - Model(), - ( - torch.randn(4, 4, 2, 7), - torch.randn(2, 7), - ), - flow, - ) - - def test_add_f32_bcast_unary(self, flow: TestFlow) -> None: - self._test_op( - Model(), - ( - torch.randn(5), - torch.randn(1, 1, 5), - ), - flow, - ) - - def test_add_f32_alpha(self, flow: TestFlow) -> None: - self._test_op( - ModelAlpha(alpha=2), - ( - torch.randn(1, 25), - torch.randn(1, 25), - ), - flow, - ) +@pytest.mark.parametrize( + "dtype", [torch.float16, torch.float32], ids=lambda s: str(s)[6:] +) +def test_add_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model(), + ( + (torch.rand(2, 10) * 100).to(dtype), + (torch.rand(2, 10) * 100).to(dtype), + ), + ) + + +def test_add_f32_bcast_first(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(5), + torch.randn(1, 5, 1, 5), + ), + ) + + +def test_add_f32_bcast_second(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(4, 4, 2, 7), + torch.randn(2, 7), + ), + ) + + +def test_add_f32_bcast_unary(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(5), + torch.randn(1, 1, 5), + ), + ) + + +def test_add_f32_alpha(test_runner) -> None: + test_runner.lower_and_run_model( + ModelAlpha(alpha=2), + ( + torch.randn(1, 25), + torch.randn(1, 25), + ), + ) diff --git a/backends/test/suite/operators/test_add_pytestified.py b/backends/test/suite/operators/test_add_pytestified.py deleted file mode 100644 index 56a57fec97b..00000000000 --- a/backends/test/suite/operators/test_add_pytestified.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. 
- -# pyre-unsafe - - -import pytest -import torch -from executorch.backends.test.suite.flow import TestFlow - -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) - - -class Model(torch.nn.Module): - def forward(self, x, y): - return x + y - - -class ModelAlpha(torch.nn.Module): - def __init__(self, alpha): - super().__init__() - self.alpha = alpha - - def forward(self, x, y): - return torch.add(x, y, alpha=self.alpha) - - -@pytest.mark.parametrize( - "dtype", - [torch.float16, torch.float32], - ids=lambda s: str(s)[6:] -) -def test_add_dtype(test_runner, dtype) -> None: - test_runner.lower_and_run_model( - Model(), - ( - (torch.rand(2, 10) * 100).to(dtype), - (torch.rand(2, 10) * 100).to(dtype), - ), - ) - -def test_add_f32_bcast_first(test_runner) -> None: - test_runner.lower_and_run_model( - Model(), - ( - torch.randn(5), - torch.randn(1, 5, 1, 5), - ), - ) - -def test_add_f32_bcast_second(test_runner) -> None: - test_runner.lower_and_run_model( - Model(), - ( - torch.randn(4, 4, 2, 7), - torch.randn(2, 7), - ), - ) - -def test_add_f32_bcast_unary(test_runner) -> None: - test_runner.lower_and_run_model( - Model(), - ( - torch.randn(5), - torch.randn(1, 1, 5), - ), - ) - -def test_add_f32_alpha(test_runner) -> None: - test_runner.lower_and_run_model( - ModelAlpha(alpha=2), - ( - torch.randn(1, 25), - torch.randn(1, 25), - ), - ) diff --git a/backends/test/suite/operators/test_sub.py b/backends/test/suite/operators/test_sub.py index 839c28bc2c4..2243eb6ee71 100644 --- a/backends/test/suite/operators/test_sub.py +++ b/backends/test/suite/operators/test_sub.py @@ -6,11 +6,6 @@ # pyre-unsafe - -import sys -import unittest - -import pytest import torch from executorch.backends.test.suite.flow import TestFlow @@ -18,7 +13,6 @@ dtype_test, operator_test, OperatorTest, - TestType, ) From 96c85c06944c90919c3ba74e438fb619d88e7863 Mon Sep 17 00:00:00 2001 From: Gregory James Comer Date: Mon, 22 Sep 2025 17:35:41 -0700 Subject: [PATCH 3/9] Update [ghstack-poisoned] --- .ci/scripts/test_backend_linux.sh | 2 +- .ci/scripts/test_backend_macos.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/scripts/test_backend_linux.sh b/.ci/scripts/test_backend_linux.sh index eec8bde41bd..7c99a1f006e 100755 --- a/.ci/scripts/test_backend_linux.sh +++ b/.ci/scripts/test_backend_linux.sh @@ -54,7 +54,7 @@ fi PYTHON_EXECUTABLE=python CMAKE_ARGS="$EXTRA_BUILD_ARGS" .ci/scripts/setup-linux.sh --build-tool cmake --build-mode Release --editable true EXIT_CODE=0 -pytest -c /dev/nul backends/test/suite/$SUITE/ -m flow_$FLOW --json-report --json-report-file "$REPORT_FILE" || EXIT_CODE=$? +pytest -c /dev/nul -n auto backends/test/suite/$SUITE/ -m flow_$FLOW --json-report "$REPORT_FILE" || EXIT_CODE=$? # Generate markdown summary. python -m executorch.backends.test.suite.generate_markdown_summary_json "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE diff --git a/.ci/scripts/test_backend_macos.sh b/.ci/scripts/test_backend_macos.sh index a5c91b34e6a..78c5f5f8e8d 100755 --- a/.ci/scripts/test_backend_macos.sh +++ b/.ci/scripts/test_backend_macos.sh @@ -24,7 +24,7 @@ PYTHON_EXECUTABLE=python ${CONDA_RUN} --no-capture-output .ci/scripts/setup-macos.sh --build-tool cmake --build-mode Release EXIT_CODE=0 -pytest -c /dev/nul backends/test/suite/$SUITE/ -m flow_$FLOW --json-report --json-report-file "$REPORT_FILE" || EXIT_CODE=$? 
+pytest -c /dev/nul -n auto backends/test/suite/$SUITE/ -m flow_$FLOW --json-report "$REPORT_FILE" || EXIT_CODE=$? # Generate markdown summary. python -m executorch.backends.test.suite.generate_markdown_summary_json "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE From 766d050bac12b12bbc80dbd351f5aa80a4e7dfc1 Mon Sep 17 00:00:00 2001 From: Gregory James Comer Date: Mon, 22 Sep 2025 18:04:11 -0700 Subject: [PATCH 4/9] Update [ghstack-poisoned] --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 00cae6de2e7..fbed875a824 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,6 +64,7 @@ dependencies=[ "pytest", "pytest-xdist", "pytest-rerunfailures==15.1", + "pytest-json-report", "pyyaml", "ruamel.yaml", "sympy", From 09a7c7385f65aa6d7d121951ca656ce47d536e25 Mon Sep 17 00:00:00 2001 From: Gregory Comer Date: Mon, 22 Sep 2025 19:42:21 -0700 Subject: [PATCH 5/9] Update [ghstack-poisoned] --- .ci/scripts/test_backend_linux.sh | 4 ++-- .ci/scripts/test_backend_macos.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.ci/scripts/test_backend_linux.sh b/.ci/scripts/test_backend_linux.sh index 7c99a1f006e..a8709b0bc20 100755 --- a/.ci/scripts/test_backend_linux.sh +++ b/.ci/scripts/test_backend_linux.sh @@ -10,7 +10,7 @@ SUITE=$1 FLOW=$2 ARTIFACT_DIR=$3 -REPORT_FILE="$ARTIFACT_DIR/test-report-$FLOW-$SUITE.csv" +REPORT_FILE="$ARTIFACT_DIR/test-report-$FLOW-$SUITE.json" echo "Running backend test job for suite $SUITE, flow $FLOW." echo "Saving job artifacts to $ARTIFACT_DIR." @@ -54,7 +54,7 @@ fi PYTHON_EXECUTABLE=python CMAKE_ARGS="$EXTRA_BUILD_ARGS" .ci/scripts/setup-linux.sh --build-tool cmake --build-mode Release --editable true EXIT_CODE=0 -pytest -c /dev/nul -n auto backends/test/suite/$SUITE/ -m flow_$FLOW --json-report "$REPORT_FILE" || EXIT_CODE=$? +pytest -c /dev/nul -n auto backends/test/suite/$SUITE/ -m flow_$FLOW --json-report --json-report-file="$REPORT_FILE" || EXIT_CODE=$? # Generate markdown summary. python -m executorch.backends.test.suite.generate_markdown_summary_json "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE diff --git a/.ci/scripts/test_backend_macos.sh b/.ci/scripts/test_backend_macos.sh index 78c5f5f8e8d..156ff77b87f 100755 --- a/.ci/scripts/test_backend_macos.sh +++ b/.ci/scripts/test_backend_macos.sh @@ -10,7 +10,7 @@ SUITE=$1 FLOW=$2 ARTIFACT_DIR=$3 -REPORT_FILE="$ARTIFACT_DIR/test-report-$FLOW-$SUITE.csv" +REPORT_FILE="$ARTIFACT_DIR/test-report-$FLOW-$SUITE.json" echo "Running backend test job for suite $SUITE, flow $FLOW." echo "Saving job artifacts to $ARTIFACT_DIR." @@ -24,7 +24,7 @@ PYTHON_EXECUTABLE=python ${CONDA_RUN} --no-capture-output .ci/scripts/setup-macos.sh --build-tool cmake --build-mode Release EXIT_CODE=0 -pytest -c /dev/nul -n auto backends/test/suite/$SUITE/ -m flow_$FLOW --json-report "$REPORT_FILE" || EXIT_CODE=$? +pytest -c /dev/nul -n auto backends/test/suite/$SUITE/ -m flow_$FLOW --json-report --json-report-file="$REPORT_FILE" || EXIT_CODE=$? # Generate markdown summary. 
 python -m executorch.backends.test.suite.generate_markdown_summary_json "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE

From 6e6216e016211068080a3a9e880d17821ce79f41 Mon Sep 17 00:00:00 2001
From: Gregory James Comer
Date: Tue, 23 Sep 2025 10:10:52 -0700
Subject: [PATCH 6/9] Update

[ghstack-poisoned]
---
 .../suite/generate_markdown_summary_json.py | 55 ++++++-------------
 1 file changed, 18 insertions(+), 37 deletions(-)

diff --git a/backends/test/suite/generate_markdown_summary_json.py b/backends/test/suite/generate_markdown_summary_json.py
index 9b3f0cfda53..921b9f6cd43 100644
--- a/backends/test/suite/generate_markdown_summary_json.py
+++ b/backends/test/suite/generate_markdown_summary_json.py
@@ -80,7 +80,7 @@ def aggregate_results(json_path: str) -> AggregatedSummary:
 
         test_id = subtest_meta["Test ID"]
         base_test = subtest_meta["Test Case"]
-        params = test_id[base_test.len() + 1 : -1]
+        params = test_id[len(base_test) + 1 : -1]
 
         if params:
             if params not in counts_by_param:
@@ -135,49 +135,30 @@ def generate_markdown(json_path: str, exit_code: int = 0):  # noqa (C901)
 
     if results.counts_by_params:
         print("\n## Results by Parameters\n")
 
-        # Extract all unique parameter keys from the JSON strings
-        all_param_keys = set()
-        parsed_params = {}
-
-        for params_str in results.counts_by_params.keys():
-            # Parse the JSON string (it's a string representation of a dict)
-            params_dict = json.loads(params_str)
-            parsed_params[params_str] = params_dict
-            all_param_keys.update(params_dict.keys())
-
-        if parsed_params and len(parsed_params) > 1:
-            # Sort parameter keys for consistent column ordering
-            sorted_param_keys = sorted(all_param_keys)
-
+        if len(results.counts_by_params) > 0:
             # Create table header
-            header_cols = sorted_param_keys + ["Pass", "Fail", "Skip", "Pass %"]
+            header_cols = ["Params", "Pass", "Fail", "Skip", "Pass %"]
             print("| " + " | ".join(header_cols) + " |")
             print("|" + "|".join(["---"] * len(header_cols)) + "|")
 
             # Create table rows
             for params_str, counts in results.counts_by_params.items():
-                if params_str in parsed_params:
-                    params_dict = parsed_params[params_str]
-                    row_values = []
-
-                    # Add parameter values
-                    for key in sorted_param_keys:
-                        value = params_dict.get(key, "")
-                        row_values.append(str(value))
+                row_values = [params_str]
 
-                    pass_fraction = counts.passes / (counts.passes + counts.fails)
+                # Compute the pass rate; guard against rows where every subtest was skipped
+                pass_fraction = counts.passes / max(counts.passes + counts.fails, 1)
 
-                    # Add count values
-                    row_values.extend(
-                        [
-                            str(counts.passes),
-                            str(counts.fails),
-                            str(counts.skips),
-                            f"{pass_fraction*100:.2f}%",
-                        ]
-                    )
+                # Add count values
+                row_values.extend(
+                    [
+                        str(counts.passes),
+                        str(counts.fails),
+                        str(counts.skips),
+                        f"{pass_fraction*100:.2f}%",
+                    ]
+                )
 
-                    print("| " + " | ".join(row_values) + " |")
+                print("| " + " | ".join(row_values) + " |")
 
     print()
 
@@ -231,12 +212,12 @@ def main():
     parser = argparse.ArgumentParser(
         description="Generate a Markdown representation of a test report."
     )
-    parser.add_argument("csv_path", help="Path to the test report CSV file.")
+    parser.add_argument("json_path", help="Path to the test report JSON file.")
     parser.add_argument(
         "--exit-code", type=int, default=0, help="Exit code from the test process."
) args = parser.parse_args() - generate_markdown(args.csv_path, args.exit_code) + generate_markdown(args.json_path, args.exit_code) if __name__ == "__main__": From 32f66ca2cb58fe2ed44b3ebecff51b38d5e9bdc1 Mon Sep 17 00:00:00 2001 From: Gregory James Comer Date: Tue, 23 Sep 2025 13:15:44 -0700 Subject: [PATCH 7/9] Update [ghstack-poisoned] --- .ci/scripts/test_backend_macos.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/scripts/test_backend_macos.sh b/.ci/scripts/test_backend_macos.sh index 156ff77b87f..f3c80ad4934 100755 --- a/.ci/scripts/test_backend_macos.sh +++ b/.ci/scripts/test_backend_macos.sh @@ -24,7 +24,7 @@ PYTHON_EXECUTABLE=python ${CONDA_RUN} --no-capture-output .ci/scripts/setup-macos.sh --build-tool cmake --build-mode Release EXIT_CODE=0 -pytest -c /dev/nul -n auto backends/test/suite/$SUITE/ -m flow_$FLOW --json-report --json-report-file="$REPORT_FILE" || EXIT_CODE=$? +${CONDA_RUN} python -m pytest -c /dev/nul -n auto backends/test/suite/$SUITE/ -m flow_$FLOW --json-report --json-report-file="$REPORT_FILE" || EXIT_CODE=$? # Generate markdown summary. -python -m executorch.backends.test.suite.generate_markdown_summary_json "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE +${CONDA_RUN} python -m executorch.backends.test.suite.generate_markdown_summary_json "$REPORT_FILE" > ${GITHUB_STEP_SUMMARY:-"step_summary.md"} --exit-code $EXIT_CODE From 999e0616a320aa858607eca42ffa23fe2fe4afc3 Mon Sep 17 00:00:00 2001 From: Gregory James Comer Date: Tue, 23 Sep 2025 16:58:42 -0700 Subject: [PATCH 8/9] Update [ghstack-poisoned] --- backends/test/suite/operators/__init__.py | 5 + backends/test/suite/operators/replace.sed | 10 + backends/test/suite/operators/test_abs.py | 55 +- .../operators/test_adaptive_avgpool1d.py | 129 ++- .../operators/test_adaptive_avgpool2d.py | 147 ++-- .../operators/test_adaptive_avgpool3d.py | 147 ++-- .../operators/test_adaptive_maxpool1d.py | 177 ++-- .../operators/test_adaptive_maxpool2d.py | 195 ++--- .../operators/test_adaptive_maxpool3d.py | 195 ++--- backends/test/suite/operators/test_add.py | 4 +- backends/test/suite/operators/test_amax.py | 420 +++++---- backends/test/suite/operators/test_amin.py | 420 +++++---- backends/test/suite/operators/test_argmax.py | 324 ++++--- backends/test/suite/operators/test_argmin.py | 324 ++++--- .../test/suite/operators/test_avgpool1d.py | 224 +++-- .../test/suite/operators/test_avgpool2d.py | 243 +++--- .../test/suite/operators/test_avgpool3d.py | 234 +++-- backends/test/suite/operators/test_cat.py | 292 +++---- backends/test/suite/operators/test_ceil.py | 59 +- backends/test/suite/operators/test_clamp.py | 117 ++- backends/test/suite/operators/test_conv1d.py | 169 ++-- backends/test/suite/operators/test_conv2d.py | 220 +++-- backends/test/suite/operators/test_conv3d.py | 211 +++-- .../suite/operators/test_convtranspose1d.py | 169 ++-- .../suite/operators/test_convtranspose2d.py | 239 +++--- .../suite/operators/test_convtranspose3d.py | 230 +++-- backends/test/suite/operators/test_div.py | 142 ++-- backends/test/suite/operators/test_elu.py | 37 +- .../test/suite/operators/test_embedding.py | 98 +-- .../suite/operators/test_embedding_bag.py | 150 ++-- backends/test/suite/operators/test_exp.py | 63 +- backends/test/suite/operators/test_expand.py | 181 ++-- backends/test/suite/operators/test_floor.py | 59 +- .../test/suite/operators/test_floor_divide.py | 386 +++++---- backends/test/suite/operators/test_gelu.py | 48 +- backends/test/suite/operators/test_glu.py | 
49 +- .../test/suite/operators/test_hardsigmoid.py | 41 +- .../test/suite/operators/test_hardswish.py | 41 +- .../test/suite/operators/test_hardtanh.py | 48 +- .../test/suite/operators/test_index_put.py | 802 +++++++++--------- .../test/suite/operators/test_index_select.py | 196 ++--- .../test/suite/operators/test_leaky_relu.py | 46 +- backends/test/suite/operators/test_linear.py | 159 ++-- backends/test/suite/operators/test_log.py | 57 +- backends/test/suite/operators/test_log10.py | 61 +- backends/test/suite/operators/test_log1p.py | 61 +- backends/test/suite/operators/test_log2.py | 61 +- .../test/suite/operators/test_logsigmoid.py | 34 +- backends/test/suite/operators/test_lstm.py | 322 ++++--- .../test/suite/operators/test_masked_fill.py | 145 ++-- .../test/suite/operators/test_maxpool1d.py | 279 +++--- .../test/suite/operators/test_maxpool2d.py | 290 +++---- .../test/suite/operators/test_maxpool3d.py | 285 +++---- backends/test/suite/operators/test_mean.py | 507 +++++------ backends/test/suite/operators/test_median.py | 243 +++--- backends/test/suite/operators/test_mul.py | 88 +- backends/test/suite/operators/test_neg.py | 90 +- backends/test/suite/operators/test_permute.py | 158 ++-- backends/test/suite/operators/test_pow.py | 226 +++-- backends/test/suite/operators/test_prelu.py | 55 +- backends/test/suite/operators/test_relu.py | 32 +- backends/test/suite/operators/test_reshape.py | 105 +-- backends/test/suite/operators/test_round.py | 216 ++--- backends/test/suite/operators/test_rsqrt.py | 59 +- backends/test/suite/operators/test_select.py | 119 ++- backends/test/suite/operators/test_sigmoid.py | 34 +- backends/test/suite/operators/test_silu.py | 41 +- backends/test/suite/operators/test_slice.py | 182 ++-- backends/test/suite/operators/test_split.py | 155 ++-- backends/test/suite/operators/test_sqrt.py | 61 +- backends/test/suite/operators/test_square.py | 75 +- backends/test/suite/operators/test_squeeze.py | 108 ++- backends/test/suite/operators/test_stack.py | 172 ++-- backends/test/suite/operators/test_sub.py | 108 ++- backends/test/suite/operators/test_tanh.py | 34 +- .../test/suite/operators/test_threshold.py | 72 +- .../test/suite/operators/test_transpose.py | 221 +++-- backends/test/suite/operators/test_trunc.py | 84 +- .../test/suite/operators/test_unsqueeze.py | 160 ++-- .../operators/test_upsample_bilinear2d.py | 414 +++++---- .../operators/test_upsample_nearest2d.py | 200 ++--- backends/test/suite/operators/test_view.py | 105 +-- 82 files changed, 6202 insertions(+), 7022 deletions(-) create mode 100644 backends/test/suite/operators/replace.sed diff --git a/backends/test/suite/operators/__init__.py b/backends/test/suite/operators/__init__.py index fa5ec2566d4..891bc3eb593 100644 --- a/backends/test/suite/operators/__init__.py +++ b/backends/test/suite/operators/__init__.py @@ -61,6 +61,11 @@ def dtype_test(func): return func +parameterize_by_dtype = pytest.mark.parametrize( + "dtype", DTYPES, ids=lambda s: str(s)[6:] +) + + class OperatorTest(unittest.TestCase): pass diff --git a/backends/test/suite/operators/replace.sed b/backends/test/suite/operators/replace.sed new file mode 100644 index 00000000000..5ee6db9b7cb --- /dev/null +++ b/backends/test/suite/operators/replace.sed @@ -0,0 +1,10 @@ +s/self\, flow\: TestFlow/test_runner/g +s/self\._test_op/test_runner.lower_and_run_model/g +s/, flow//g +/@operator_test/d +/(OperatorTest):/d +s/dtype_test/parameterize_by_dtype/g +/flow,/d +/import TestFlow/d +/operator_test,/d +/OperatorTest,/d diff --git 
a/backends/test/suite/operators/test_abs.py b/backends/test/suite/operators/test_abs.py index 484281e294e..c717f248226 100644 --- a/backends/test/suite/operators/test_abs.py +++ b/backends/test/suite/operators/test_abs.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class AbsModel(torch.nn.Module): @@ -27,34 +22,34 @@ def forward(self, x): return torch.abs(x) -@operator_test -class TestAbs(OperatorTest): - @dtype_test - def test_abs_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = AbsModel().to(dtype) - self._test_op(model, (torch.rand(10, 10).to(dtype) * 2 - 1,), flow) +@parameterize_by_dtype +def test_abs_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = AbsModel().to(dtype) + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype) * 2 - 1,)) - def test_abs_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - # 1D tensor - self._test_op(AbsModel(), (torch.randn(20),), flow) +def test_abs_shapes(test_runner) -> None: + # Test with different tensor shapes - # 2D tensor - self._test_op(AbsModel(), (torch.randn(5, 10),), flow) + # 1D tensor + test_runner.lower_and_run_model(AbsModel(), (torch.randn(20),)) - # 3D tensor - self._test_op(AbsModel(), (torch.randn(3, 4, 5),), flow) + # 2D tensor + test_runner.lower_and_run_model(AbsModel(), (torch.randn(5, 10),)) - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_abs_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases + # 3D tensor + test_runner.lower_and_run_model(AbsModel(), (torch.randn(3, 4, 5),)) - # Tensor with infinity - x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) - self._test_op(AbsModel(), (x,), flow, generate_random_test_inputs=False) - # Tensor with NaN - x = torch.tensor([float("nan"), 1.0, -1.0]) - self._test_op(AbsModel(), (x,), flow, generate_random_test_inputs=False) +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_abs_edge_cases(test_runner) -> None: + # Test edge cases + + # Tensor with infinity + x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) + test_runner.lower_and_run_model(AbsModel(), (x,), generate_random_test_inputs=False) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.0, -1.0]) + test_runner.lower_and_run_model(AbsModel(), (x,), generate_random_test_inputs=False) diff --git a/backends/test/suite/operators/test_adaptive_avgpool1d.py b/backends/test/suite/operators/test_adaptive_avgpool1d.py index f8858ecbc02..08c2819c1c2 100644 --- a/backends/test/suite/operators/test_adaptive_avgpool1d.py +++ b/backends/test/suite/operators/test_adaptive_avgpool1d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -31,72 +26,62 @@ def forward(self, x): return self.adaptive_avgpool(x) -@operator_test -class AdaptiveAvgPool1d(OperatorTest): - @dtype_test - def test_adaptive_avgpool1d_dtype(self, flow: TestFlow, dtype) -> None: - # Input shape: (batch_size, channels, length) - self._test_op( - Model().to(dtype), - ((torch.rand(1, 8, 100) * 10).to(dtype),), - 
flow, - ) +@parameterize_by_dtype +def test_adaptive_avgpool1d_dtype(test_runner, dtype) -> None: + # Input shape: (batch_size, channels, length) + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(1, 8, 100) * 10).to(dtype),), + ) - def test_adaptive_avgpool1d_output_size(self, flow: TestFlow) -> None: - # Test with different output sizes - self._test_op( - Model(output_size=1), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(output_size=10), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(output_size=50), - (torch.randn(1, 8, 100),), - flow, - ) - def test_adaptive_avgpool1d_batch_sizes(self, flow: TestFlow) -> None: - # Test with batch inputs - self._test_op( - Model(), - (torch.randn(2, 8, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(8, 8, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(16, 8, 100),), - flow, - ) +def test_adaptive_avgpool1d_output_size(test_runner) -> None: + # Test with different output sizes + test_runner.lower_and_run_model( + Model(output_size=1), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(output_size=10), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(output_size=50), + (torch.randn(1, 8, 100),), + ) - def test_adaptive_avgpool1d_input_sizes(self, flow: TestFlow) -> None: - # Test with different input sizes - self._test_op( - Model(), - (torch.randn(1, 4, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 16, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 50),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 200),), - flow, - ) + +def test_adaptive_avgpool1d_batch_sizes(test_runner) -> None: + # Test with batch inputs + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(16, 8, 100),), + ) + + +def test_adaptive_avgpool1d_input_sizes(test_runner) -> None: + # Test with different input sizes + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 16, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 50),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 200),), + ) diff --git a/backends/test/suite/operators/test_adaptive_avgpool2d.py b/backends/test/suite/operators/test_adaptive_avgpool2d.py index d0a456ccd9c..a99bc80d156 100644 --- a/backends/test/suite/operators/test_adaptive_avgpool2d.py +++ b/backends/test/suite/operators/test_adaptive_avgpool2d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -31,82 +26,70 @@ def forward(self, x): return self.adaptive_avgpool(x) -@operator_test -class AdaptiveAvgPool2d(OperatorTest): - @dtype_test - def test_adaptive_avgpool2d_dtype(self, flow: TestFlow, dtype) -> None: - # Input shape: (batch_size, channels, height, width) - self._test_op( - Model().to(dtype), - ((torch.rand(1, 8, 20, 20) * 10).to(dtype),), - flow, - ) +@parameterize_by_dtype +def test_adaptive_avgpool2d_dtype(test_runner, dtype) -> None: + # Input shape: (batch_size, channels, height, width) + 
test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(1, 8, 20, 20) * 10).to(dtype),), + ) - def test_adaptive_avgpool2d_output_size(self, flow: TestFlow) -> None: - # Test with different output sizes - self._test_op( - Model(output_size=1), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(output_size=(1, 1)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(output_size=(10, 10)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(output_size=(5, 10)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - def test_adaptive_avgpool2d_batch_sizes(self, flow: TestFlow) -> None: - # Test with batch inputs - self._test_op( - Model(), - (torch.randn(2, 8, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(8, 8, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(16, 8, 20, 20),), - flow, - ) +def test_adaptive_avgpool2d_output_size(test_runner) -> None: + # Test with different output sizes + test_runner.lower_and_run_model( + Model(output_size=1), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(output_size=(1, 1)), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(output_size=(10, 10)), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(output_size=(5, 10)), + (torch.randn(1, 8, 20, 20),), + ) - def test_adaptive_avgpool2d_input_sizes(self, flow: TestFlow) -> None: - # Test with different input sizes - self._test_op( - Model(), - (torch.randn(1, 4, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 16, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 10, 10),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 30, 30),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 15, 25),), - flow, - ) + +def test_adaptive_avgpool2d_batch_sizes(test_runner) -> None: + # Test with batch inputs + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(16, 8, 20, 20),), + ) + + +def test_adaptive_avgpool2d_input_sizes(test_runner) -> None: + # Test with different input sizes + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 16, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 10, 10),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 30, 30),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 15, 25),), + ) diff --git a/backends/test/suite/operators/test_adaptive_avgpool3d.py b/backends/test/suite/operators/test_adaptive_avgpool3d.py index 658ded337f4..76a68200f47 100644 --- a/backends/test/suite/operators/test_adaptive_avgpool3d.py +++ b/backends/test/suite/operators/test_adaptive_avgpool3d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -31,82 +26,70 @@ def forward(self, x): return self.adaptive_avgpool(x) -@operator_test -class AdaptiveAvgPool3d(OperatorTest): - @dtype_test - def test_adaptive_avgpool3d_dtype(self, flow: TestFlow, dtype) -> None: - # Input shape: (batch_size, 
channels, depth, height, width) - self._test_op( - Model().to(dtype), - ((torch.rand(1, 4, 8, 8, 8) * 10).to(dtype),), - flow, - ) +@parameterize_by_dtype +def test_adaptive_avgpool3d_dtype(test_runner, dtype) -> None: + # Input shape: (batch_size, channels, depth, height, width) + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(1, 4, 8, 8, 8) * 10).to(dtype),), + ) - def test_adaptive_avgpool3d_output_size(self, flow: TestFlow) -> None: - # Test with different output sizes - self._test_op( - Model(output_size=1), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(output_size=(1, 1, 1)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(output_size=(6, 6, 6)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(output_size=(2, 4, 6)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - def test_adaptive_avgpool3d_batch_sizes(self, flow: TestFlow) -> None: - # Test with batch inputs - self._test_op( - Model(), - (torch.randn(2, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(8, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(16, 4, 8, 8, 8),), - flow, - ) +def test_adaptive_avgpool3d_output_size(test_runner) -> None: + # Test with different output sizes + test_runner.lower_and_run_model( + Model(output_size=1), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(output_size=(1, 1, 1)), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(output_size=(6, 6, 6)), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(output_size=(2, 4, 6)), + (torch.randn(1, 4, 8, 8, 8),), + ) - def test_adaptive_avgpool3d_input_sizes(self, flow: TestFlow) -> None: - # Test with different input sizes - self._test_op( - Model(), - (torch.randn(1, 2, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 4, 6, 6, 6),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 4, 10, 10, 10),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 4, 7, 9, 11),), - flow, - ) + +def test_adaptive_avgpool3d_batch_sizes(test_runner) -> None: + # Test with batch inputs + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(16, 4, 8, 8, 8),), + ) + + +def test_adaptive_avgpool3d_input_sizes(test_runner) -> None: + # Test with different input sizes + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 2, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 6, 6, 6),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 10, 10, 10),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 7, 9, 11),), + ) diff --git a/backends/test/suite/operators/test_adaptive_maxpool1d.py b/backends/test/suite/operators/test_adaptive_maxpool1d.py index 782bd1a5ea7..959014baa65 100644 --- a/backends/test/suite/operators/test_adaptive_maxpool1d.py +++ b/backends/test/suite/operators/test_adaptive_maxpool1d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from 
executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -33,93 +28,83 @@ def forward(self, x): return self.adaptive_maxpool(x) -@operator_test -class AdaptiveMaxPool1d(OperatorTest): - @dtype_test - def test_adaptive_maxpool1d_dtype(self, flow: TestFlow, dtype) -> None: - # Input shape: (batch_size, channels, length) - self._test_op( - Model().to(dtype), - ((torch.rand(1, 8, 100) * 10).to(dtype),), - flow, - ) - - def test_adaptive_maxpool1d_output_size(self, flow: TestFlow) -> None: - # Test with different output sizes - self._test_op( - Model(output_size=1), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(output_size=10), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(output_size=50), - (torch.randn(1, 8, 100),), - flow, - ) - - def test_adaptive_maxpool1d_return_indices(self, flow: TestFlow) -> None: - # Test with return_indices=True - class ModelWithIndices(torch.nn.Module): - def __init__(self): - super().__init__() - self.adaptive_maxpool = torch.nn.AdaptiveMaxPool1d( - output_size=5, - return_indices=True, - ) - - def forward(self, x): - return self.adaptive_maxpool(x) - - input_tensor = torch.randn(1, 8, 100) - - self._test_op( - ModelWithIndices(), - (input_tensor,), - flow, - ) - - def test_adaptive_maxpool1d_batch_sizes(self, flow: TestFlow) -> None: - # Test with batch inputs - self._test_op( - Model(), - (torch.randn(2, 8, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(8, 8, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(16, 8, 100),), - flow, - ) - - def test_adaptive_maxpool1d_input_sizes(self, flow: TestFlow) -> None: - # Test with different input sizes - self._test_op( - Model(), - (torch.randn(1, 4, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 16, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 50),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 200),), - flow, - ) +@parameterize_by_dtype +def test_adaptive_maxpool1d_dtype(test_runner, dtype) -> None: + # Input shape: (batch_size, channels, length) + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(1, 8, 100) * 10).to(dtype),), + ) + + +def test_adaptive_maxpool1d_output_size(test_runner) -> None: + # Test with different output sizes + test_runner.lower_and_run_model( + Model(output_size=1), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(output_size=10), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(output_size=50), + (torch.randn(1, 8, 100),), + ) + + +def test_adaptive_maxpool1d_return_indices(test_runner) -> None: + # Test with return_indices=True + class ModelWithIndices(torch.nn.Module): + def __init__(self): + super().__init__() + self.adaptive_maxpool = torch.nn.AdaptiveMaxPool1d( + output_size=5, + return_indices=True, + ) + + def forward(self, x): + return self.adaptive_maxpool(x) + + input_tensor = torch.randn(1, 8, 100) + + test_runner.lower_and_run_model( + ModelWithIndices(), + (input_tensor,), + ) + + +def test_adaptive_maxpool1d_batch_sizes(test_runner) -> None: + # Test with batch inputs + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(16, 8, 100),), + ) + + +def test_adaptive_maxpool1d_input_sizes(test_runner) -> None: + # Test with different input sizes + test_runner.lower_and_run_model( + 
Model(), + (torch.randn(1, 4, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 16, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 50),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 200),), + ) diff --git a/backends/test/suite/operators/test_adaptive_maxpool2d.py b/backends/test/suite/operators/test_adaptive_maxpool2d.py index 3ba98ed6c86..c7ea5daf4ba 100644 --- a/backends/test/suite/operators/test_adaptive_maxpool2d.py +++ b/backends/test/suite/operators/test_adaptive_maxpool2d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -33,103 +28,91 @@ def forward(self, x): return self.adaptive_maxpool(x) -@operator_test -class AdaptiveMaxPool2d(OperatorTest): - @dtype_test - def test_adaptive_maxpool2d_dtype(self, flow: TestFlow, dtype) -> None: - # Input shape: (batch_size, channels, height, width) - self._test_op( - Model().to(dtype), - ((torch.rand(1, 8, 20, 20) * 10).to(dtype),), - flow, - ) - - def test_adaptive_maxpool2d_output_size(self, flow: TestFlow) -> None: - # Test with different output sizes - self._test_op( - Model(output_size=1), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(output_size=(1, 1)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(output_size=(10, 10)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(output_size=(5, 10)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - - def test_adaptive_maxpool2d_return_indices(self, flow: TestFlow) -> None: - # Test with return_indices=True - class ModelWithIndices(torch.nn.Module): - def __init__(self): - super().__init__() - self.adaptive_maxpool = torch.nn.AdaptiveMaxPool2d( - output_size=(5, 5), - return_indices=True, - ) - - def forward(self, x): - return self.adaptive_maxpool(x) - - input_tensor = torch.randn(1, 8, 20, 20) - - self._test_op( - ModelWithIndices(), - (input_tensor,), - flow, - ) - - def test_adaptive_maxpool2d_batch_sizes(self, flow: TestFlow) -> None: - # Test with batch inputs - self._test_op( - Model(), - (torch.randn(2, 8, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(8, 8, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(16, 8, 20, 20),), - flow, - ) - - def test_adaptive_maxpool2d_input_sizes(self, flow: TestFlow) -> None: - # Test with different input sizes - self._test_op( - Model(), - (torch.randn(1, 4, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 16, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 10, 10),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 30, 30),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 15, 25),), - flow, - ) +@parameterize_by_dtype +def test_adaptive_maxpool2d_dtype(test_runner, dtype) -> None: + # Input shape: (batch_size, channels, height, width) + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(1, 8, 20, 20) * 10).to(dtype),), + ) + + +def test_adaptive_maxpool2d_output_size(test_runner) -> None: + # Test with different output sizes + test_runner.lower_and_run_model( + Model(output_size=1), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(output_size=(1, 1)), + (torch.randn(1, 8, 20, 20),), + ) + 
test_runner.lower_and_run_model( + Model(output_size=(10, 10)), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(output_size=(5, 10)), + (torch.randn(1, 8, 20, 20),), + ) + + +def test_adaptive_maxpool2d_return_indices(test_runner) -> None: + # Test with return_indices=True + class ModelWithIndices(torch.nn.Module): + def __init__(self): + super().__init__() + self.adaptive_maxpool = torch.nn.AdaptiveMaxPool2d( + output_size=(5, 5), + return_indices=True, + ) + + def forward(self, x): + return self.adaptive_maxpool(x) + + input_tensor = torch.randn(1, 8, 20, 20) + + test_runner.lower_and_run_model( + ModelWithIndices(), + (input_tensor,), + ) + + +def test_adaptive_maxpool2d_batch_sizes(test_runner) -> None: + # Test with batch inputs + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(16, 8, 20, 20),), + ) + + +def test_adaptive_maxpool2d_input_sizes(test_runner) -> None: + # Test with different input sizes + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 16, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 10, 10),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 30, 30),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 15, 25),), + ) diff --git a/backends/test/suite/operators/test_adaptive_maxpool3d.py b/backends/test/suite/operators/test_adaptive_maxpool3d.py index b2c507c12e1..e8d9aa9c8f2 100644 --- a/backends/test/suite/operators/test_adaptive_maxpool3d.py +++ b/backends/test/suite/operators/test_adaptive_maxpool3d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -33,103 +28,91 @@ def forward(self, x): return self.adaptive_maxpool(x) -@operator_test -class AdaptiveMaxPool3d(OperatorTest): - @dtype_test - def test_adaptive_maxpool3d_dtype(self, flow: TestFlow, dtype) -> None: - # Input shape: (batch_size, channels, depth, height, width) - self._test_op( - Model().to(dtype), - ((torch.rand(1, 4, 8, 8, 8) * 10).to(dtype),), - flow, - ) - - def test_adaptive_maxpool3d_output_size(self, flow: TestFlow) -> None: - # Test with different output sizes - self._test_op( - Model(output_size=1), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(output_size=(1, 1, 1)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(output_size=(6, 6, 6)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(output_size=(2, 4, 6)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - - def test_adaptive_maxpool3d_return_indices(self, flow: TestFlow) -> None: - # Test with return_indices=True - class ModelWithIndices(torch.nn.Module): - def __init__(self): - super().__init__() - self.adaptive_maxpool = torch.nn.AdaptiveMaxPool3d( - output_size=(4, 4, 4), - return_indices=True, - ) - - def forward(self, x): - return self.adaptive_maxpool(x) - - input_tensor = torch.randn(1, 4, 8, 8, 8) - - self._test_op( - ModelWithIndices(), - (input_tensor,), - flow, - ) - - def test_adaptive_maxpool3d_batch_sizes(self, flow: TestFlow) -> None: - # Test 
with batch inputs - self._test_op( - Model(), - (torch.randn(2, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(8, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(16, 4, 8, 8, 8),), - flow, - ) - - def test_adaptive_maxpool3d_input_sizes(self, flow: TestFlow) -> None: - # Test with different input sizes - self._test_op( - Model(), - (torch.randn(1, 2, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 4, 6, 6, 6),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 4, 10, 10, 10),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 4, 7, 9, 11),), - flow, - ) +@parameterize_by_dtype +def test_adaptive_maxpool3d_dtype(test_runner, dtype) -> None: + # Input shape: (batch_size, channels, depth, height, width) + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(1, 4, 8, 8, 8) * 10).to(dtype),), + ) + + +def test_adaptive_maxpool3d_output_size(test_runner) -> None: + # Test with different output sizes + test_runner.lower_and_run_model( + Model(output_size=1), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(output_size=(1, 1, 1)), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(output_size=(6, 6, 6)), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(output_size=(2, 4, 6)), + (torch.randn(1, 4, 8, 8, 8),), + ) + + +def test_adaptive_maxpool3d_return_indices(test_runner) -> None: + # Test with return_indices=True + class ModelWithIndices(torch.nn.Module): + def __init__(self): + super().__init__() + self.adaptive_maxpool = torch.nn.AdaptiveMaxPool3d( + output_size=(4, 4, 4), + return_indices=True, + ) + + def forward(self, x): + return self.adaptive_maxpool(x) + + input_tensor = torch.randn(1, 4, 8, 8, 8) + + test_runner.lower_and_run_model( + ModelWithIndices(), + (input_tensor,), + ) + + +def test_adaptive_maxpool3d_batch_sizes(test_runner) -> None: + # Test with batch inputs + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(16, 4, 8, 8, 8),), + ) + + +def test_adaptive_maxpool3d_input_sizes(test_runner) -> None: + # Test with different input sizes + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 2, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 6, 6, 6),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 10, 10, 10),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 7, 9, 11),), + ) diff --git a/backends/test/suite/operators/test_add.py b/backends/test/suite/operators/test_add.py index 15a8349cb97..850e6f5132c 100644 --- a/backends/test/suite/operators/test_add.py +++ b/backends/test/suite/operators/test_add.py @@ -25,9 +25,7 @@ def forward(self, x, y): return torch.add(x, y, alpha=self.alpha) -@pytest.mark.parametrize( - "dtype", [torch.float16, torch.float32], ids=lambda s: str(s)[6:] -) +@pytest.mark.parametrize("dtype", [torch.float32], ids=lambda s: str(s)[6:]) def test_add_dtype(test_runner, dtype) -> None: test_runner.lower_and_run_model( Model(), diff --git a/backends/test/suite/operators/test_amax.py b/backends/test/suite/operators/test_amax.py index 04e0b17ae0a..2cc173f7a01 
100644 --- a/backends/test/suite/operators/test_amax.py +++ b/backends/test/suite/operators/test_amax.py @@ -10,13 +10,8 @@ from typing import List, Optional, Tuple, Union import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class AmaxModel(torch.nn.Module): @@ -33,225 +28,194 @@ def forward(self, x): return torch.amax(x, dim=self.dim, keepdim=self.keepdim) -@operator_test -class Amax(OperatorTest): - @dtype_test - def test_amax_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - AmaxModel().to(dtype), - (torch.rand(10, 10).to(dtype),), - flow, - ) - - def test_amax_dim(self, flow: TestFlow) -> None: - self._test_op( - AmaxModel(dim=0), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - AmaxModel(dim=1), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - AmaxModel(dim=0), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=1), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=2), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=1), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=-1), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=-2), - (torch.randn(3, 4, 5),), - flow, - ) - - def test_amax_multi_dim(self, flow: TestFlow) -> None: - self._test_op( - AmaxModel(dim=(0, 1)), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=(0, 2)), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=(1, 2)), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=(1, 3)), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=(0, 2)), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=(-1, -3)), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=(0, 1, 2, 3)), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - def test_amax_keepdim(self, flow: TestFlow) -> None: - self._test_op( - AmaxModel(dim=0, keepdim=True), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - AmaxModel(dim=1, keepdim=True), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - AmaxModel(dim=1, keepdim=True), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=2, keepdim=True), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(dim=(1, 2), keepdim=True), - (torch.randn(3, 4, 5),), - flow, - ) - - def test_amax_shapes(self, flow: TestFlow) -> None: - self._test_op( - AmaxModel(), - (torch.randn(20),), - flow, - ) - self._test_op( - AmaxModel(dim=0), - (torch.randn(20),), - flow, - ) - - self._test_op( - AmaxModel(), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - AmaxModel(), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - AmaxModel(), - (torch.randn(2, 2, 3, 4, 5),), - flow, - ) - - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_amax_edge_cases(self, flow: TestFlow) -> None: - x = torch.tensor([[1.0, float("inf"), 3.0], [4.0, 5.0, float("inf")]]) - self._test_op( - AmaxModel(), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - AmaxModel(dim=0), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - AmaxModel(dim=1), - (x,), - flow, - 
generate_random_test_inputs=False, - ) - - x = torch.tensor([[1.0, float("nan"), 3.0], [4.0, 5.0, float("nan")]]) - self._test_op( - AmaxModel(), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - AmaxModel(dim=0), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - AmaxModel(dim=1), - (x,), - flow, - generate_random_test_inputs=False, - ) - - def test_amax_scalar(self, flow: TestFlow) -> None: - self._test_op( - AmaxModel(), - (torch.tensor([5.0]),), - flow, - ) - self._test_op( - AmaxModel(dim=0), - (torch.tensor([5.0]),), - flow, - ) +@parameterize_by_dtype +def test_amax_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + AmaxModel().to(dtype), + (torch.rand(10, 10).to(dtype),), + ) + + +def test_amax_dim(test_runner) -> None: + test_runner.lower_and_run_model( + AmaxModel(dim=0), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=1), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=0), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=2), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=1), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=-1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=-2), + (torch.randn(3, 4, 5),), + ) + + +def test_amax_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model( + AmaxModel(dim=(0, 1)), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=(0, 2)), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=(1, 2)), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=(1, 3)), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=(0, 2)), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=(-1, -3)), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=(0, 1, 2, 3)), + (torch.randn(2, 3, 4, 5),), + ) + + +def test_amax_keepdim(test_runner) -> None: + test_runner.lower_and_run_model( + AmaxModel(dim=0, keepdim=True), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=1, keepdim=True), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=1, keepdim=True), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=2, keepdim=True), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(dim=(1, 2), keepdim=True), + (torch.randn(3, 4, 5),), + ) + + +def test_amax_shapes(test_runner) -> None: + test_runner.lower_and_run_model( + AmaxModel(), + (torch.randn(20),), + ) + test_runner.lower_and_run_model( + AmaxModel(dim=0), + (torch.randn(20),), + ) + + test_runner.lower_and_run_model( + AmaxModel(), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + AmaxModel(), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AmaxModel(), + (torch.randn(2, 2, 3, 4, 5),), + ) + + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_amax_edge_cases(test_runner) -> None: + x = torch.tensor([[1.0, float("inf"), 3.0], [4.0, 5.0, float("inf")]]) + 
test_runner.lower_and_run_model( + AmaxModel(), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + AmaxModel(dim=0), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + AmaxModel(dim=1), + (x,), + generate_random_test_inputs=False, + ) + + x = torch.tensor([[1.0, float("nan"), 3.0], [4.0, 5.0, float("nan")]]) + test_runner.lower_and_run_model( + AmaxModel(), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + AmaxModel(dim=0), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + AmaxModel(dim=1), + (x,), + generate_random_test_inputs=False, + ) + + +def test_amax_scalar(test_runner) -> None: + test_runner.lower_and_run_model( + AmaxModel(), + (torch.tensor([5.0]),), + ) + test_runner.lower_and_run_model( + AmaxModel(dim=0), + (torch.tensor([5.0]),), + ) diff --git a/backends/test/suite/operators/test_amin.py b/backends/test/suite/operators/test_amin.py index 7aa5c6b7a34..6e9845c1492 100644 --- a/backends/test/suite/operators/test_amin.py +++ b/backends/test/suite/operators/test_amin.py @@ -10,13 +10,8 @@ from typing import List, Optional, Tuple, Union import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class AminModel(torch.nn.Module): @@ -35,225 +30,194 @@ def forward(self, x): return torch.amin(x, dim=self.dim, keepdim=self.keepdim) -@operator_test -class Amin(OperatorTest): - @dtype_test - def test_amin_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - AminModel().to(dtype), - (torch.rand(10, 10).to(dtype),), - flow, - ) - - def test_amin_dim(self, flow: TestFlow) -> None: - self._test_op( - AminModel(dim=0), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - AminModel(dim=1), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - AminModel(dim=0), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(dim=1), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(dim=2), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(dim=1), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(dim=-1), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(dim=-2), - (torch.randn(3, 4, 5),), - flow, - ) - - def test_amin_multi_dim(self, flow: TestFlow) -> None: - self._test_op( - AminModel(dim=(0, 1)), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(dim=(0, 2)), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(dim=(1, 2)), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(dim=(1, 3)), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(dim=(0, 2)), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(dim=(-1, -3)), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(dim=(0, 1, 2, 3)), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - def test_amin_keepdim(self, flow: TestFlow) -> None: - self._test_op( - AminModel(dim=0, keepdim=True), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - AminModel(dim=1, keepdim=True), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - AminModel(dim=1, keepdim=True), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(dim=2, keepdim=True), - (torch.randn(2, 3, 4, 5),), - flow, - ) 
- - self._test_op( - AminModel(dim=(1, 2), keepdim=True), - (torch.randn(3, 4, 5),), - flow, - ) - - def test_amin_shapes(self, flow: TestFlow) -> None: - self._test_op( - AminModel(), - (torch.randn(20),), - flow, - ) - self._test_op( - AminModel(dim=0), - (torch.randn(20),), - flow, - ) - - self._test_op( - AminModel(), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - AminModel(), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - AminModel(), - (torch.randn(2, 2, 3, 4, 5),), - flow, - ) - - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_amin_edge_cases(self, flow: TestFlow) -> None: - x = torch.tensor([[1.0, float("-inf"), 3.0], [4.0, 5.0, float("-inf")]]) - self._test_op( - AminModel(), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - AminModel(dim=0), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - AminModel(dim=1), - (x,), - flow, - generate_random_test_inputs=False, - ) - - x = torch.tensor([[1.0, float("nan"), 3.0], [4.0, 5.0, float("nan")]]) - self._test_op( - AminModel(), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - AminModel(dim=0), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - AminModel(dim=1), - (x,), - flow, - generate_random_test_inputs=False, - ) - - def test_amin_scalar(self, flow: TestFlow) -> None: - self._test_op( - AminModel(), - (torch.tensor([5.0]),), - flow, - ) - self._test_op( - AminModel(dim=0), - (torch.tensor([5.0]),), - flow, - ) +@parameterize_by_dtype +def test_amin_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + AminModel().to(dtype), + (torch.rand(10, 10).to(dtype),), + ) + + +def test_amin_dim(test_runner) -> None: + test_runner.lower_and_run_model( + AminModel(dim=0), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=1), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=0), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=2), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=1), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=-1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=-2), + (torch.randn(3, 4, 5),), + ) + + +def test_amin_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model( + AminModel(dim=(0, 1)), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=(0, 2)), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=(1, 2)), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=(1, 3)), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=(0, 2)), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=(-1, -3)), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=(0, 1, 2, 3)), + (torch.randn(2, 3, 4, 5),), + ) + + +def test_amin_keepdim(test_runner) -> None: + test_runner.lower_and_run_model( + AminModel(dim=0, keepdim=True), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=1, keepdim=True), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + 
AminModel(dim=1, keepdim=True), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=2, keepdim=True), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(dim=(1, 2), keepdim=True), + (torch.randn(3, 4, 5),), + ) + + +def test_amin_shapes(test_runner) -> None: + test_runner.lower_and_run_model( + AminModel(), + (torch.randn(20),), + ) + test_runner.lower_and_run_model( + AminModel(dim=0), + (torch.randn(20),), + ) + + test_runner.lower_and_run_model( + AminModel(), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + AminModel(), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + AminModel(), + (torch.randn(2, 2, 3, 4, 5),), + ) + + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_amin_edge_cases(test_runner) -> None: + x = torch.tensor([[1.0, float("-inf"), 3.0], [4.0, 5.0, float("-inf")]]) + test_runner.lower_and_run_model( + AminModel(), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + AminModel(dim=0), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + AminModel(dim=1), + (x,), + generate_random_test_inputs=False, + ) + + x = torch.tensor([[1.0, float("nan"), 3.0], [4.0, 5.0, float("nan")]]) + test_runner.lower_and_run_model( + AminModel(), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + AminModel(dim=0), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + AminModel(dim=1), + (x,), + generate_random_test_inputs=False, + ) + + +def test_amin_scalar(test_runner) -> None: + test_runner.lower_and_run_model( + AminModel(), + (torch.tensor([5.0]),), + ) + test_runner.lower_and_run_model( + AminModel(dim=0), + (torch.tensor([5.0]),), + ) diff --git a/backends/test/suite/operators/test_argmax.py b/backends/test/suite/operators/test_argmax.py index ca3ae9e1805..b1c481fd4a0 100644 --- a/backends/test/suite/operators/test_argmax.py +++ b/backends/test/suite/operators/test_argmax.py @@ -10,13 +10,8 @@ from typing import Optional import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class ArgmaxModel(torch.nn.Module): @@ -29,173 +24,150 @@ def forward(self, x): return torch.argmax(x, dim=self.dim, keepdim=self.keepdim) -@operator_test -class Argmax(OperatorTest): - @dtype_test - def test_argmax_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - ArgmaxModel().to(dtype), - (torch.rand(10, 10).to(dtype),), - flow, - ) - - def test_argmax_dim(self, flow: TestFlow) -> None: - self._test_op( - ArgmaxModel(dim=0), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - ArgmaxModel(dim=1), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - ArgmaxModel(dim=0), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - ArgmaxModel(dim=1), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - ArgmaxModel(dim=2), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - ArgmaxModel(dim=1), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - ArgmaxModel(dim=-1), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - ArgmaxModel(dim=-2), - (torch.randn(3, 4, 5),), - flow, - ) - - def test_argmax_keepdim(self, flow: TestFlow) -> 
None: - self._test_op( - ArgmaxModel(dim=0, keepdim=True), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - ArgmaxModel(dim=1, keepdim=True), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - ArgmaxModel(dim=1, keepdim=True), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - ArgmaxModel(dim=2, keepdim=True), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - def test_argmax_shapes(self, flow: TestFlow) -> None: - self._test_op( - ArgmaxModel(), - (torch.randn(20),), - flow, - ) - - self._test_op( - ArgmaxModel(), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - ArgmaxModel(), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - ArgmaxModel(), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - ArgmaxModel(), - (torch.randn(2, 2, 3, 4, 5),), - flow, - ) - - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_argmax_edge_cases(self, flow: TestFlow) -> None: - x = torch.tensor([[1.0, float("inf"), 3.0], [4.0, 5.0, float("inf")]]) - self._test_op( - ArgmaxModel(), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - ArgmaxModel(dim=0), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - ArgmaxModel(dim=1), - (x,), - flow, - generate_random_test_inputs=False, - ) - - x = torch.tensor([[1.0, float("nan"), 3.0], [4.0, 5.0, float("nan")]]) - self._test_op( - ArgmaxModel(), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - ArgmaxModel(dim=0), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - ArgmaxModel(dim=1), - (x,), - flow, - generate_random_test_inputs=False, - ) - - x = torch.tensor([5.0]) - self._test_op( - ArgmaxModel(), - (x,), - flow, - ) - - def test_argmax_scalar(self, flow: TestFlow) -> None: - self._test_op( - ArgmaxModel(), - (torch.tensor([5.0]),), - flow, - ) +@parameterize_by_dtype +def test_argmax_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + ArgmaxModel().to(dtype), + (torch.rand(10, 10).to(dtype),), + ) + + +def test_argmax_dim(test_runner) -> None: + test_runner.lower_and_run_model( + ArgmaxModel(dim=0), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(dim=1), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(dim=0), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(dim=1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(dim=2), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(dim=1), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(dim=-1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(dim=-2), + (torch.randn(3, 4, 5),), + ) + + +def test_argmax_keepdim(test_runner) -> None: + test_runner.lower_and_run_model( + ArgmaxModel(dim=0, keepdim=True), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(dim=1, keepdim=True), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(dim=1, keepdim=True), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(dim=2, keepdim=True), + (torch.randn(2, 3, 4, 5),), + ) + + +def test_argmax_shapes(test_runner) -> None: + test_runner.lower_and_run_model( + ArgmaxModel(), + (torch.randn(20),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(), + (torch.randn(3, 
4, 5),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgmaxModel(), + (torch.randn(2, 2, 3, 4, 5),), + ) + + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_argmax_edge_cases(test_runner) -> None: + x = torch.tensor([[1.0, float("inf"), 3.0], [4.0, 5.0, float("inf")]]) + test_runner.lower_and_run_model( + ArgmaxModel(), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + ArgmaxModel(dim=0), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + ArgmaxModel(dim=1), + (x,), + generate_random_test_inputs=False, + ) + + x = torch.tensor([[1.0, float("nan"), 3.0], [4.0, 5.0, float("nan")]]) + test_runner.lower_and_run_model( + ArgmaxModel(), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + ArgmaxModel(dim=0), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + ArgmaxModel(dim=1), + (x,), + generate_random_test_inputs=False, + ) + + x = torch.tensor([5.0]) + test_runner.lower_and_run_model( + ArgmaxModel(), + (x,), + ) + + +def test_argmax_scalar(test_runner) -> None: + test_runner.lower_and_run_model( + ArgmaxModel(), + (torch.tensor([5.0]),), + ) diff --git a/backends/test/suite/operators/test_argmin.py b/backends/test/suite/operators/test_argmin.py index aaf4e9bd167..3e972f9d107 100644 --- a/backends/test/suite/operators/test_argmin.py +++ b/backends/test/suite/operators/test_argmin.py @@ -10,13 +10,8 @@ from typing import Optional import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class ArgminModel(torch.nn.Module): @@ -29,173 +24,150 @@ def forward(self, x): return torch.argmin(x, dim=self.dim, keepdim=self.keepdim) -@operator_test -class Argmin(OperatorTest): - @dtype_test - def test_argmin_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - ArgminModel().to(dtype), - (torch.rand(10, 10).to(dtype),), - flow, - ) - - def test_argmin_dim(self, flow: TestFlow) -> None: - self._test_op( - ArgminModel(dim=0), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - ArgminModel(dim=1), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - ArgminModel(dim=0), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - ArgminModel(dim=1), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - ArgminModel(dim=2), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - ArgminModel(dim=1), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - ArgminModel(dim=-1), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - ArgminModel(dim=-2), - (torch.randn(3, 4, 5),), - flow, - ) - - def test_argmin_keepdim(self, flow: TestFlow) -> None: - self._test_op( - ArgminModel(dim=0, keepdim=True), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - ArgminModel(dim=1, keepdim=True), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - ArgminModel(dim=1, keepdim=True), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - ArgminModel(dim=2, keepdim=True), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - def test_argmin_shapes(self, flow: TestFlow) -> None: - self._test_op( - ArgminModel(), - (torch.randn(20),), - flow, - ) - - self._test_op( - ArgminModel(), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - 
ArgminModel(), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - ArgminModel(), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - ArgminModel(), - (torch.randn(2, 2, 3, 4, 5),), - flow, - ) - - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_argmin_edge_cases(self, flow: TestFlow) -> None: - x = torch.tensor([[1.0, float("-inf"), 3.0], [4.0, 5.0, float("-inf")]]) - self._test_op( - ArgminModel(), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - ArgminModel(dim=0), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - ArgminModel(dim=1), - (x,), - flow, - generate_random_test_inputs=False, - ) - - x = torch.tensor([[1.0, float("nan"), 3.0], [4.0, 5.0, float("nan")]]) - self._test_op( - ArgminModel(), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - ArgminModel(dim=0), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - ArgminModel(dim=1), - (x,), - flow, - generate_random_test_inputs=False, - ) - - x = torch.tensor([5.0]) - self._test_op( - ArgminModel(), - (x,), - flow, - ) - - def test_argmin_scalar(self, flow: TestFlow) -> None: - self._test_op( - ArgminModel(), - (torch.tensor([5.0]),), - flow, - ) +@parameterize_by_dtype +def test_argmin_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + ArgminModel().to(dtype), + (torch.rand(10, 10).to(dtype),), + ) + + +def test_argmin_dim(test_runner) -> None: + test_runner.lower_and_run_model( + ArgminModel(dim=0), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + ArgminModel(dim=1), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + ArgminModel(dim=0), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgminModel(dim=1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgminModel(dim=2), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgminModel(dim=1), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgminModel(dim=-1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgminModel(dim=-2), + (torch.randn(3, 4, 5),), + ) + + +def test_argmin_keepdim(test_runner) -> None: + test_runner.lower_and_run_model( + ArgminModel(dim=0, keepdim=True), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + ArgminModel(dim=1, keepdim=True), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + ArgminModel(dim=1, keepdim=True), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgminModel(dim=2, keepdim=True), + (torch.randn(2, 3, 4, 5),), + ) + + +def test_argmin_shapes(test_runner) -> None: + test_runner.lower_and_run_model( + ArgminModel(), + (torch.randn(20),), + ) + + test_runner.lower_and_run_model( + ArgminModel(), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + ArgminModel(), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgminModel(), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + ArgminModel(), + (torch.randn(2, 2, 3, 4, 5),), + ) + + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_argmin_edge_cases(test_runner) -> None: + x = torch.tensor([[1.0, float("-inf"), 3.0], [4.0, 5.0, float("-inf")]]) + test_runner.lower_and_run_model( + ArgminModel(), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + ArgminModel(dim=0), + (x,), + generate_random_test_inputs=False, + ) + 
test_runner.lower_and_run_model( + ArgminModel(dim=1), + (x,), + generate_random_test_inputs=False, + ) + + x = torch.tensor([[1.0, float("nan"), 3.0], [4.0, 5.0, float("nan")]]) + test_runner.lower_and_run_model( + ArgminModel(), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + ArgminModel(dim=0), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + ArgminModel(dim=1), + (x,), + generate_random_test_inputs=False, + ) + + x = torch.tensor([5.0]) + test_runner.lower_and_run_model( + ArgminModel(), + (x,), + ) + + +def test_argmin_scalar(test_runner) -> None: + test_runner.lower_and_run_model( + ArgminModel(), + (torch.tensor([5.0]),), + ) diff --git a/backends/test/suite/operators/test_avgpool1d.py b/backends/test/suite/operators/test_avgpool1d.py index 0b2d001de01..9fdf9d2f08c 100644 --- a/backends/test/suite/operators/test_avgpool1d.py +++ b/backends/test/suite/operators/test_avgpool1d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -39,117 +34,106 @@ def forward(self, x): return self.avgpool(x) -@operator_test -class AvgPool1d(OperatorTest): - @dtype_test - def test_avgpool1d_dtype(self, flow: TestFlow, dtype) -> None: - # Input shape: (batch_size, channels, length) - self._test_op( - Model().to(dtype), - ((torch.rand(1, 8, 100) * 10).to(dtype),), - flow, - ) - - def test_avgpool1d_kernel_size(self, flow: TestFlow) -> None: - # Test with different kernel sizes - self._test_op( - Model(kernel_size=1), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(kernel_size=5), - (torch.randn(1, 8, 100),), - flow, - ) - - def test_avgpool1d_stride(self, flow: TestFlow) -> None: - # Test with different stride values - self._test_op( - Model(stride=2), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(stride=3), - (torch.randn(1, 8, 100),), - flow, - ) - - def test_avgpool1d_padding(self, flow: TestFlow) -> None: - # Test with different padding values - self._test_op( - Model(padding=1), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(padding=2), - (torch.randn(1, 8, 100),), - flow, - ) - - def test_avgpool1d_ceil_mode(self, flow: TestFlow) -> None: - # Test with ceil_mode=True - self._test_op( - Model(ceil_mode=True), - (torch.randn(1, 8, 100),), - flow, - ) - - def test_avgpool1d_count_include_pad(self, flow: TestFlow) -> None: - # Test with count_include_pad=False - self._test_op( - Model(padding=1, count_include_pad=False), - (torch.randn(1, 8, 100),), - flow, - ) - - def test_avgpool1d_batch_sizes(self, flow: TestFlow) -> None: - # Test with batch inputs - self._test_op( - Model(), - (torch.randn(2, 8, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(8, 8, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(16, 8, 100),), - flow, - ) - - def test_avgpool1d_input_sizes(self, flow: TestFlow) -> None: - # Test with different input sizes - self._test_op( - Model(), - (torch.randn(1, 4, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 16, 100),), - flow, - ) - - def test_avgpool1d_combinations(self, flow: TestFlow) -> None: - # Test with combinations of parameters - self._test_op( - Model(kernel_size=2, stride=2, padding=1), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - 
Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(kernel_size=2, stride=2, padding=1, count_include_pad=False), - (torch.randn(1, 8, 100),), - flow, - ) +@parameterize_by_dtype +def test_avgpool1d_dtype(test_runner, dtype) -> None: + # Input shape: (batch_size, channels, length) + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(1, 8, 100) * 10).to(dtype),), + ) + + +def test_avgpool1d_kernel_size(test_runner) -> None: + # Test with different kernel sizes + test_runner.lower_and_run_model( + Model(kernel_size=1), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=5), + (torch.randn(1, 8, 100),), + ) + + +def test_avgpool1d_stride(test_runner) -> None: + # Test with different stride values + test_runner.lower_and_run_model( + Model(stride=2), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(stride=3), + (torch.randn(1, 8, 100),), + ) + + +def test_avgpool1d_padding(test_runner) -> None: + # Test with different padding values + test_runner.lower_and_run_model( + Model(padding=1), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(padding=2), + (torch.randn(1, 8, 100),), + ) + + +def test_avgpool1d_ceil_mode(test_runner) -> None: + # Test with ceil_mode=True + test_runner.lower_and_run_model( + Model(ceil_mode=True), + (torch.randn(1, 8, 100),), + ) + + +def test_avgpool1d_count_include_pad(test_runner) -> None: + # Test with count_include_pad=False + test_runner.lower_and_run_model( + Model(padding=1, count_include_pad=False), + (torch.randn(1, 8, 100),), + ) + + +def test_avgpool1d_batch_sizes(test_runner) -> None: + # Test with batch inputs + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(16, 8, 100),), + ) + + +def test_avgpool1d_input_sizes(test_runner) -> None: + # Test with different input sizes + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 16, 100),), + ) + + +def test_avgpool1d_combinations(test_runner) -> None: + # Test with combinations of parameters + test_runner.lower_and_run_model( + Model(kernel_size=2, stride=2, padding=1), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=2, stride=2, padding=1, count_include_pad=False), + (torch.randn(1, 8, 100),), + ) diff --git a/backends/test/suite/operators/test_avgpool2d.py b/backends/test/suite/operators/test_avgpool2d.py index 97bcb00372a..f7f3cce7394 100644 --- a/backends/test/suite/operators/test_avgpool2d.py +++ b/backends/test/suite/operators/test_avgpool2d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -42,127 +37,115 @@ def forward(self, x): return self.avgpool(x) -@operator_test -class AvgPool2d(OperatorTest): - @dtype_test - def test_avgpool2d_dtype(self, flow: TestFlow, dtype) -> None: - # Input shape: (batch_size, channels, height, width) - self._test_op( - 
Model().to(dtype), - ((torch.rand(1, 8, 20, 20) * 10).to(dtype),), - flow, - ) - - def test_avgpool2d_kernel_size(self, flow: TestFlow) -> None: - # Test with different kernel sizes - self._test_op( - Model(kernel_size=1), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(kernel_size=5), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(kernel_size=(3, 2)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - - def test_avgpool2d_stride(self, flow: TestFlow) -> None: - # Test with different stride values - self._test_op( - Model(stride=2), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(stride=(2, 1)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - - def test_avgpool2d_padding(self, flow: TestFlow) -> None: - # Test with different padding values - self._test_op( - Model(padding=1), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(padding=(1, 2)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - - def test_avgpool2d_ceil_mode(self, flow: TestFlow) -> None: - # Test with ceil_mode=True - self._test_op( - Model(ceil_mode=True), - (torch.randn(1, 8, 20, 20),), - flow, - ) - - def test_avgpool2d_count_include_pad(self, flow: TestFlow) -> None: - # Test with count_include_pad=False - self._test_op( - Model(padding=1, count_include_pad=False), - (torch.randn(1, 8, 20, 20),), - flow, - ) - - def test_avgpool2d_batch_sizes(self, flow: TestFlow) -> None: - # Test with batch inputs - self._test_op( - Model(), - (torch.randn(2, 8, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(8, 8, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(16, 8, 20, 20),), - flow, - ) - - def test_avgpool2d_input_sizes(self, flow: TestFlow) -> None: - # Test with different input sizes - self._test_op( - Model(), - (torch.randn(1, 4, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 16, 20, 20),), - flow, - ) - - def test_avgpool2d_combinations(self, flow: TestFlow) -> None: - # Test with combinations of parameters - self._test_op( - Model(kernel_size=2, stride=2, padding=1), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), - (torch.randn(1, 8, 21, 21),), - flow, - ) - self._test_op( - Model( - kernel_size=(2, 3), - stride=(2, 1), - padding=(1, 0), - count_include_pad=False, - ), - (torch.randn(1, 8, 20, 20),), - flow, - ) +@parameterize_by_dtype +def test_avgpool2d_dtype(test_runner, dtype) -> None: + # Input shape: (batch_size, channels, height, width) + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(1, 8, 20, 20) * 10).to(dtype),), + ) + + +def test_avgpool2d_kernel_size(test_runner) -> None: + # Test with different kernel sizes + test_runner.lower_and_run_model( + Model(kernel_size=1), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=5), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=(3, 2)), + (torch.randn(1, 8, 20, 20),), + ) + + +def test_avgpool2d_stride(test_runner) -> None: + # Test with different stride values + test_runner.lower_and_run_model( + Model(stride=2), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(stride=(2, 1)), + (torch.randn(1, 8, 20, 20),), + ) + + +def test_avgpool2d_padding(test_runner) -> None: + # Test with different padding values + test_runner.lower_and_run_model( + Model(padding=1), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + 
Model(padding=(1, 2)), + (torch.randn(1, 8, 20, 20),), + ) + + +def test_avgpool2d_ceil_mode(test_runner) -> None: + # Test with ceil_mode=True + test_runner.lower_and_run_model( + Model(ceil_mode=True), + (torch.randn(1, 8, 20, 20),), + ) + + +def test_avgpool2d_count_include_pad(test_runner) -> None: + # Test with count_include_pad=False + test_runner.lower_and_run_model( + Model(padding=1, count_include_pad=False), + (torch.randn(1, 8, 20, 20),), + ) + + +def test_avgpool2d_batch_sizes(test_runner) -> None: + # Test with batch inputs + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(16, 8, 20, 20),), + ) + + +def test_avgpool2d_input_sizes(test_runner) -> None: + # Test with different input sizes + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 16, 20, 20),), + ) + + +def test_avgpool2d_combinations(test_runner) -> None: + # Test with combinations of parameters + test_runner.lower_and_run_model( + Model(kernel_size=2, stride=2, padding=1), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), + (torch.randn(1, 8, 21, 21),), + ) + test_runner.lower_and_run_model( + Model( + kernel_size=(2, 3), + stride=(2, 1), + padding=(1, 0), + count_include_pad=False, + ), + (torch.randn(1, 8, 20, 20),), + ) diff --git a/backends/test/suite/operators/test_avgpool3d.py b/backends/test/suite/operators/test_avgpool3d.py index 9e9b05907bc..7539986a3f2 100644 --- a/backends/test/suite/operators/test_avgpool3d.py +++ b/backends/test/suite/operators/test_avgpool3d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -42,122 +37,111 @@ def forward(self, x): return self.avgpool(x) -@operator_test -class AvgPool3d(OperatorTest): - @dtype_test - def test_avgpool3d_dtype(self, flow: TestFlow, dtype) -> None: - # Input shape: (batch_size, channels, depth, height, width) - self._test_op( - Model().to(dtype), - ((torch.rand(1, 4, 8, 8, 8) * 10).to(dtype),), - flow, - ) - - def test_avgpool3d_kernel_size(self, flow: TestFlow) -> None: - # Test with different kernel sizes - self._test_op( - Model(kernel_size=1), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(kernel_size=(1, 2, 2)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - - def test_avgpool3d_stride(self, flow: TestFlow) -> None: - # Test with different stride values - self._test_op( - Model(stride=2), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(stride=(1, 2, 2)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - - def test_avgpool3d_padding(self, flow: TestFlow) -> None: - # Test with different padding values - self._test_op( - Model(padding=1), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(padding=(0, 1, 1)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - - def test_avgpool3d_ceil_mode(self, flow: TestFlow) -> None: - # Test with ceil_mode=True - self._test_op( - Model(ceil_mode=True), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - - def test_avgpool3d_count_include_pad(self, flow: TestFlow) -> None: 
- # Test with count_include_pad=False - self._test_op( - Model(padding=1, count_include_pad=False), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - - def test_avgpool3d_batch_sizes(self, flow: TestFlow) -> None: - # Test with batch inputs - self._test_op( - Model(), - (torch.randn(2, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(8, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(16, 4, 8, 8, 8),), - flow, - ) - - def test_avgpool3d_input_sizes(self, flow: TestFlow) -> None: - # Test with different input sizes - self._test_op( - Model(), - (torch.randn(1, 2, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 8, 8, 8),), - flow, - ) - - def test_avgpool3d_combinations(self, flow: TestFlow) -> None: - # Test with combinations of parameters - self._test_op( - Model(kernel_size=2, stride=2, padding=1), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), - (torch.randn(1, 4, 10, 10, 10),), - flow, - ) - self._test_op( - Model( - kernel_size=(2, 2, 2), - stride=(1, 2, 2), - padding=(0, 1, 1), - count_include_pad=False, - ), - (torch.randn(1, 4, 8, 10, 10),), - flow, - ) +@parameterize_by_dtype +def test_avgpool3d_dtype(test_runner, dtype) -> None: + # Input shape: (batch_size, channels, depth, height, width) + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(1, 4, 8, 8, 8) * 10).to(dtype),), + ) + + +def test_avgpool3d_kernel_size(test_runner) -> None: + # Test with different kernel sizes + test_runner.lower_and_run_model( + Model(kernel_size=1), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=(1, 2, 2)), + (torch.randn(1, 4, 8, 8, 8),), + ) + + +def test_avgpool3d_stride(test_runner) -> None: + # Test with different stride values + test_runner.lower_and_run_model( + Model(stride=2), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(stride=(1, 2, 2)), + (torch.randn(1, 4, 8, 8, 8),), + ) + + +def test_avgpool3d_padding(test_runner) -> None: + # Test with different padding values + test_runner.lower_and_run_model( + Model(padding=1), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(padding=(0, 1, 1)), + (torch.randn(1, 4, 8, 8, 8),), + ) + + +def test_avgpool3d_ceil_mode(test_runner) -> None: + # Test with ceil_mode=True + test_runner.lower_and_run_model( + Model(ceil_mode=True), + (torch.randn(1, 4, 8, 8, 8),), + ) + + +def test_avgpool3d_count_include_pad(test_runner) -> None: + # Test with count_include_pad=False + test_runner.lower_and_run_model( + Model(padding=1, count_include_pad=False), + (torch.randn(1, 4, 8, 8, 8),), + ) + + +def test_avgpool3d_batch_sizes(test_runner) -> None: + # Test with batch inputs + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(16, 4, 8, 8, 8),), + ) + + +def test_avgpool3d_input_sizes(test_runner) -> None: + # Test with different input sizes + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 2, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 8, 8, 8),), + ) + + +def test_avgpool3d_combinations(test_runner) -> None: + # Test with combinations of parameters + test_runner.lower_and_run_model( + Model(kernel_size=2, stride=2, padding=1), + (torch.randn(1, 4, 8, 8, 8),), + ) + 
test_runner.lower_and_run_model( + Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), + (torch.randn(1, 4, 10, 10, 10),), + ) + test_runner.lower_and_run_model( + Model( + kernel_size=(2, 2, 2), + stride=(1, 2, 2), + padding=(0, 1, 1), + count_include_pad=False, + ), + (torch.randn(1, 4, 8, 10, 10),), + ) diff --git a/backends/test/suite/operators/test_cat.py b/backends/test/suite/operators/test_cat.py index 9cf858425be..9d08396d87e 100644 --- a/backends/test/suite/operators/test_cat.py +++ b/backends/test/suite/operators/test_cat.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class CatModel(torch.nn.Module): @@ -26,151 +21,140 @@ def forward(self, x1, x2, x3): return torch.cat([x1, x2, x3], dim=self.dim) -@operator_test -class Cat(OperatorTest): - @dtype_test - def test_cat_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - CatModel(), - ( - torch.rand(8, 32).to(dtype), - torch.rand(12, 32).to(dtype), - torch.rand(16, 32).to(dtype), - ), - flow, - ) - - def test_cat_dimensions(self, flow: TestFlow) -> None: - self._test_op( - CatModel(dim=0), - ( - torch.randn(8, 32), - torch.randn(12, 32), - torch.randn(16, 32), - ), - flow, - ) - - self._test_op( - CatModel(dim=1), - ( - torch.randn(16, 8), - torch.randn(16, 12), - torch.randn(16, 16), - ), - flow, - ) - - self._test_op( - CatModel(dim=2), - ( - torch.randn(4, 8, 4), - torch.randn(4, 8, 8), - torch.randn(4, 8, 12), - ), - flow, - ) - - def test_cat_negative_dim(self, flow: TestFlow) -> None: - self._test_op( - CatModel(dim=-1), - ( - torch.randn(16, 8), - torch.randn(16, 12), - torch.randn(16, 16), - ), - flow, - ) - - self._test_op( - CatModel(dim=-2), - ( - torch.randn(8, 32), - torch.randn(12, 32), - torch.randn(16, 32), - ), - flow, - ) - - def test_cat_different_shapes(self, flow: TestFlow) -> None: - self._test_op( - CatModel(), - ( - torch.randn(128), - torch.randn(256), - torch.randn(384), - ), - flow, - ) - - self._test_op( - CatModel(dim=0), - ( - torch.randn(4, 8, 16), - torch.randn(8, 8, 16), - torch.randn(12, 8, 16), - ), - flow, - ) - - self._test_op( - CatModel(dim=1), - ( - torch.randn(8, 4, 16), - torch.randn(8, 8, 16), - torch.randn(8, 12, 16), - ), - flow, - ) - - self._test_op( - CatModel(dim=2), - ( - torch.randn(8, 12, 4), - torch.randn(8, 12, 8), - torch.randn(8, 12, 12), - ), - flow, - ) - - def test_cat_broadcast(self, flow: TestFlow) -> None: - self._test_op( - CatModel(dim=0), - ( - torch.randn(2, 16, 32), - torch.randn(4, 16, 32), - torch.randn(6, 16, 32), - ), - flow, - ) - - self._test_op( - CatModel(dim=1), - ( - torch.randn(8, 8, 16), - torch.randn(8, 16, 16), - torch.randn(8, 24, 16), - ), - flow, - ) - - self._test_op( - CatModel(dim=2), - ( - torch.randn(4, 16, 8), - torch.randn(4, 16, 16), - torch.randn(4, 16, 24), - ), - flow, - ) - - def test_cat_same_shapes(self, flow: TestFlow) -> None: - self._test_op( - CatModel(), - ( - torch.randn(8, 32), - torch.randn(8, 32), - torch.randn(8, 32), - ), - flow, - ) +@parameterize_by_dtype +def test_cat_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + CatModel(), + ( + torch.rand(8, 32).to(dtype), + torch.rand(12, 32).to(dtype), + torch.rand(16, 32).to(dtype), + ), + ) + + +def test_cat_dimensions(test_runner) -> None: + test_runner.lower_and_run_model( + CatModel(dim=0), + ( + torch.randn(8, 32), 
+ torch.randn(12, 32), + torch.randn(16, 32), + ), + ) + + test_runner.lower_and_run_model( + CatModel(dim=1), + ( + torch.randn(16, 8), + torch.randn(16, 12), + torch.randn(16, 16), + ), + ) + + test_runner.lower_and_run_model( + CatModel(dim=2), + ( + torch.randn(4, 8, 4), + torch.randn(4, 8, 8), + torch.randn(4, 8, 12), + ), + ) + + +def test_cat_negative_dim(test_runner) -> None: + test_runner.lower_and_run_model( + CatModel(dim=-1), + ( + torch.randn(16, 8), + torch.randn(16, 12), + torch.randn(16, 16), + ), + ) + + test_runner.lower_and_run_model( + CatModel(dim=-2), + ( + torch.randn(8, 32), + torch.randn(12, 32), + torch.randn(16, 32), + ), + ) + + +def test_cat_different_shapes(test_runner) -> None: + test_runner.lower_and_run_model( + CatModel(), + ( + torch.randn(128), + torch.randn(256), + torch.randn(384), + ), + ) + + test_runner.lower_and_run_model( + CatModel(dim=0), + ( + torch.randn(4, 8, 16), + torch.randn(8, 8, 16), + torch.randn(12, 8, 16), + ), + ) + + test_runner.lower_and_run_model( + CatModel(dim=1), + ( + torch.randn(8, 4, 16), + torch.randn(8, 8, 16), + torch.randn(8, 12, 16), + ), + ) + + test_runner.lower_and_run_model( + CatModel(dim=2), + ( + torch.randn(8, 12, 4), + torch.randn(8, 12, 8), + torch.randn(8, 12, 12), + ), + ) + + +def test_cat_broadcast(test_runner) -> None: + test_runner.lower_and_run_model( + CatModel(dim=0), + ( + torch.randn(2, 16, 32), + torch.randn(4, 16, 32), + torch.randn(6, 16, 32), + ), + ) + + test_runner.lower_and_run_model( + CatModel(dim=1), + ( + torch.randn(8, 8, 16), + torch.randn(8, 16, 16), + torch.randn(8, 24, 16), + ), + ) + + test_runner.lower_and_run_model( + CatModel(dim=2), + ( + torch.randn(4, 16, 8), + torch.randn(4, 16, 16), + torch.randn(4, 16, 24), + ), + ) + + +def test_cat_same_shapes(test_runner) -> None: + test_runner.lower_and_run_model( + CatModel(), + ( + torch.randn(8, 32), + torch.randn(8, 32), + torch.randn(8, 32), + ), + ) diff --git a/backends/test/suite/operators/test_ceil.py b/backends/test/suite/operators/test_ceil.py index 4d7c0a5e888..b89c8147865 100644 --- a/backends/test/suite/operators/test_ceil.py +++ b/backends/test/suite/operators/test_ceil.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class CeilModel(torch.nn.Module): @@ -27,34 +22,38 @@ def forward(self, x): return torch.ceil(x) -@operator_test -class TestCeil(OperatorTest): - @dtype_test - def test_ceil_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = CeilModel().to(dtype) - self._test_op(model, (torch.rand(10, 10).to(dtype) * 2 - 1,), flow) +@parameterize_by_dtype +def test_ceil_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = CeilModel().to(dtype) + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype) * 2 - 1,)) - def test_ceil_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - # 1D tensor - self._test_op(CeilModel(), (torch.randn(20),), flow) +def test_ceil_shapes(test_runner) -> None: + # Test with different tensor shapes - # 2D tensor - self._test_op(CeilModel(), (torch.randn(5, 10),), flow) + # 1D tensor + test_runner.lower_and_run_model(CeilModel(), (torch.randn(20),)) - # 3D tensor - self._test_op(CeilModel(), (torch.randn(3, 4, 5),), flow) + # 2D tensor + 
test_runner.lower_and_run_model(CeilModel(), (torch.randn(5, 10),)) - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_ceil_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases + # 3D tensor + test_runner.lower_and_run_model(CeilModel(), (torch.randn(3, 4, 5),)) - # Tensor with infinity - x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) - self._test_op(CeilModel(), (x,), flow, generate_random_test_inputs=False) - # Tensor with NaN - x = torch.tensor([float("nan"), 1.0, -1.0]) - self._test_op(CeilModel(), (x,), flow, generate_random_test_inputs=False) +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_ceil_edge_cases(test_runner) -> None: + # Test edge cases + + # Tensor with infinity + x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) + test_runner.lower_and_run_model( + CeilModel(), (x,), generate_random_test_inputs=False + ) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.0, -1.0]) + test_runner.lower_and_run_model( + CeilModel(), (x,), generate_random_test_inputs=False + ) diff --git a/backends/test/suite/operators/test_clamp.py b/backends/test/suite/operators/test_clamp.py index 49419f0453a..df4bd83b88b 100644 --- a/backends/test/suite/operators/test_clamp.py +++ b/backends/test/suite/operators/test_clamp.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class ClampModel(torch.nn.Module): @@ -29,58 +24,58 @@ def forward(self, x): return torch.clamp(x, min=self.min_val, max=self.max_val) -@operator_test -class TestClamp(OperatorTest): - @dtype_test - def test_clamp_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = ClampModel(min_val=-0.5, max_val=0.5).to(dtype) - self._test_op(model, (torch.rand(10, 10).to(dtype) * 2 - 1,), flow) - - def test_clamp_min_only(self, flow: TestFlow) -> None: - # Test with only min value specified - self._test_op(ClampModel(min_val=0.0), (torch.randn(10, 10),), flow) - - def test_clamp_max_only(self, flow: TestFlow) -> None: - # Test with only max value specified - self._test_op(ClampModel(max_val=0.0), (torch.randn(10, 10),), flow) - - def test_clamp_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - model = ClampModel(min_val=-1.0, max_val=1.0) - - # 1D tensor - self._test_op(model, (torch.randn(20),), flow) - - # 2D tensor - self._test_op(model, (torch.randn(5, 10),), flow) - - # 3D tensor - self._test_op(model, (torch.randn(3, 4, 5),), flow) - - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_clamp_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases - - # Min equals max - self._test_op( - ClampModel(min_val=0.0, max_val=0.0), (torch.randn(10, 10),), flow - ) - - # Tensor with infinity - x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) - self._test_op( - ClampModel(min_val=-2.0, max_val=2.0), - (x,), - flow, - generate_random_test_inputs=False, - ) - - # Tensor with NaN - x = torch.tensor([float("nan"), 1.0, -1.0]) - self._test_op( - ClampModel(min_val=-2.0, max_val=2.0), - (x,), - flow, - generate_random_test_inputs=False, - ) +@parameterize_by_dtype +def test_clamp_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = ClampModel(min_val=-0.5, max_val=0.5).to(dtype) + test_runner.lower_and_run_model(model, 
(torch.rand(10, 10).to(dtype) * 2 - 1,)) + + +def test_clamp_min_only(test_runner) -> None: + # Test with only min value specified + test_runner.lower_and_run_model(ClampModel(min_val=0.0), (torch.randn(10, 10),)) + + +def test_clamp_max_only(test_runner) -> None: + # Test with only max value specified + test_runner.lower_and_run_model(ClampModel(max_val=0.0), (torch.randn(10, 10),)) + + +def test_clamp_shapes(test_runner) -> None: + # Test with different tensor shapes + model = ClampModel(min_val=-1.0, max_val=1.0) + + # 1D tensor + test_runner.lower_and_run_model(model, (torch.randn(20),)) + + # 2D tensor + test_runner.lower_and_run_model(model, (torch.randn(5, 10),)) + + # 3D tensor + test_runner.lower_and_run_model(model, (torch.randn(3, 4, 5),)) + + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_clamp_edge_cases(test_runner) -> None: + # Test edge cases + + # Min equals max + test_runner.lower_and_run_model( + ClampModel(min_val=0.0, max_val=0.0), (torch.randn(10, 10),) + ) + + # Tensor with infinity + x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) + test_runner.lower_and_run_model( + ClampModel(min_val=-2.0, max_val=2.0), + (x,), + generate_random_test_inputs=False, + ) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.0, -1.0]) + test_runner.lower_and_run_model( + ClampModel(min_val=-2.0, max_val=2.0), + (x,), + generate_random_test_inputs=False, + ) diff --git a/backends/test/suite/operators/test_conv1d.py b/backends/test/suite/operators/test_conv1d.py index c34dc7a73a7..1db1e26eb22 100644 --- a/backends/test/suite/operators/test_conv1d.py +++ b/backends/test/suite/operators/test_conv1d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -47,98 +42,92 @@ def forward(self, x): return self.conv(x) -@operator_test -class Conv1d(OperatorTest): - @dtype_test - def test_conv1d_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model().to(dtype), - ((torch.rand(4, 3, 50) * 10).to(dtype),), - flow, - ) +@parameterize_by_dtype +def test_conv1d_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(4, 3, 50) * 10).to(dtype),), + ) - def test_conv1d_basic(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(4, 3, 50),), - flow, - ) - def test_conv1d_kernel_size(self, flow: TestFlow) -> None: - self._test_op( - Model(kernel_size=1), - (torch.randn(4, 3, 50),), - flow, - ) - self._test_op( - Model(kernel_size=5), - (torch.randn(4, 3, 50),), - flow, - ) +def test_conv1d_basic(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(4, 3, 50),), + ) - def test_conv1d_stride(self, flow: TestFlow) -> None: - self._test_op( - Model(stride=2), - (torch.randn(4, 3, 50),), - flow, - ) - def test_conv1d_padding(self, flow: TestFlow) -> None: - self._test_op( - Model(padding=1), - (torch.randn(4, 3, 50),), - flow, - ) - self._test_op( - Model(padding=2), - (torch.randn(4, 3, 50),), - flow, - ) +def test_conv1d_kernel_size(test_runner) -> None: + test_runner.lower_and_run_model( + Model(kernel_size=1), + (torch.randn(4, 3, 50),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=5), + (torch.randn(4, 3, 50),), + ) - def test_conv1d_dilation(self, flow: TestFlow) -> None: - self._test_op( - 
Model(dilation=2), - (torch.randn(4, 3, 50),), - flow, - ) - def test_conv1d_groups(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=6, out_channels=6, groups=3), - (torch.randn(4, 6, 50),), - flow, - ) +def test_conv1d_stride(test_runner) -> None: + test_runner.lower_and_run_model( + Model(stride=2), + (torch.randn(4, 3, 50),), + ) + + +def test_conv1d_padding(test_runner) -> None: + test_runner.lower_and_run_model( + Model(padding=1), + (torch.randn(4, 3, 50),), + ) + test_runner.lower_and_run_model( + Model(padding=2), + (torch.randn(4, 3, 50),), + ) + + +def test_conv1d_dilation(test_runner) -> None: + test_runner.lower_and_run_model( + Model(dilation=2), + (torch.randn(4, 3, 50),), + ) + + +def test_conv1d_groups(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(4, 6, 50),), + ) - def test_conv1d_depthwise(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=8, out_channels=8, groups=8), - (torch.randn(4, 8, 50),), - flow, - ) - def test_conv1d_no_bias(self, flow: TestFlow) -> None: - self._test_op( - Model(bias=False), +def test_conv1d_depthwise(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=8, out_channels=8, groups=8), + (torch.randn(4, 8, 50),), + ) + + +def test_conv1d_no_bias(test_runner) -> None: + test_runner.lower_and_run_model( + Model(bias=False), + (torch.randn(4, 3, 50),), + ) + + +def test_conv1d_padding_modes(test_runner) -> None: + for mode in ["zeros", "reflect", "replicate", "circular"]: + test_runner.lower_and_run_model( + Model(padding=1, padding_mode=mode), (torch.randn(4, 3, 50),), - flow, ) - def test_conv1d_padding_modes(self, flow: TestFlow) -> None: - for mode in ["zeros", "reflect", "replicate", "circular"]: - self._test_op( - Model(padding=1, padding_mode=mode), - (torch.randn(4, 3, 50),), - flow, - ) - - def test_conv1d_channels(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=1, out_channels=1), - (torch.randn(4, 1, 50),), - flow, - ) - self._test_op( - Model(in_channels=5, out_channels=10), - (torch.randn(4, 5, 50),), - flow, - ) + +def test_conv1d_channels(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=1, out_channels=1), + (torch.randn(4, 1, 50),), + ) + test_runner.lower_and_run_model( + Model(in_channels=5, out_channels=10), + (torch.randn(4, 5, 50),), + ) diff --git a/backends/test/suite/operators/test_conv2d.py b/backends/test/suite/operators/test_conv2d.py index 04fee271a49..a0267a6482f 100644 --- a/backends/test/suite/operators/test_conv2d.py +++ b/backends/test/suite/operators/test_conv2d.py @@ -9,13 +9,8 @@ from typing import Tuple, Union import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -48,120 +43,111 @@ def forward(self, x): return self.conv(x) -@operator_test -class Conv2d(OperatorTest): - @dtype_test - def test_conv2d_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model().to(dtype), - ((torch.rand(4, 3, 16, 16) * 10).to(dtype),), - flow, - ) - - def test_conv2d_basic(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(4, 3, 16, 16),), - flow, - ) - - def test_conv2d_kernel_size(self, flow: TestFlow) -> None: - self._test_op( - Model(kernel_size=1), - (torch.randn(4, 3, 16, 16),), - flow, - 
) - self._test_op( - Model(kernel_size=5), - (torch.randn(4, 3, 16, 16),), - flow, - ) - self._test_op( - Model(kernel_size=(3, 5)), - (torch.randn(4, 3, 16, 16),), - flow, - ) - - def test_conv2d_stride(self, flow: TestFlow) -> None: - self._test_op( - Model(stride=2), - (torch.randn(4, 3, 16, 16),), - flow, - ) - self._test_op( - Model(stride=(2, 1)), - (torch.randn(4, 3, 16, 16),), - flow, - ) - - def test_conv2d_padding(self, flow: TestFlow) -> None: - self._test_op( - Model(padding=1), - (torch.randn(4, 3, 16, 16),), - flow, - ) - self._test_op( - Model(padding=(1, 2)), - (torch.randn(4, 3, 16, 16),), - flow, - ) - - def test_conv2d_dilation(self, flow: TestFlow) -> None: - self._test_op( - Model(dilation=2), - (torch.randn(4, 3, 16, 16),), - flow, - ) - self._test_op( - Model(dilation=(2, 1)), +@parameterize_by_dtype +def test_conv2d_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(4, 3, 16, 16) * 10).to(dtype),), + ) + + +def test_conv2d_basic(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_conv2d_kernel_size(test_runner) -> None: + test_runner.lower_and_run_model( + Model(kernel_size=1), + (torch.randn(4, 3, 16, 16),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=5), + (torch.randn(4, 3, 16, 16),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=(3, 5)), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_conv2d_stride(test_runner) -> None: + test_runner.lower_and_run_model( + Model(stride=2), + (torch.randn(4, 3, 16, 16),), + ) + test_runner.lower_and_run_model( + Model(stride=(2, 1)), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_conv2d_padding(test_runner) -> None: + test_runner.lower_and_run_model( + Model(padding=1), + (torch.randn(4, 3, 16, 16),), + ) + test_runner.lower_and_run_model( + Model(padding=(1, 2)), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_conv2d_dilation(test_runner) -> None: + test_runner.lower_and_run_model( + Model(dilation=2), + (torch.randn(4, 3, 16, 16),), + ) + test_runner.lower_and_run_model( + Model(dilation=(2, 1)), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_conv2d_groups(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(4, 6, 16, 16),), + ) + + +def test_conv2d_depthwise(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=8, out_channels=8, groups=8), + (torch.randn(4, 8, 16, 16),), + ) + + +def test_conv2d_no_bias(test_runner) -> None: + test_runner.lower_and_run_model( + Model(bias=False), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_conv2d_padding_modes(test_runner) -> None: + for mode in ["zeros", "reflect", "replicate", "circular"]: + test_runner.lower_and_run_model( + Model(padding=1, padding_mode=mode), (torch.randn(4, 3, 16, 16),), - flow, ) - def test_conv2d_groups(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=6, out_channels=6, groups=3), - (torch.randn(4, 6, 16, 16),), - flow, - ) - - def test_conv2d_depthwise(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=8, out_channels=8, groups=8), - (torch.randn(4, 8, 16, 16),), - flow, - ) - def test_conv2d_no_bias(self, flow: TestFlow) -> None: - self._test_op( - Model(bias=False), - (torch.randn(4, 3, 16, 16),), - flow, - ) +def test_conv2d_channels(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=1, out_channels=1), + (torch.randn(4, 1, 16, 16),), + ) + 
test_runner.lower_and_run_model( + Model(in_channels=5, out_channels=10), + (torch.randn(4, 5, 16, 16),), + ) - def test_conv2d_padding_modes(self, flow: TestFlow) -> None: - for mode in ["zeros", "reflect", "replicate", "circular"]: - self._test_op( - Model(padding=1, padding_mode=mode), - (torch.randn(4, 3, 16, 16),), - flow, - ) - - def test_conv2d_channels(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=1, out_channels=1), - (torch.randn(4, 1, 16, 16),), - flow, - ) - self._test_op( - Model(in_channels=5, out_channels=10), - (torch.randn(4, 5, 16, 16),), - flow, - ) - def test_conv2d_different_spatial_dims(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(4, 3, 20, 16),), - flow, - ) +def test_conv2d_different_spatial_dims(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(4, 3, 20, 16),), + ) diff --git a/backends/test/suite/operators/test_conv3d.py b/backends/test/suite/operators/test_conv3d.py index 01ffa4942df..9ce802f63d4 100644 --- a/backends/test/suite/operators/test_conv3d.py +++ b/backends/test/suite/operators/test_conv3d.py @@ -9,13 +9,8 @@ from typing import Tuple, Union import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -48,115 +43,107 @@ def forward(self, x): return self.conv(x) -@operator_test -class Conv3d(OperatorTest): - @dtype_test - def test_conv3d_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model().to(dtype), - ((torch.rand(4, 3, 8, 8, 8) * 10).to(dtype),), - flow, - ) - - def test_conv3d_basic(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - - def test_conv3d_kernel_size(self, flow: TestFlow) -> None: - self._test_op( - Model(kernel_size=1), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - self._test_op( - Model(kernel_size=(1, 3, 3)), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - - def test_conv3d_stride(self, flow: TestFlow) -> None: - self._test_op( - Model(stride=2), - (torch.randn(4, 3, 12, 12, 12),), - flow, - ) - self._test_op( - Model(stride=(1, 2, 2)), - (torch.randn(4, 3, 8, 12, 12),), - flow, - ) - - def test_conv3d_padding(self, flow: TestFlow) -> None: - self._test_op( - Model(padding=1), +@parameterize_by_dtype +def test_conv3d_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(4, 3, 8, 8, 8) * 10).to(dtype),), + ) + + +def test_conv3d_basic(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(4, 3, 8, 8, 8),), + ) + + +def test_conv3d_kernel_size(test_runner) -> None: + test_runner.lower_and_run_model( + Model(kernel_size=1), + (torch.randn(4, 3, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=(1, 3, 3)), + (torch.randn(4, 3, 8, 8, 8),), + ) + + +def test_conv3d_stride(test_runner) -> None: + test_runner.lower_and_run_model( + Model(stride=2), + (torch.randn(4, 3, 12, 12, 12),), + ) + test_runner.lower_and_run_model( + Model(stride=(1, 2, 2)), + (torch.randn(4, 3, 8, 12, 12),), + ) + + +def test_conv3d_padding(test_runner) -> None: + test_runner.lower_and_run_model( + Model(padding=1), + (torch.randn(4, 3, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(padding=(0, 1, 1)), + (torch.randn(4, 3, 8, 8, 8),), + ) + + +def test_conv3d_dilation(test_runner) -> 
None: + test_runner.lower_and_run_model( + Model(dilation=2), + (torch.randn(4, 3, 12, 12, 12),), + ) + test_runner.lower_and_run_model( + Model(dilation=(1, 2, 2)), + (torch.randn(4, 3, 8, 12, 12),), + ) + + +def test_conv3d_groups(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(4, 6, 8, 8, 8),), + ) + + +def test_conv3d_depthwise(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=8, out_channels=8, groups=8), + (torch.randn(4, 8, 8, 8, 8),), + ) + + +def test_conv3d_no_bias(test_runner) -> None: + test_runner.lower_and_run_model( + Model(bias=False), + (torch.randn(4, 3, 8, 8, 8),), + ) + + +def test_conv3d_padding_modes(test_runner) -> None: + for mode in ["zeros", "reflect", "replicate", "circular"]: + test_runner.lower_and_run_model( + Model(padding=1, padding_mode=mode), (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - self._test_op( - Model(padding=(0, 1, 1)), - (torch.randn(4, 3, 8, 8, 8),), - flow, ) - def test_conv3d_dilation(self, flow: TestFlow) -> None: - self._test_op( - Model(dilation=2), - (torch.randn(4, 3, 12, 12, 12),), - flow, - ) - self._test_op( - Model(dilation=(1, 2, 2)), - (torch.randn(4, 3, 8, 12, 12),), - flow, - ) - def test_conv3d_groups(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=6, out_channels=6, groups=3), - (torch.randn(4, 6, 8, 8, 8),), - flow, - ) +def test_conv3d_channels(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=1, out_channels=1), + (torch.randn(4, 1, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(in_channels=5, out_channels=10), + (torch.randn(4, 5, 8, 8, 8),), + ) - def test_conv3d_depthwise(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=8, out_channels=8, groups=8), - (torch.randn(4, 8, 8, 8, 8),), - flow, - ) - def test_conv3d_no_bias(self, flow: TestFlow) -> None: - self._test_op( - Model(bias=False), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - - def test_conv3d_padding_modes(self, flow: TestFlow) -> None: - for mode in ["zeros", "reflect", "replicate", "circular"]: - self._test_op( - Model(padding=1, padding_mode=mode), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - - def test_conv3d_channels(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=1, out_channels=1), - (torch.randn(4, 1, 8, 8, 8),), - flow, - ) - self._test_op( - Model(in_channels=5, out_channels=10), - (torch.randn(4, 5, 8, 8, 8),), - flow, - ) - - def test_conv3d_different_spatial_dims(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(4, 3, 6, 8, 10),), - flow, - ) +def test_conv3d_different_spatial_dims(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(4, 3, 6, 8, 10),), + ) diff --git a/backends/test/suite/operators/test_convtranspose1d.py b/backends/test/suite/operators/test_convtranspose1d.py index 178121eb5c3..174573269a4 100644 --- a/backends/test/suite/operators/test_convtranspose1d.py +++ b/backends/test/suite/operators/test_convtranspose1d.py @@ -9,13 +9,8 @@ from typing import Tuple, Union import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -48,97 +43,91 @@ def forward(self, x): return self.conv_transpose(x) -@operator_test -class ConvTranspose1d(OperatorTest): - @dtype_test - def 
test_convtranspose1d_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model().to(dtype), - ((torch.rand(4, 3, 50) * 10).to(dtype),), - flow, - ) +@parameterize_by_dtype +def test_convtranspose1d_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(4, 3, 50) * 10).to(dtype),), + ) - def test_convtranspose1d_basic(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(4, 3, 50),), - flow, - ) - def test_convtranspose1d_kernel_size(self, flow: TestFlow) -> None: - self._test_op( - Model(kernel_size=1), - (torch.randn(4, 3, 50),), - flow, - ) - self._test_op( - Model(kernel_size=5), - (torch.randn(4, 3, 50),), - flow, - ) +def test_convtranspose1d_basic(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(4, 3, 50),), + ) - def test_convtranspose1d_stride(self, flow: TestFlow) -> None: - self._test_op( - Model(stride=2), - (torch.randn(4, 3, 50),), - flow, - ) - def test_convtranspose1d_padding(self, flow: TestFlow) -> None: - self._test_op( - Model(padding=1), - (torch.randn(4, 3, 50),), - flow, - ) - self._test_op( - Model(padding=2), - (torch.randn(4, 3, 50),), - flow, - ) +def test_convtranspose1d_kernel_size(test_runner) -> None: + test_runner.lower_and_run_model( + Model(kernel_size=1), + (torch.randn(4, 3, 50),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=5), + (torch.randn(4, 3, 50),), + ) - def test_convtranspose1d_output_padding(self, flow: TestFlow) -> None: - self._test_op( - Model(stride=2, output_padding=1), - (torch.randn(4, 3, 50),), - flow, - ) - def test_convtranspose1d_dilation(self, flow: TestFlow) -> None: - self._test_op( - Model(dilation=2), - (torch.randn(4, 3, 50),), - flow, - ) +def test_convtranspose1d_stride(test_runner) -> None: + test_runner.lower_and_run_model( + Model(stride=2), + (torch.randn(4, 3, 50),), + ) - def test_convtranspose1d_groups(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=6, out_channels=6, groups=3), - (torch.randn(4, 6, 50),), - flow, - ) - def test_convtranspose1d_depthwise(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=8, out_channels=8, groups=8), - (torch.randn(4, 8, 50),), - flow, - ) +def test_convtranspose1d_padding(test_runner) -> None: + test_runner.lower_and_run_model( + Model(padding=1), + (torch.randn(4, 3, 50),), + ) + test_runner.lower_and_run_model( + Model(padding=2), + (torch.randn(4, 3, 50),), + ) - def test_convtranspose1d_no_bias(self, flow: TestFlow) -> None: - self._test_op( - Model(bias=False), - (torch.randn(4, 3, 50),), - flow, - ) - def test_convtranspose1d_channels(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=1, out_channels=1), - (torch.randn(4, 1, 50),), - flow, - ) - self._test_op( - Model(in_channels=5, out_channels=10), - (torch.randn(4, 5, 50),), - flow, - ) +def test_convtranspose1d_output_padding(test_runner) -> None: + test_runner.lower_and_run_model( + Model(stride=2, output_padding=1), + (torch.randn(4, 3, 50),), + ) + + +def test_convtranspose1d_dilation(test_runner) -> None: + test_runner.lower_and_run_model( + Model(dilation=2), + (torch.randn(4, 3, 50),), + ) + + +def test_convtranspose1d_groups(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(4, 6, 50),), + ) + + +def test_convtranspose1d_depthwise(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=8, out_channels=8, groups=8), + (torch.randn(4, 8, 50),), + ) 
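+
+# Note: the depthwise case above sets groups == in_channels == out_channels,
+# so each channel is upsampled by its own transposed-convolution filter
+# rather than mixing information across channels.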
+ + +def test_convtranspose1d_no_bias(test_runner) -> None: + test_runner.lower_and_run_model( + Model(bias=False), + (torch.randn(4, 3, 50),), + ) + + +def test_convtranspose1d_channels(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=1, out_channels=1), + (torch.randn(4, 1, 50),), + ) + test_runner.lower_and_run_model( + Model(in_channels=5, out_channels=10), + (torch.randn(4, 5, 50),), + ) diff --git a/backends/test/suite/operators/test_convtranspose2d.py b/backends/test/suite/operators/test_convtranspose2d.py index ab2e44581d0..7fbb776d4cb 100644 --- a/backends/test/suite/operators/test_convtranspose2d.py +++ b/backends/test/suite/operators/test_convtranspose2d.py @@ -9,13 +9,8 @@ from typing import Tuple, Union import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -48,124 +43,114 @@ def forward(self, x): return self.conv_transpose(x) -@operator_test -class ConvTranspose2d(OperatorTest): - @dtype_test - def test_convtranspose2d_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model().to(dtype), - ((torch.rand(4, 3, 16, 16) * 10).to(dtype),), - flow, - ) - - def test_convtranspose2d_basic(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(4, 3, 16, 16),), - flow, - ) - - def test_convtranspose2d_kernel_size(self, flow: TestFlow) -> None: - self._test_op( - Model(kernel_size=1), - (torch.randn(4, 3, 16, 16),), - flow, - ) - self._test_op( - Model(kernel_size=5), - (torch.randn(4, 3, 16, 16),), - flow, - ) - self._test_op( - Model(kernel_size=(3, 5)), - (torch.randn(4, 3, 16, 16),), - flow, - ) - - def test_convtranspose2d_stride(self, flow: TestFlow) -> None: - self._test_op( - Model(stride=2), - (torch.randn(4, 3, 16, 16),), - flow, - ) - self._test_op( - Model(stride=(2, 1)), - (torch.randn(4, 3, 16, 16),), - flow, - ) - - def test_convtranspose2d_padding(self, flow: TestFlow) -> None: - self._test_op( - Model(padding=1), - (torch.randn(4, 3, 16, 16),), - flow, - ) - self._test_op( - Model(padding=(1, 2)), - (torch.randn(4, 3, 16, 16),), - flow, - ) - - def test_convtranspose2d_output_padding(self, flow: TestFlow) -> None: - self._test_op( - Model(stride=2, output_padding=1), - (torch.randn(4, 3, 16, 16),), - flow, - ) - self._test_op( - Model(stride=(2, 2), output_padding=(1, 0)), - (torch.randn(4, 3, 16, 16),), - flow, - ) - - def test_convtranspose2d_dilation(self, flow: TestFlow) -> None: - self._test_op( - Model(dilation=2), - (torch.randn(4, 3, 16, 16),), - flow, - ) - self._test_op( - Model(dilation=(2, 1)), - (torch.randn(4, 3, 16, 16),), - flow, - ) - - def test_convtranspose2d_groups(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=6, out_channels=6, groups=3), - (torch.randn(4, 6, 16, 16),), - flow, - ) - - def test_convtranspose2d_depthwise(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=8, out_channels=8, groups=8), - (torch.randn(4, 8, 16, 16),), - flow, - ) - - def test_convtranspose2d_no_bias(self, flow: TestFlow) -> None: - self._test_op( - Model(bias=False), - (torch.randn(4, 3, 16, 16),), - flow, - ) - - def test_convtranspose2d_channels(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=1, out_channels=1), - (torch.randn(4, 1, 16, 16),), - flow, - ) - self._test_op( - Model(in_channels=5, out_channels=10), - 
(torch.randn(4, 5, 16, 16),), - flow, - ) - - def test_convtranspose2d_different_spatial_dims(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(4, 3, 20, 16),), - flow, - ) +@parameterize_by_dtype +def test_convtranspose2d_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(4, 3, 16, 16) * 10).to(dtype),), + ) + + +def test_convtranspose2d_basic(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_convtranspose2d_kernel_size(test_runner) -> None: + test_runner.lower_and_run_model( + Model(kernel_size=1), + (torch.randn(4, 3, 16, 16),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=5), + (torch.randn(4, 3, 16, 16),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=(3, 5)), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_convtranspose2d_stride(test_runner) -> None: + test_runner.lower_and_run_model( + Model(stride=2), + (torch.randn(4, 3, 16, 16),), + ) + test_runner.lower_and_run_model( + Model(stride=(2, 1)), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_convtranspose2d_padding(test_runner) -> None: + test_runner.lower_and_run_model( + Model(padding=1), + (torch.randn(4, 3, 16, 16),), + ) + test_runner.lower_and_run_model( + Model(padding=(1, 2)), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_convtranspose2d_output_padding(test_runner) -> None: + test_runner.lower_and_run_model( + Model(stride=2, output_padding=1), + (torch.randn(4, 3, 16, 16),), + ) + test_runner.lower_and_run_model( + Model(stride=(2, 2), output_padding=(1, 0)), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_convtranspose2d_dilation(test_runner) -> None: + test_runner.lower_and_run_model( + Model(dilation=2), + (torch.randn(4, 3, 16, 16),), + ) + test_runner.lower_and_run_model( + Model(dilation=(2, 1)), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_convtranspose2d_groups(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(4, 6, 16, 16),), + ) + + +def test_convtranspose2d_depthwise(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=8, out_channels=8, groups=8), + (torch.randn(4, 8, 16, 16),), + ) + + +def test_convtranspose2d_no_bias(test_runner) -> None: + test_runner.lower_and_run_model( + Model(bias=False), + (torch.randn(4, 3, 16, 16),), + ) + + +def test_convtranspose2d_channels(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=1, out_channels=1), + (torch.randn(4, 1, 16, 16),), + ) + test_runner.lower_and_run_model( + Model(in_channels=5, out_channels=10), + (torch.randn(4, 5, 16, 16),), + ) + + +def test_convtranspose2d_different_spatial_dims(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(4, 3, 20, 16),), + ) diff --git a/backends/test/suite/operators/test_convtranspose3d.py b/backends/test/suite/operators/test_convtranspose3d.py index 4ad70042df9..be2e26b6fb2 100644 --- a/backends/test/suite/operators/test_convtranspose3d.py +++ b/backends/test/suite/operators/test_convtranspose3d.py @@ -9,13 +9,8 @@ from typing import Tuple, Union import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -48,119 +43,110 @@ def forward(self, x): return 
self.conv_transpose(x) -@operator_test -class ConvTranspose3d(OperatorTest): - @dtype_test - def test_convtranspose3d_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model().to(dtype), - ((torch.rand(4, 3, 8, 8, 8) * 10).to(dtype),), - flow, - ) - - def test_convtranspose3d_basic(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - - def test_convtranspose3d_kernel_size(self, flow: TestFlow) -> None: - self._test_op( - Model(kernel_size=1), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - self._test_op( - Model(kernel_size=(1, 3, 3)), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - - def test_convtranspose3d_stride(self, flow: TestFlow) -> None: - self._test_op( - Model(stride=2), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - self._test_op( - Model(stride=(1, 2, 2)), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - - def test_convtranspose3d_padding(self, flow: TestFlow) -> None: - self._test_op( - Model(padding=1), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - self._test_op( - Model(padding=(0, 1, 1)), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - - def test_convtranspose3d_output_padding(self, flow: TestFlow) -> None: - self._test_op( - Model(stride=2, output_padding=1), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - self._test_op( - Model(stride=(2, 2, 2), output_padding=(1, 0, 1)), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - - def test_convtranspose3d_dilation(self, flow: TestFlow) -> None: - self._test_op( - Model(dilation=2), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - self._test_op( - Model(dilation=(1, 2, 2)), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - - def test_convtranspose3d_groups(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=6, out_channels=6, groups=3), - (torch.randn(4, 6, 8, 8, 8),), - flow, - ) - - def test_convtranspose3d_depthwise(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=8, out_channels=8, groups=8), - (torch.randn(4, 8, 8, 8, 8),), - flow, - ) - - def test_convtranspose3d_no_bias(self, flow: TestFlow) -> None: - self._test_op( - Model(bias=False), - (torch.randn(4, 3, 8, 8, 8),), - flow, - ) - - def test_convtranspose3d_channels(self, flow: TestFlow) -> None: - self._test_op( - Model(in_channels=1, out_channels=1), - (torch.randn(4, 1, 8, 8, 8),), - flow, - ) - self._test_op( - Model(in_channels=5, out_channels=10), - (torch.randn(4, 5, 8, 8, 8),), - flow, - ) - - def test_convtranspose3d_different_spatial_dims(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(4, 3, 6, 8, 10),), - flow, - ) +@parameterize_by_dtype +def test_convtranspose3d_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(4, 3, 8, 8, 8) * 10).to(dtype),), + ) + + +def test_convtranspose3d_basic(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(4, 3, 8, 8, 8),), + ) + + +def test_convtranspose3d_kernel_size(test_runner) -> None: + test_runner.lower_and_run_model( + Model(kernel_size=1), + (torch.randn(4, 3, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=(1, 3, 3)), + (torch.randn(4, 3, 8, 8, 8),), + ) + + +def test_convtranspose3d_stride(test_runner) -> None: + test_runner.lower_and_run_model( + Model(stride=2), + (torch.randn(4, 3, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(stride=(1, 2, 2)), + (torch.randn(4, 3, 8, 8, 8),), + ) + + +def test_convtranspose3d_padding(test_runner) -> None: + test_runner.lower_and_run_model( + 
Model(padding=1), + (torch.randn(4, 3, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(padding=(0, 1, 1)), + (torch.randn(4, 3, 8, 8, 8),), + ) + + +def test_convtranspose3d_output_padding(test_runner) -> None: + test_runner.lower_and_run_model( + Model(stride=2, output_padding=1), + (torch.randn(4, 3, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(stride=(2, 2, 2), output_padding=(1, 0, 1)), + (torch.randn(4, 3, 8, 8, 8),), + ) + + +def test_convtranspose3d_dilation(test_runner) -> None: + test_runner.lower_and_run_model( + Model(dilation=2), + (torch.randn(4, 3, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(dilation=(1, 2, 2)), + (torch.randn(4, 3, 8, 8, 8),), + ) + + +def test_convtranspose3d_groups(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=6, out_channels=6, groups=3), + (torch.randn(4, 6, 8, 8, 8),), + ) + + +def test_convtranspose3d_depthwise(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=8, out_channels=8, groups=8), + (torch.randn(4, 8, 8, 8, 8),), + ) + + +def test_convtranspose3d_no_bias(test_runner) -> None: + test_runner.lower_and_run_model( + Model(bias=False), + (torch.randn(4, 3, 8, 8, 8),), + ) + + +def test_convtranspose3d_channels(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_channels=1, out_channels=1), + (torch.randn(4, 1, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(in_channels=5, out_channels=10), + (torch.randn(4, 5, 8, 8, 8),), + ) + + +def test_convtranspose3d_different_spatial_dims(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(4, 3, 6, 8, 10),), + ) diff --git a/backends/test/suite/operators/test_div.py b/backends/test/suite/operators/test_div.py index 656d350585d..77e61678479 100644 --- a/backends/test/suite/operators/test_div.py +++ b/backends/test/suite/operators/test_div.py @@ -10,13 +10,8 @@ from typing import Optional import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -33,72 +28,69 @@ def forward(self, x, y): return torch.div(x, y, rounding_mode=self.rounding_mode) -@operator_test -class Divide(OperatorTest): - @dtype_test - def test_divide_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model(), - ( - (torch.rand(2, 10) * 100).to(dtype), - (torch.rand(2, 10) * 100 + 0.1).to( - dtype - ), # Adding 0.1 to avoid division by zero - ), - flow, - ) - - def test_divide_f32_bcast_first(self, flow: TestFlow) -> None: - self._test_op( - Model(), - ( - torch.randn(5), - torch.randn(1, 5, 1, 5).abs() - + 0.1, # Using abs and adding 0.1 to avoid division by zero - ), - flow, - ) - - def test_divide_f32_bcast_second(self, flow: TestFlow) -> None: - self._test_op( - Model(), - ( - torch.randn(4, 4, 2, 7), - torch.randn(2, 7).abs() - + 0.1, # Using abs and adding 0.1 to avoid division by zero - ), - flow, - ) - - def test_divide_f32_bcast_unary(self, flow: TestFlow) -> None: - self._test_op( - Model(), - ( - torch.randn(5), - torch.randn(1, 1, 5).abs() - + 0.1, # Using abs and adding 0.1 to avoid division by zero - ), - flow, - ) - - def test_divide_f32_trunc(self, flow: TestFlow) -> None: - self._test_op( - ModelWithRounding(rounding_mode="trunc"), - ( - torch.randn(3, 4) * 10, - torch.randn(3, 4).abs() - + 0.1, # Using abs and adding 0.1 to avoid division 
by zero - ), - flow, - ) - - def test_divide_f32_floor(self, flow: TestFlow) -> None: - self._test_op( - ModelWithRounding(rounding_mode="floor"), - ( - torch.randn(3, 4) * 10, - torch.randn(3, 4).abs() - + 0.1, # Using abs and adding 0.1 to avoid division by zero - ), - flow, - ) +@parameterize_by_dtype +def test_divide_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model(), + ( + (torch.rand(2, 10) * 100).to(dtype), + (torch.rand(2, 10) * 100 + 0.1).to( + dtype + ), # Adding 0.1 to avoid division by zero + ), + ) + + +def test_divide_f32_bcast_first(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(5), + torch.randn(1, 5, 1, 5).abs() + + 0.1, # Using abs and adding 0.1 to avoid division by zero + ), + ) + + +def test_divide_f32_bcast_second(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(4, 4, 2, 7), + torch.randn(2, 7).abs() + + 0.1, # Using abs and adding 0.1 to avoid division by zero + ), + ) + + +def test_divide_f32_bcast_unary(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(5), + torch.randn(1, 1, 5).abs() + + 0.1, # Using abs and adding 0.1 to avoid division by zero + ), + ) + + +def test_divide_f32_trunc(test_runner) -> None: + test_runner.lower_and_run_model( + ModelWithRounding(rounding_mode="trunc"), + ( + torch.randn(3, 4) * 10, + torch.randn(3, 4).abs() + + 0.1, # Using abs and adding 0.1 to avoid division by zero + ), + ) + + +def test_divide_f32_floor(test_runner) -> None: + test_runner.lower_and_run_model( + ModelWithRounding(rounding_mode="floor"), + ( + torch.randn(3, 4) * 10, + torch.randn(3, 4).abs() + + 0.1, # Using abs and adding 0.1 to avoid division by zero + ), + ) diff --git a/backends/test/suite/operators/test_elu.py b/backends/test/suite/operators/test_elu.py index 361e1382c37..659036b52dd 100644 --- a/backends/test/suite/operators/test_elu.py +++ b/backends/test/suite/operators/test_elu.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -29,21 +24,23 @@ def forward(self, x): return torch.nn.functional.elu(x, alpha=self.alpha, inplace=self.inplace) -@operator_test -class TestELU(OperatorTest): - @dtype_test - def test_elu_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 100).to(dtype),), flow) +@parameterize_by_dtype +def test_elu_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model(Model(), ((torch.rand(2, 10) * 100).to(dtype),)) - def test_elu_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_elu_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_elu_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - def test_elu_f32_alpha(self, flow: TestFlow) -> None: - self._test_op(Model(alpha=0.5), (torch.randn(3, 4, 5),), flow) - @unittest.skip("In place activations aren't properly defunctionalized yet.") - def test_elu_f32_inplace(self, flow: TestFlow) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) +def test_elu_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(2, 3, 4, 
5),)) + + +def test_elu_f32_alpha(test_runner) -> None: + test_runner.lower_and_run_model(Model(alpha=0.5), (torch.randn(3, 4, 5),)) + + +@unittest.skip("In place activations aren't properly defunctionalized yet.") +def test_elu_f32_inplace(test_runner) -> None: + test_runner.lower_and_run_model(Model(inplace=True), (torch.randn(3, 4, 5),)) diff --git a/backends/test/suite/operators/test_embedding.py b/backends/test/suite/operators/test_embedding.py index 07e09952db8..f6648023b7f 100644 --- a/backends/test/suite/operators/test_embedding.py +++ b/backends/test/suite/operators/test_embedding.py @@ -7,13 +7,8 @@ # pyre-unsafe import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -31,59 +26,52 @@ def __init__( def forward(self, x): return self.embedding(x) - -@operator_test -class Embedding(OperatorTest): # Note that generate_random_test_inputs is used to avoid the tester # generating random inputs that are out of range of the embedding size. # The tester's random input generation is not smart enough to know that # the index inputs must be within a certain range. - @dtype_test - def test_embedding_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model().to(dtype), - (torch.randint(0, 10, (2, 8), dtype=torch.long),), - flow, - generate_random_test_inputs=False, - ) - def test_embedding_sizes(self, flow: TestFlow) -> None: - self._test_op( - Model(num_embeddings=5, embedding_dim=3), - (torch.randint(0, 5, (2, 8), dtype=torch.long),), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - Model(num_embeddings=100, embedding_dim=10), - (torch.randint(0, 100, (2, 8), dtype=torch.long),), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - Model(num_embeddings=1000, embedding_dim=50), - (torch.randint(0, 1000, (2, 4), dtype=torch.long),), - flow, - generate_random_test_inputs=False, - ) +@parameterize_by_dtype +def test_embedding_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model().to(dtype), + (torch.randint(0, 10, (2, 8), dtype=torch.long),), + generate_random_test_inputs=False, + ) - def test_embedding_batch_dim(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randint(0, 100, (5,), dtype=torch.long),), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - Model(), - (torch.randint(0, 100, (2, 8), dtype=torch.long),), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - Model(), - (torch.randint(0, 100, (2, 3, 4), dtype=torch.long),), - flow, - generate_random_test_inputs=False, - ) + +def test_embedding_sizes(test_runner) -> None: + test_runner.lower_and_run_model( + Model(num_embeddings=5, embedding_dim=3), + (torch.randint(0, 5, (2, 8), dtype=torch.long),), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + Model(num_embeddings=100, embedding_dim=10), + (torch.randint(0, 100, (2, 8), dtype=torch.long),), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + Model(num_embeddings=1000, embedding_dim=50), + (torch.randint(0, 1000, (2, 4), dtype=torch.long),), + generate_random_test_inputs=False, + ) + + +def test_embedding_batch_dim(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randint(0, 100, (5,), dtype=torch.long),), + generate_random_test_inputs=False, + ) + 
test_runner.lower_and_run_model( + Model(), + (torch.randint(0, 100, (2, 8), dtype=torch.long),), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + Model(), + (torch.randint(0, 100, (2, 3, 4), dtype=torch.long),), + generate_random_test_inputs=False, + ) diff --git a/backends/test/suite/operators/test_embedding_bag.py b/backends/test/suite/operators/test_embedding_bag.py index 2659bdd9b0b..2bdd08fef14 100644 --- a/backends/test/suite/operators/test_embedding_bag.py +++ b/backends/test/suite/operators/test_embedding_bag.py @@ -7,13 +7,8 @@ # pyre-unsafe import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -35,84 +30,77 @@ def __init__( def forward(self, x, offsets=None): return self.embedding_bag(x, offsets) - -@operator_test -class EmbeddingBag(OperatorTest): # Note that generate_random_test_inputs is used to avoid the tester # generating random inputs that are out of range of the embedding size. # The tester's random input generation is not smart enough to know that # the index inputs must be within a certain range. - @dtype_test - def test_embedding_bag_dtype(self, flow: TestFlow, dtype) -> None: - indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long) - offsets = torch.tensor([0, 4], dtype=torch.long) - self._test_op( - Model().to(dtype), - (indices, offsets), - flow, - generate_random_test_inputs=False, - ) - - def test_embedding_bag_sizes(self, flow: TestFlow) -> None: - indices = torch.tensor([1, 2, 3, 1], dtype=torch.long) - offsets = torch.tensor([0, 2], dtype=torch.long) - - self._test_op( - Model(num_embeddings=5, embedding_dim=3), - (indices, offsets), - flow, - generate_random_test_inputs=False, - ) - - indices = torch.tensor([5, 20, 10, 43, 7], dtype=torch.long) - offsets = torch.tensor([0, 2, 4], dtype=torch.long) - self._test_op( - Model(num_embeddings=50, embedding_dim=10), - (indices, offsets), - flow, - generate_random_test_inputs=False, - ) - - indices = torch.tensor([100, 200, 300, 400], dtype=torch.long) - offsets = torch.tensor([0, 2], dtype=torch.long) - self._test_op( - Model(num_embeddings=500, embedding_dim=20), - (indices, offsets), - flow, - generate_random_test_inputs=False, - ) - - def test_embedding_bag_modes(self, flow: TestFlow) -> None: - indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long) - offsets = torch.tensor([0, 4], dtype=torch.long) - - self._test_op( - Model(mode="sum"), - (indices, offsets), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - Model(mode="mean"), - (indices, offsets), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - Model(mode="max"), - (indices, offsets), - flow, - generate_random_test_inputs=False, - ) - def test_embedding_bag_include_last_offset(self, flow: TestFlow) -> None: - indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long) - offsets = torch.tensor([0, 4], dtype=torch.long) - - self._test_op( - Model(include_last_offset=True), - (indices, offsets), - flow, - generate_random_test_inputs=False, - ) +@parameterize_by_dtype +def test_embedding_bag_dtype(test_runner, dtype) -> None: + indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long) + offsets = torch.tensor([0, 4], dtype=torch.long) + test_runner.lower_and_run_model( + Model().to(dtype), + (indices, offsets), + 
generate_random_test_inputs=False, + ) + + +def test_embedding_bag_sizes(test_runner) -> None: + indices = torch.tensor([1, 2, 3, 1], dtype=torch.long) + offsets = torch.tensor([0, 2], dtype=torch.long) + + test_runner.lower_and_run_model( + Model(num_embeddings=5, embedding_dim=3), + (indices, offsets), + generate_random_test_inputs=False, + ) + + indices = torch.tensor([5, 20, 10, 43, 7], dtype=torch.long) + offsets = torch.tensor([0, 2, 4], dtype=torch.long) + test_runner.lower_and_run_model( + Model(num_embeddings=50, embedding_dim=10), + (indices, offsets), + generate_random_test_inputs=False, + ) + + indices = torch.tensor([100, 200, 300, 400], dtype=torch.long) + offsets = torch.tensor([0, 2], dtype=torch.long) + test_runner.lower_and_run_model( + Model(num_embeddings=500, embedding_dim=20), + (indices, offsets), + generate_random_test_inputs=False, + ) + + +def test_embedding_bag_modes(test_runner) -> None: + indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long) + offsets = torch.tensor([0, 4], dtype=torch.long) + + test_runner.lower_and_run_model( + Model(mode="sum"), + (indices, offsets), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + Model(mode="mean"), + (indices, offsets), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + Model(mode="max"), + (indices, offsets), + generate_random_test_inputs=False, + ) + + +def test_embedding_bag_include_last_offset(test_runner) -> None: + indices = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long) + offsets = torch.tensor([0, 4], dtype=torch.long) + + test_runner.lower_and_run_model( + Model(include_last_offset=True), + (indices, offsets), + generate_random_test_inputs=False, + ) diff --git a/backends/test/suite/operators/test_exp.py b/backends/test/suite/operators/test_exp.py index 54196d81ba9..633fd8d0fe3 100644 --- a/backends/test/suite/operators/test_exp.py +++ b/backends/test/suite/operators/test_exp.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class ExpModel(torch.nn.Module): @@ -27,39 +22,39 @@ def forward(self, x): return torch.exp(x) -@operator_test -class TestExp(OperatorTest): - @dtype_test - def test_exp_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = ExpModel().to(dtype) - # Use smaller range to avoid overflow - self._test_op(model, (torch.rand(10, 10).to(dtype) * 4 - 2,), flow) +@parameterize_by_dtype +def test_exp_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = ExpModel().to(dtype) + # Use smaller range to avoid overflow + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype) * 4 - 2,)) - def test_exp_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - # 1D tensor - self._test_op(ExpModel(), (torch.randn(20),), flow) +def test_exp_shapes(test_runner) -> None: + # Test with different tensor shapes - # 2D tensor - self._test_op(ExpModel(), (torch.randn(5, 10),), flow) + # 1D tensor + test_runner.lower_and_run_model(ExpModel(), (torch.randn(20),)) - # 3D tensor - self._test_op(ExpModel(), (torch.randn(3, 4, 5),), flow) + # 2D tensor + test_runner.lower_and_run_model(ExpModel(), (torch.randn(5, 10),)) - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_exp_edge_cases(self, flow: 
TestFlow) -> None: - # Test edge cases + # 3D tensor + test_runner.lower_and_run_model(ExpModel(), (torch.randn(3, 4, 5),)) - # Tensor with infinity - x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) - self._test_op(ExpModel(), (x,), flow, generate_random_test_inputs=False) - # Tensor with NaN - x = torch.tensor([float("nan"), 1.0, -1.0]) - self._test_op(ExpModel(), (x,), flow, generate_random_test_inputs=False) +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_exp_edge_cases(test_runner) -> None: + # Test edge cases - # Overflow - x = torch.tensor([10e10]) - self._test_op(ExpModel(), (x,), flow, generate_random_test_inputs=False) + # Tensor with infinity + x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) + test_runner.lower_and_run_model(ExpModel(), (x,), generate_random_test_inputs=False) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.0, -1.0]) + test_runner.lower_and_run_model(ExpModel(), (x,), generate_random_test_inputs=False) + + # Overflow + x = torch.tensor([10e10]) + test_runner.lower_and_run_model(ExpModel(), (x,), generate_random_test_inputs=False) diff --git a/backends/test/suite/operators/test_expand.py b/backends/test/suite/operators/test_expand.py index 72fab150f62..e02445f2198 100644 --- a/backends/test/suite/operators/test_expand.py +++ b/backends/test/suite/operators/test_expand.py @@ -9,13 +9,8 @@ from typing import List import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class ExpandModel(torch.nn.Module): @@ -27,96 +22,84 @@ def forward(self, x): return x.expand(self.shape) -@operator_test -class Expand(OperatorTest): - @dtype_test - def test_expand_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - ExpandModel(shape=[8, 32]), - (torch.rand(1, 32).to(dtype),), - flow, - ) - - def test_expand_dimensions(self, flow: TestFlow) -> None: - self._test_op( - ExpandModel(shape=[8, 32]), - (torch.randn(1, 32),), - flow, - ) - - self._test_op( - ExpandModel(shape=[16, 20]), - (torch.randn(1, 1),), - flow, - ) - - self._test_op( - ExpandModel(shape=[4, 1, 32]), - (torch.randn(1, 32),), - flow, - ) - - self._test_op( - ExpandModel(shape=[8, 4, 16]), - (torch.randn(8, 1, 16),), - flow, - ) - - self._test_op( - ExpandModel(shape=[6, 16, 8]), - (torch.randn(6, 16, 1),), - flow, - ) - - def test_expand_keep_original_size(self, flow: TestFlow) -> None: - self._test_op( - ExpandModel(shape=[8, -1]), - (torch.randn(1, 32),), - flow, - ) - - self._test_op( - ExpandModel(shape=[-1, 32]), - (torch.randn(4, 1),), - flow, - ) - - self._test_op( - ExpandModel(shape=[-1, 16, -1]), - (torch.randn(4, 1, 8),), - flow, - ) - - def test_expand_rank_increase(self, flow: TestFlow) -> None: - # Test expanding 2D tensor to 3D - self._test_op( - ExpandModel(shape=[6, 8, 16]), - (torch.randn(8, 16),), - flow, - ) - - # Test expanding 2D tensor to 4D - self._test_op( - ExpandModel(shape=[3, 4, 8, 16]), - (torch.randn(8, 16),), - flow, - ) - - def test_expand_singleton_dimensions(self, flow: TestFlow) -> None: - self._test_op( - ExpandModel(shape=[512]), - (torch.randn(1),), - flow, - ) - - self._test_op( - ExpandModel(shape=[16, 20]), - (torch.randn(1, 1),), - flow, - ) - - self._test_op( - ExpandModel(shape=[8, 32]), - (torch.randn(32),), - flow, - ) +@parameterize_by_dtype +def test_expand_dtype(test_runner, dtype) -> None: + 
test_runner.lower_and_run_model( + ExpandModel(shape=[8, 32]), + (torch.rand(1, 32).to(dtype),), + ) + + +def test_expand_dimensions(test_runner) -> None: + test_runner.lower_and_run_model( + ExpandModel(shape=[8, 32]), + (torch.randn(1, 32),), + ) + + test_runner.lower_and_run_model( + ExpandModel(shape=[16, 20]), + (torch.randn(1, 1),), + ) + + test_runner.lower_and_run_model( + ExpandModel(shape=[4, 1, 32]), + (torch.randn(1, 32),), + ) + + test_runner.lower_and_run_model( + ExpandModel(shape=[8, 4, 16]), + (torch.randn(8, 1, 16),), + ) + + test_runner.lower_and_run_model( + ExpandModel(shape=[6, 16, 8]), + (torch.randn(6, 16, 1),), + ) + + +def test_expand_keep_original_size(test_runner) -> None: + test_runner.lower_and_run_model( + ExpandModel(shape=[8, -1]), + (torch.randn(1, 32),), + ) + + test_runner.lower_and_run_model( + ExpandModel(shape=[-1, 32]), + (torch.randn(4, 1),), + ) + + test_runner.lower_and_run_model( + ExpandModel(shape=[-1, 16, -1]), + (torch.randn(4, 1, 8),), + ) + + +def test_expand_rank_increase(test_runner) -> None: + # Test expanding 2D tensor to 3D + test_runner.lower_and_run_model( + ExpandModel(shape=[6, 8, 16]), + (torch.randn(8, 16),), + ) + + # Test expanding 2D tensor to 4D + test_runner.lower_and_run_model( + ExpandModel(shape=[3, 4, 8, 16]), + (torch.randn(8, 16),), + ) + + +def test_expand_singleton_dimensions(test_runner) -> None: + test_runner.lower_and_run_model( + ExpandModel(shape=[512]), + (torch.randn(1),), + ) + + test_runner.lower_and_run_model( + ExpandModel(shape=[16, 20]), + (torch.randn(1, 1),), + ) + + test_runner.lower_and_run_model( + ExpandModel(shape=[8, 32]), + (torch.randn(32),), + ) diff --git a/backends/test/suite/operators/test_floor.py b/backends/test/suite/operators/test_floor.py index bce9f0b4d34..a035356a21a 100644 --- a/backends/test/suite/operators/test_floor.py +++ b/backends/test/suite/operators/test_floor.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class FloorModel(torch.nn.Module): @@ -24,34 +19,38 @@ def forward(self, x): return torch.floor(x) -@operator_test -class TestFloor(OperatorTest): - @dtype_test - def test_floor_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = FloorModel().to(dtype) - self._test_op(model, (torch.rand(10, 10).to(dtype) * 2 - 1,), flow) +@parameterize_by_dtype +def test_floor_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = FloorModel().to(dtype) + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype) * 2 - 1,)) - def test_floor_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - # 1D tensor - self._test_op(FloorModel(), (torch.randn(20),), flow) +def test_floor_shapes(test_runner) -> None: + # Test with different tensor shapes - # 2D tensor - self._test_op(FloorModel(), (torch.randn(5, 10),), flow) + # 1D tensor + test_runner.lower_and_run_model(FloorModel(), (torch.randn(20),)) - # 3D tensor - self._test_op(FloorModel(), (torch.randn(3, 4, 5),), flow) + # 2D tensor + test_runner.lower_and_run_model(FloorModel(), (torch.randn(5, 10),)) - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_floor_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases + # 3D tensor + test_runner.lower_and_run_model(FloorModel(), 
(torch.randn(3, 4, 5),)) - # Tensor with infinity - x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) - self._test_op(FloorModel(), (x,), flow, generate_random_test_inputs=False) - # Tensor with NaN - x = torch.tensor([float("nan"), 1.0, -1.0]) - self._test_op(FloorModel(), (x,), flow, generate_random_test_inputs=False) +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_floor_edge_cases(test_runner) -> None: + # Test edge cases + + # Tensor with infinity + x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) + test_runner.lower_and_run_model( + FloorModel(), (x,), generate_random_test_inputs=False + ) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.0, -1.0]) + test_runner.lower_and_run_model( + FloorModel(), (x,), generate_random_test_inputs=False + ) diff --git a/backends/test/suite/operators/test_floor_divide.py b/backends/test/suite/operators/test_floor_divide.py index c14151b6181..7c370b9ed5b 100644 --- a/backends/test/suite/operators/test_floor_divide.py +++ b/backends/test/suite/operators/test_floor_divide.py @@ -9,13 +9,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class FloorDivideModel(torch.nn.Module): @@ -26,191 +21,194 @@ def forward(self, x, y): return torch.floor_divide(x, y) -@operator_test -class TestFloorDivide(OperatorTest): - @dtype_test - def test_floor_divide_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = FloorDivideModel().to(dtype) - # Use values that won't cause division by zero - x = torch.randint(-100, 100, (10, 10)).to(dtype) - y = torch.full_like(x, 2) # Divisor of 2 - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - def test_floor_divide_scalar_divisors(self, flow: TestFlow) -> None: - # Test with different scalar divisors as tensors - - # Positive divisor - x = torch.randint(-100, 100, (10, 10)) - y = torch.full_like(x, 3) # Divisor of 3 - self._test_op( - FloorDivideModel(), (x, y), flow, generate_random_test_inputs=False - ) - - # Negative divisor - x = torch.randint(-100, 100, (10, 10)) - y = torch.full_like(x, -2) # Divisor of -2 - self._test_op( - FloorDivideModel(), (x, y), flow, generate_random_test_inputs=False - ) - - # Fractional divisor - x = torch.randint(-100, 100, (10, 10)).float() - y = torch.full_like(x, 2.5) # Divisor of 2.5 - self._test_op( - FloorDivideModel(), (x, y), flow, generate_random_test_inputs=False - ) - - # Large divisor - x = torch.randint(-1000, 1000, (10, 10)) - y = torch.full_like(x, 100) # Divisor of 100 - self._test_op( - FloorDivideModel(), (x, y), flow, generate_random_test_inputs=False - ) - - # Small divisor - x = torch.randint(-100, 100, (10, 10)).float() - y = torch.full_like(x, 0.5) # Divisor of 0.5 - self._test_op( - FloorDivideModel(), (x, y), flow, generate_random_test_inputs=False - ) - - def test_floor_divide_tensor_divisors(self, flow: TestFlow) -> None: - # Test with tensor divisors - - # Constant divisor tensor - x = torch.randint(-100, 100, (10, 10)) - y = torch.full_like(x, 2) # All elements are 2 - self._test_op( - FloorDivideModel(), (x, y), flow, generate_random_test_inputs=False - ) - - # Random divisor tensor (non-zero) - x = torch.randint(-100, 100, (10, 10)) - y = torch.randint(1, 10, (10, 10)) # Positive divisors - self._test_op( - FloorDivideModel(), (x, y), flow, 
generate_random_test_inputs=False - ) - - # Mixed positive and negative divisors - x = torch.randint(-100, 100, (10, 10)) - y = torch.randint(-10, 10, (10, 10)) - # Replace zeros to avoid division by zero - y[y == 0] = 1 - self._test_op( - FloorDivideModel(), (x, y), flow, generate_random_test_inputs=False - ) - - # Broadcasting: scalar dividend, tensor divisor - x = torch.tensor([10]) - y = torch.arange(1, 5) # [1, 2, 3, 4] - self._test_op( - FloorDivideModel(), (x, y), flow, generate_random_test_inputs=False - ) - - # Broadcasting: tensor dividend, scalar divisor - x = torch.arange(-10, 10) - y = torch.tensor([2]) - self._test_op( - FloorDivideModel(), (x, y), flow, generate_random_test_inputs=False - ) - - def test_floor_divide_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - model = FloorDivideModel() - - # 1D tensor - x = torch.randint(-100, 100, (20,)) - y = torch.full_like(x, 2) # Divisor of 2 - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # 2D tensor - x = torch.randint(-100, 100, (5, 10)) - y = torch.full_like(x, 2) # Divisor of 2 - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # 3D tensor - x = torch.randint(-100, 100, (3, 4, 5)) - y = torch.full_like(x, 2) # Divisor of 2 - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # 4D tensor - x = torch.randint(-100, 100, (2, 3, 4, 5)) - y = torch.full_like(x, 2) # Divisor of 2 - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # 5D tensor - x = torch.randint(-100, 100, (2, 2, 3, 4, 5)) - y = torch.full_like(x, 2) # Divisor of 2 - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - def test_floor_divide_values(self, flow: TestFlow) -> None: - # Test with different value ranges - model = FloorDivideModel() - - # Test with specific dividend values - x = torch.tensor([-7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7]) - - # Divide by 2 - y = torch.tensor([2]).expand_as(x).clone() - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # Divide by -2 - y = torch.tensor([-2]).expand_as(x).clone() - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # Divide by 3 - y = torch.tensor([3]).expand_as(x).clone() - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # Divide by -3 - y = torch.tensor([-3]).expand_as(x).clone() - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # Test with floating point values - x = torch.tensor( - [-3.8, -3.5, -3.2, -0.8, -0.5, -0.2, 0.0, 0.2, 0.5, 0.8, 3.2, 3.5, 3.8] - ) - - # Divide by 2.0 - y = torch.tensor([2.0]).expand_as(x).clone() - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # Divide by -2.0 - y = torch.tensor([-2.0]).expand_as(x).clone() - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_floor_divide_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases - model = FloorDivideModel() - - # Zero dividend - x = torch.zeros(10) - y = torch.full_like(x, 2) - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # Division with remainder - x = torch.tensor([1, 3, 5, 7, 9]) - y = torch.full_like(x, 2) - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # Tensor with infinity - x = torch.tensor([float("inf"), float("-inf"), 10.0, -10.0]) - y = torch.full_like(x, 2) - self._test_op(model, (x, y), 
flow, generate_random_test_inputs=False) - - # Tensor with NaN - x = torch.tensor([float("nan"), 10.0, -10.0]) - y = torch.full_like(x, 2) - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # Very large values - x = torch.tensor([1e10, -1e10]) - y = torch.full_like(x, 3) - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) - - # Very small values - x = torch.tensor([1e-10, -1e-10]) - y = torch.full_like(x, 2) - self._test_op(model, (x, y), flow, generate_random_test_inputs=False) +@parameterize_by_dtype +def test_floor_divide_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = FloorDivideModel().to(dtype) + # Use values that won't cause division by zero + x = torch.randint(-100, 100, (10, 10)).to(dtype) + y = torch.full_like(x, 2) # Divisor of 2 + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + +def test_floor_divide_scalar_divisors(test_runner) -> None: + # Test with different scalar divisors as tensors + + # Positive divisor + x = torch.randint(-100, 100, (10, 10)) + y = torch.full_like(x, 3) # Divisor of 3 + test_runner.lower_and_run_model( + FloorDivideModel(), (x, y), generate_random_test_inputs=False + ) + + # Negative divisor + x = torch.randint(-100, 100, (10, 10)) + y = torch.full_like(x, -2) # Divisor of -2 + test_runner.lower_and_run_model( + FloorDivideModel(), (x, y), generate_random_test_inputs=False + ) + + # Fractional divisor + x = torch.randint(-100, 100, (10, 10)).float() + y = torch.full_like(x, 2.5) # Divisor of 2.5 + test_runner.lower_and_run_model( + FloorDivideModel(), (x, y), generate_random_test_inputs=False + ) + + # Large divisor + x = torch.randint(-1000, 1000, (10, 10)) + y = torch.full_like(x, 100) # Divisor of 100 + test_runner.lower_and_run_model( + FloorDivideModel(), (x, y), generate_random_test_inputs=False + ) + + # Small divisor + x = torch.randint(-100, 100, (10, 10)).float() + y = torch.full_like(x, 0.5) # Divisor of 0.5 + test_runner.lower_and_run_model( + FloorDivideModel(), (x, y), generate_random_test_inputs=False + ) + + +def test_floor_divide_tensor_divisors(test_runner) -> None: + # Test with tensor divisors + + # Constant divisor tensor + x = torch.randint(-100, 100, (10, 10)) + y = torch.full_like(x, 2) # All elements are 2 + test_runner.lower_and_run_model( + FloorDivideModel(), (x, y), generate_random_test_inputs=False + ) + + # Random divisor tensor (non-zero) + x = torch.randint(-100, 100, (10, 10)) + y = torch.randint(1, 10, (10, 10)) # Positive divisors + test_runner.lower_and_run_model( + FloorDivideModel(), (x, y), generate_random_test_inputs=False + ) + + # Mixed positive and negative divisors + x = torch.randint(-100, 100, (10, 10)) + y = torch.randint(-10, 10, (10, 10)) + # Replace zeros to avoid division by zero + y[y == 0] = 1 + test_runner.lower_and_run_model( + FloorDivideModel(), (x, y), generate_random_test_inputs=False + ) + + # Broadcasting: scalar dividend, tensor divisor + x = torch.tensor([10]) + y = torch.arange(1, 5) # [1, 2, 3, 4] + test_runner.lower_and_run_model( + FloorDivideModel(), (x, y), generate_random_test_inputs=False + ) + + # Broadcasting: tensor dividend, scalar divisor + x = torch.arange(-10, 10) + y = torch.tensor([2]) + test_runner.lower_and_run_model( + FloorDivideModel(), (x, y), generate_random_test_inputs=False + ) + + +def test_floor_divide_shapes(test_runner) -> None: + # Test with different tensor shapes + model = FloorDivideModel() + + # 1D tensor + x = torch.randint(-100, 100, (20,)) + y 
= torch.full_like(x, 2) # Divisor of 2 + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # 2D tensor + x = torch.randint(-100, 100, (5, 10)) + y = torch.full_like(x, 2) # Divisor of 2 + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # 3D tensor + x = torch.randint(-100, 100, (3, 4, 5)) + y = torch.full_like(x, 2) # Divisor of 2 + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # 4D tensor + x = torch.randint(-100, 100, (2, 3, 4, 5)) + y = torch.full_like(x, 2) # Divisor of 2 + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # 5D tensor + x = torch.randint(-100, 100, (2, 2, 3, 4, 5)) + y = torch.full_like(x, 2) # Divisor of 2 + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + +def test_floor_divide_values(test_runner) -> None: + # Test with different value ranges + model = FloorDivideModel() + + # Test with specific dividend values + x = torch.tensor([-7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7]) + + # Divide by 2 + y = torch.tensor([2]).expand_as(x).clone() + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # Divide by -2 + y = torch.tensor([-2]).expand_as(x).clone() + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # Divide by 3 + y = torch.tensor([3]).expand_as(x).clone() + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # Divide by -3 + y = torch.tensor([-3]).expand_as(x).clone() + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # Test with floating point values + x = torch.tensor( + [-3.8, -3.5, -3.2, -0.8, -0.5, -0.2, 0.0, 0.2, 0.5, 0.8, 3.2, 3.5, 3.8] + ) + + # Divide by 2.0 + y = torch.tensor([2.0]).expand_as(x).clone() + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # Divide by -2.0 + y = torch.tensor([-2.0]).expand_as(x).clone() + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_floor_divide_edge_cases(test_runner) -> None: + # Test edge cases + model = FloorDivideModel() + + # Zero dividend + x = torch.zeros(10) + y = torch.full_like(x, 2) + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # Division with remainder + x = torch.tensor([1, 3, 5, 7, 9]) + y = torch.full_like(x, 2) + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # Tensor with infinity + x = torch.tensor([float("inf"), float("-inf"), 10.0, -10.0]) + y = torch.full_like(x, 2) + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # Tensor with NaN + x = torch.tensor([float("nan"), 10.0, -10.0]) + y = torch.full_like(x, 2) + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # Very large values + x = torch.tensor([1e10, -1e10]) + y = torch.full_like(x, 3) + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) + + # Very small values + x = torch.tensor([1e-10, -1e-10]) + y = torch.full_like(x, 2) + test_runner.lower_and_run_model(model, (x, y), generate_random_test_inputs=False) diff --git a/backends/test/suite/operators/test_gelu.py b/backends/test/suite/operators/test_gelu.py index 5c6a9f8f415..9b70d6bc30f 100644 --- a/backends/test/suite/operators/test_gelu.py 
+++ b/backends/test/suite/operators/test_gelu.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -26,27 +21,30 @@ def forward(self, x): return torch.nn.functional.gelu(x, approximate=self.approximate) -@operator_test -class TestGELU(OperatorTest): - @dtype_test - def test_gelu_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow) +@parameterize_by_dtype +def test_gelu_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),)) - def test_gelu_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_gelu_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_gelu_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - def test_gelu_f32_tanh_approximation(self, flow: TestFlow) -> None: - self._test_op(Model(approximate="tanh"), (torch.randn(3, 4, 5),), flow) - def test_gelu_f32_boundary_values(self, flow: TestFlow) -> None: - # Test with specific values spanning negative and positive ranges - x = torch.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]) - self._test_op(Model(), (x,), flow) +def test_gelu_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(2, 3, 4, 5),)) - def test_gelu_f32_tanh_boundary_values(self, flow: TestFlow) -> None: - # Test tanh approximation with specific values - x = torch.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]) - self._test_op(Model(approximate="tanh"), (x,), flow) + +def test_gelu_f32_tanh_approximation(test_runner) -> None: + test_runner.lower_and_run_model(Model(approximate="tanh"), (torch.randn(3, 4, 5),)) + + +def test_gelu_f32_boundary_values(test_runner) -> None: + # Test with specific values spanning negative and positive ranges + x = torch.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]) + test_runner.lower_and_run_model(Model(), (x,)) + + +def test_gelu_f32_tanh_boundary_values(test_runner) -> None: + # Test tanh approximation with specific values + x = torch.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]) + test_runner.lower_and_run_model(Model(approximate="tanh"), (x,)) diff --git a/backends/test/suite/operators/test_glu.py b/backends/test/suite/operators/test_glu.py index cd19377c36b..f5d6745f144 100644 --- a/backends/test/suite/operators/test_glu.py +++ b/backends/test/suite/operators/test_glu.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -26,27 +21,29 @@ def forward(self, x): return torch.nn.functional.glu(x, dim=self.dim) -@operator_test -class TestGLU(OperatorTest): - @dtype_test - def test_glu_dtype(self, flow: TestFlow, dtype) -> None: - # Input must have even number of elements in the specified dimension - self._test_op(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow) +@parameterize_by_dtype +def test_glu_dtype(test_runner, dtype) -> None: + # Input must have even number of elements in the specified dimension + 
test_runner.lower_and_run_model(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),)) - def test_glu_f32_dim_last(self, flow: TestFlow) -> None: - # Default dim is -1 (last dimension) - self._test_op(Model(), (torch.randn(3, 4, 6),), flow) - def test_glu_f32_dim_first(self, flow: TestFlow) -> None: - # Test with dim=0 (first dimension) - self._test_op(Model(dim=0), (torch.randn(4, 3, 5),), flow) +def test_glu_f32_dim_last(test_runner) -> None: + # Default dim is -1 (last dimension) + test_runner.lower_and_run_model(Model(), (torch.randn(3, 4, 6),)) - def test_glu_f32_dim_middle(self, flow: TestFlow) -> None: - # Test with dim=1 (middle dimension) - self._test_op(Model(dim=1), (torch.randn(3, 8, 5),), flow) - def test_glu_f32_boundary_values(self, flow: TestFlow) -> None: - # Test with specific values spanning negative and positive ranges - # Input must have even number of elements in the specified dimension - x = torch.tensor([[-10.0, -5.0, -1.0, 0.0], [1.0, 5.0, 10.0, -2.0]]) - self._test_op(Model(dim=1), (x,), flow) +def test_glu_f32_dim_first(test_runner) -> None: + # Test with dim=0 (first dimension) + test_runner.lower_and_run_model(Model(dim=0), (torch.randn(4, 3, 5),)) + + +def test_glu_f32_dim_middle(test_runner) -> None: + # Test with dim=1 (middle dimension) + test_runner.lower_and_run_model(Model(dim=1), (torch.randn(3, 8, 5),)) + + +def test_glu_f32_boundary_values(test_runner) -> None: + # Test with specific values spanning negative and positive ranges + # Input must have even number of elements in the specified dimension + x = torch.tensor([[-10.0, -5.0, -1.0, 0.0], [1.0, 5.0, 10.0, -2.0]]) + test_runner.lower_and_run_model(Model(dim=1), (x,)) diff --git a/backends/test/suite/operators/test_hardsigmoid.py b/backends/test/suite/operators/test_hardsigmoid.py index 8ca254d4f61..a1a330aa1ab 100644 --- a/backends/test/suite/operators/test_hardsigmoid.py +++ b/backends/test/suite/operators/test_hardsigmoid.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -28,23 +23,25 @@ def forward(self, x): return torch.nn.functional.hardsigmoid(x, inplace=self.inplace) -@operator_test -class TestHardsigmoid(OperatorTest): - @dtype_test - def test_hardsigmoid_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model(), ((torch.rand(2, 10)).to(dtype),), flow) +@parameterize_by_dtype +def test_hardsigmoid_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model(Model(), ((torch.rand(2, 10)).to(dtype),)) - def test_hardsigmoid_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_hardsigmoid_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_hardsigmoid_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - @unittest.skip("In place activations aren't properly defunctionalized yet.") - def test_hardsigmoid_f32_inplace(self, flow: TestFlow) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) - def test_hardsigmoid_f32_boundary_values(self, flow: TestFlow) -> None: - # Test with values that span the hardsigmoid's piecewise regions - x = torch.tensor([-5.0, -3.0, -1.0, 0.0, 1.0, 3.0, 5.0]) - self._test_op(Model(), (x,), flow) +def 
test_hardsigmoid_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(2, 3, 4, 5),)) + + +@unittest.skip("In place activations aren't properly defunctionalized yet.") +def test_hardsigmoid_f32_inplace(test_runner) -> None: + test_runner.lower_and_run_model(Model(inplace=True), (torch.randn(3, 4, 5),)) + + +def test_hardsigmoid_f32_boundary_values(test_runner) -> None: + # Test with values that span the hardsigmoid's piecewise regions + x = torch.tensor([-5.0, -3.0, -1.0, 0.0, 1.0, 3.0, 5.0]) + test_runner.lower_and_run_model(Model(), (x,)) diff --git a/backends/test/suite/operators/test_hardswish.py b/backends/test/suite/operators/test_hardswish.py index a93516542c8..6dc990c78b3 100644 --- a/backends/test/suite/operators/test_hardswish.py +++ b/backends/test/suite/operators/test_hardswish.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -28,23 +23,25 @@ def forward(self, x): return torch.nn.functional.hardswish(x, inplace=self.inplace) -@operator_test -class TestHardswish(OperatorTest): - @dtype_test - def test_hardswish_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model(), ((torch.rand(2, 10)).to(dtype),), flow) +@parameterize_by_dtype +def test_hardswish_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model(Model(), ((torch.rand(2, 10)).to(dtype),)) - def test_hardswish_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_hardswish_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_hardswish_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - @unittest.skip("In place activations aren't properly defunctionalized yet.") - def test_hardswish_f32_inplace(self, flow: TestFlow) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) - def test_hardswish_f32_boundary_values(self, flow: TestFlow) -> None: - # Test with values that span the hardswish's piecewise regions - x = torch.tensor([-5.0, -3.0, -1.0, 0.0, 1.0, 3.0, 5.0]) - self._test_op(Model(), (x,), flow) +def test_hardswish_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(2, 3, 4, 5),)) + + +@unittest.skip("In place activations aren't properly defunctionalized yet.") +def test_hardswish_f32_inplace(test_runner) -> None: + test_runner.lower_and_run_model(Model(inplace=True), (torch.randn(3, 4, 5),)) + + +def test_hardswish_f32_boundary_values(test_runner) -> None: + # Test with values that span the hardswish's piecewise regions + x = torch.tensor([-5.0, -3.0, -1.0, 0.0, 1.0, 3.0, 5.0]) + test_runner.lower_and_run_model(Model(), (x,)) diff --git a/backends/test/suite/operators/test_hardtanh.py b/backends/test/suite/operators/test_hardtanh.py index 7520c3faeae..214e492cece 100644 --- a/backends/test/suite/operators/test_hardtanh.py +++ b/backends/test/suite/operators/test_hardtanh.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class 
Model(torch.nn.Module): @@ -32,26 +27,31 @@ def forward(self, x): ) -@operator_test -class TestHardtanh(OperatorTest): - @dtype_test - def test_hardtanh_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 4 - 2).to(dtype),), flow) +@parameterize_by_dtype +def test_hardtanh_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model(Model(), ((torch.rand(2, 10) * 4 - 2).to(dtype),)) - def test_hardtanh_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_hardtanh_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_hardtanh_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - def test_hardtanh_f32_custom_range(self, flow: TestFlow) -> None: - self._test_op(Model(min_val=-2.0, max_val=2.0), (torch.randn(3, 4, 5),), flow) - @unittest.skip("In place activations aren't properly defunctionalized yet.") - def test_hardtanh_f32_inplace(self, flow: TestFlow) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) +def test_hardtanh_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(2, 3, 4, 5),)) - def test_hardtanh_f32_boundary_values(self, flow: TestFlow) -> None: - # Test with values that span the hardtanh's piecewise regions - x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) - self._test_op(Model(), (x,), flow) + +def test_hardtanh_f32_custom_range(test_runner) -> None: + test_runner.lower_and_run_model( + Model(min_val=-2.0, max_val=2.0), (torch.randn(3, 4, 5),) + ) + + +@unittest.skip("In place activations aren't properly defunctionalized yet.") +def test_hardtanh_f32_inplace(test_runner) -> None: + test_runner.lower_and_run_model(Model(inplace=True), (torch.randn(3, 4, 5),)) + + +def test_hardtanh_f32_boundary_values(test_runner) -> None: + # Test with values that span the hardtanh's piecewise regions + x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) + test_runner.lower_and_run_model(Model(), (x,)) diff --git a/backends/test/suite/operators/test_index_put.py b/backends/test/suite/operators/test_index_put.py index b5333b40984..31194cb62a7 100644 --- a/backends/test/suite/operators/test_index_put.py +++ b/backends/test/suite/operators/test_index_put.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class IndexPutInPlaceModel(torch.nn.Module): @@ -40,416 +35,385 @@ def forward(self, x, indices, values): return torch.index_put(x, indices, values, self.accumulate) -@operator_test -class IndexPut(OperatorTest): - @dtype_test - def test_index_put_in_place_dtype(self, flow: TestFlow, dtype) -> None: - indices = (torch.tensor([0, 2]),) - values = torch.tensor([10.0, 20.0]).to(dtype) - self._test_op( - IndexPutInPlaceModel(), - ((torch.rand(5, 2) * 100).to(dtype), indices, values), - flow, - generate_random_test_inputs=False, - ) - - @dtype_test - def test_index_put_dtype(self, flow: TestFlow, dtype) -> None: - indices = (torch.tensor([0, 2]),) - values = torch.tensor([10.0, 20.0]).to(dtype) - self._test_op( - IndexPutModel(), - ((torch.rand(5, 2) * 100).to(dtype), indices, values), - flow, - generate_random_test_inputs=False, - ) - - def test_index_put_in_place_accumulate(self, flow: TestFlow) 
-> None: - indices = (torch.tensor([0, 2]),) - values = torch.tensor([10.0, 20.0]) - self._test_op( - IndexPutInPlaceModel(accumulate=False), - (torch.ones(5, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - indices = (torch.tensor([0, 2]),) - values = torch.tensor([10.0, 20.0]) - self._test_op( - IndexPutInPlaceModel(accumulate=True), - (torch.ones(5, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - def test_index_put_accumulate(self, flow: TestFlow) -> None: - indices = (torch.tensor([0, 2]),) - values = torch.tensor([10.0, 20.0]) - self._test_op( - IndexPutModel(accumulate=False), - (torch.ones(5, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - indices = (torch.tensor([0, 2]),) - values = torch.tensor([10.0, 20.0]) - self._test_op( - IndexPutModel(accumulate=True), - (torch.ones(5, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - def test_index_put_in_place_shapes(self, flow: TestFlow) -> None: - indices = (torch.tensor([0, 2]),) - values = torch.tensor([10.0, 20.0]) - self._test_op( - IndexPutInPlaceModel(), - (torch.randn(5), indices, values), - flow, - generate_random_test_inputs=False, - ) - - indices = (torch.tensor([0, 2]), torch.tensor([1, 1])) - values = torch.tensor([10.0, 20.0]) - self._test_op( - IndexPutInPlaceModel(), - (torch.randn(5, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - indices = (torch.tensor([0, 2]), torch.tensor([1, 1]), torch.tensor([0, 1])) - values = torch.tensor([10.0, 20.0]) - self._test_op( - IndexPutInPlaceModel(), - (torch.randn(5, 3, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - indices = ( - torch.tensor([0, 2]), - torch.tensor([1, 1]), - torch.tensor([0, 1]), - torch.tensor([2, 3]), - ) - values = torch.tensor( - [ - 10.0, - ] - ) - self._test_op( - IndexPutInPlaceModel(), - (torch.randn(5, 3, 2, 4), indices, values), - flow, - generate_random_test_inputs=False, - ) - - def test_index_put_shapes(self, flow: TestFlow) -> None: - indices = (torch.tensor([0, 2]),) - values = torch.tensor([10.0, 20.0]) - self._test_op( - IndexPutModel(), - (torch.randn(5), indices, values), - flow, - generate_random_test_inputs=False, - ) - - indices = (torch.tensor([0, 2]), torch.tensor([1, 1])) - values = torch.tensor([10.0, 20.0]) - self._test_op( - IndexPutModel(), - (torch.randn(5, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - indices = (torch.tensor([0, 2]), torch.tensor([1, 1]), torch.tensor([0, 1])) - values = torch.tensor([10.0, 20.0]) - self._test_op( - IndexPutModel(), - (torch.randn(5, 3, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - indices = ( - torch.tensor([0, 2]), - torch.tensor([1, 1]), - torch.tensor([0, 1]), - torch.tensor([2, 3]), - ) - values = torch.tensor( - [ - 10.0, - ] - ) - self._test_op( - IndexPutModel(), - (torch.randn(5, 3, 2, 4), indices, values), - flow, - generate_random_test_inputs=False, - ) - - def test_index_put_in_place_indices(self, flow: TestFlow) -> None: - indices = (torch.tensor([2]),) - values = torch.tensor([10.0]) - self._test_op( - IndexPutInPlaceModel(), - (torch.randn(5, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - indices = (torch.tensor([0, 2, 4]),) - values = torch.tensor([10.0, 20.0, 30.0]) - self._test_op( - IndexPutInPlaceModel(), - (torch.randn(5, 3), indices, values), - flow, - generate_random_test_inputs=False, - ) - - indices = (torch.tensor([1, 1, 3, 3]),) - 
values = torch.tensor([10.0, 20.0, 30.0, 40.0]) - self._test_op( - IndexPutInPlaceModel(accumulate=True), - (torch.randn(5), indices, values), - flow, - generate_random_test_inputs=False, - ) - - def test_index_put_indices(self, flow: TestFlow) -> None: - indices = (torch.tensor([2]),) - values = torch.tensor([10.0]) - self._test_op( - IndexPutModel(), - (torch.randn(5, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - indices = (torch.tensor([0, 2, 4]),) - values = torch.tensor([10.0, 20.0, 30.0]) - self._test_op( - IndexPutModel(), - (torch.randn(5, 3), indices, values), - flow, - generate_random_test_inputs=False, - ) - - indices = (torch.tensor([1, 1, 3, 3]),) - values = torch.tensor([10.0, 20.0, 30.0, 40.0]) - self._test_op( - IndexPutModel(accumulate=True), - (torch.randn(5), indices, values), - flow, - generate_random_test_inputs=False, - ) - - def test_index_put_in_place_broadcasting(self, flow: TestFlow) -> None: - # Test scalar broadcasting - single value to multiple positions - indices = (torch.tensor([0, 2, 4]),) - values = torch.tensor([42.0]) - self._test_op( - IndexPutInPlaceModel(), - (torch.randn(5, 3), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test 1D broadcasting to 2D indexed positions - indices = (torch.tensor([0, 1]), torch.tensor([1, 2])) - values = torch.tensor([10.0, 20.0]) # 1D tensor - self._test_op( - IndexPutInPlaceModel(), - (torch.randn(3, 4), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test broadcasting with compatible shapes - 1D to multiple 2D slices - indices = (torch.tensor([0, 2]),) - values = torch.tensor([5.0, 15.0]) # Will broadcast to (2, 3) shape - self._test_op( - IndexPutInPlaceModel(), - (torch.randn(4, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test 2D values broadcasting to 3D indexed positions - indices = (torch.tensor([0, 1]),) - values = torch.tensor([[1.0, 2.0], [3.0, 4.0]]) # 2D tensor - self._test_op( - IndexPutInPlaceModel(), - (torch.randn(3, 2, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test broadcasting with accumulate=True - indices = (torch.tensor([1, 1, 1]),) - values = torch.tensor([5.0]) # Scalar will be added 3 times to same position - self._test_op( - IndexPutInPlaceModel(accumulate=True), - (torch.ones(4, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - def test_index_put_broadcasting(self, flow: TestFlow) -> None: - # Test scalar broadcasting - single value to multiple positions - indices = (torch.tensor([0, 2, 4]),) - values = torch.tensor([42.0]) - self._test_op( - IndexPutModel(), - (torch.randn(5, 3), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test 1D broadcasting to 2D indexed positions - indices = (torch.tensor([0, 1]), torch.tensor([1, 2])) - values = torch.tensor([10.0, 20.0]) # 1D tensor - self._test_op( - IndexPutModel(), - (torch.randn(3, 4), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test broadcasting with compatible shapes - 1D to multiple 2D slices - indices = (torch.tensor([0, 2]),) - values = torch.tensor([5.0, 15.0]) # Will broadcast to (2, 3) shape - self._test_op( - IndexPutModel(), - (torch.randn(4, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test 2D values broadcasting to 3D indexed positions - indices = (torch.tensor([0, 1]),) - values = torch.tensor([[1.0, 2.0], [3.0, 4.0]]) # 2D tensor - self._test_op( - IndexPutModel(), - 
(torch.randn(3, 2, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test broadcasting with accumulate=True - indices = (torch.tensor([1, 1, 1]),) - values = torch.tensor([5.0]) # Scalar will be added 3 times to same position - self._test_op( - IndexPutModel(accumulate=True), - (torch.ones(4, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - def test_index_put_in_place_two_indices(self, flow: TestFlow) -> None: - # Test basic two-index tensor indexing - indices = (torch.tensor([0, 1, 2]), torch.tensor([1, 0, 2])) - values = torch.tensor([10.0, 20.0, 30.0]) - self._test_op( - IndexPutInPlaceModel(), - (torch.randn(4, 3), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test two-index with different lengths (broadcasting) - indices = (torch.tensor([0, 2]), torch.tensor([1, 1])) - values = torch.tensor([15.0, 25.0]) - self._test_op( - IndexPutInPlaceModel(), - (torch.randn(3, 3), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test two-index with repeated positions and accumulate=True - indices = (torch.tensor([1, 1, 2]), torch.tensor([0, 0, 1])) - values = torch.tensor([5.0, 10.0, 15.0]) - self._test_op( - IndexPutInPlaceModel(accumulate=True), - (torch.zeros(3, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test two-index with repeated positions and accumulate=False - indices = (torch.tensor([1, 1, 2]), torch.tensor([0, 0, 1])) - values = torch.tensor([5.0, 10.0, 15.0]) - self._test_op( - IndexPutInPlaceModel(accumulate=False), - (torch.zeros(3, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test two-index with index broadcast. - indices = (torch.tensor([1]), torch.tensor([0, 0, 1])) - values = torch.tensor([5.0, 10.0, 15.0]) - self._test_op( - IndexPutInPlaceModel(accumulate=False), - (torch.zeros(3, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - def test_index_put_two_indices(self, flow: TestFlow) -> None: - # Test basic two-index tensor indexing - indices = (torch.tensor([0, 1, 2]), torch.tensor([1, 0, 2])) - values = torch.tensor([10.0, 20.0, 30.0]) - self._test_op( - IndexPutModel(), - (torch.randn(4, 3), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test two-index with different lengths (broadcasting) - indices = (torch.tensor([0, 2]), torch.tensor([1, 1])) - values = torch.tensor([15.0, 25.0]) - self._test_op( - IndexPutModel(), - (torch.randn(3, 3), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test two-index with repeated positions and accumulate=True - indices = (torch.tensor([1, 1, 2]), torch.tensor([0, 0, 1])) - values = torch.tensor([5.0, 10.0, 15.0]) - self._test_op( - IndexPutModel(accumulate=True), - (torch.zeros(3, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test two-index with repeated positions and accumulate=False - indices = (torch.tensor([1, 1, 2]), torch.tensor([0, 0, 1])) - values = torch.tensor([5.0, 10.0, 15.0]) - self._test_op( - IndexPutModel(accumulate=False), - (torch.zeros(3, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) - - # Test two-index with index broadcast. 
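Worth calling out in these two-index cases: when the index tensors contain repeated positions, accumulate=True sums the colliding values, while PyTorch documents accumulate=False with duplicate indices as undefined behavior — one more reason these tests pin their inputs instead of letting the runner regenerate them. A minimal sketch outside the suite:

import torch

x = torch.zeros(3, 2)
idx = (torch.tensor([1, 1, 2]), torch.tensor([0, 0, 1]))  # position (1, 0) appears twice
vals = torch.tensor([5.0, 10.0, 15.0])

# Duplicates sum under accumulate=True: x[1, 0] ends up 5.0 + 10.0 = 15.0.
summed = torch.index_put(x, idx, vals, accumulate=True)

# With accumulate=False, duplicate indices are documented as undefined
# behavior; in practice one of the writes wins.
overwritten = torch.index_put(x, idx, vals, accumulate=False)
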
- indices = (torch.tensor([1]), torch.tensor([0, 0, 1])) - values = torch.tensor([5.0, 10.0, 15.0]) - self._test_op( - IndexPutModel(accumulate=False), - (torch.zeros(3, 2), indices, values), - flow, - generate_random_test_inputs=False, - ) +@parameterize_by_dtype +def test_index_put_in_place_dtype(test_runner, dtype) -> None: + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]).to(dtype) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + ((torch.rand(5, 2) * 100).to(dtype), indices, values), + generate_random_test_inputs=False, + ) + + +@parameterize_by_dtype +def test_index_put_dtype(test_runner, dtype) -> None: + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]).to(dtype) + test_runner.lower_and_run_model( + IndexPutModel(), + ((torch.rand(5, 2) * 100).to(dtype), indices, values), + generate_random_test_inputs=False, + ) + + +def test_index_put_in_place_accumulate(test_runner) -> None: + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(accumulate=False), + (torch.ones(5, 2), indices, values), + generate_random_test_inputs=False, + ) + + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(accumulate=True), + (torch.ones(5, 2), indices, values), + generate_random_test_inputs=False, + ) + + +def test_index_put_accumulate(test_runner) -> None: + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]) + test_runner.lower_and_run_model( + IndexPutModel(accumulate=False), + (torch.ones(5, 2), indices, values), + generate_random_test_inputs=False, + ) + + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]) + test_runner.lower_and_run_model( + IndexPutModel(accumulate=True), + (torch.ones(5, 2), indices, values), + generate_random_test_inputs=False, + ) + + +def test_index_put_in_place_shapes(test_runner) -> None: + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + (torch.randn(5), indices, values), + generate_random_test_inputs=False, + ) + + indices = (torch.tensor([0, 2]), torch.tensor([1, 1])) + values = torch.tensor([10.0, 20.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + (torch.randn(5, 2), indices, values), + generate_random_test_inputs=False, + ) + + indices = (torch.tensor([0, 2]), torch.tensor([1, 1]), torch.tensor([0, 1])) + values = torch.tensor([10.0, 20.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + (torch.randn(5, 3, 2), indices, values), + generate_random_test_inputs=False, + ) + + indices = ( + torch.tensor([0, 2]), + torch.tensor([1, 1]), + torch.tensor([0, 1]), + torch.tensor([2, 3]), + ) + values = torch.tensor( + [ + 10.0, + ] + ) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + (torch.randn(5, 3, 2, 4), indices, values), + generate_random_test_inputs=False, + ) + + +def test_index_put_shapes(test_runner) -> None: + indices = (torch.tensor([0, 2]),) + values = torch.tensor([10.0, 20.0]) + test_runner.lower_and_run_model( + IndexPutModel(), + (torch.randn(5), indices, values), + generate_random_test_inputs=False, + ) + + indices = (torch.tensor([0, 2]), torch.tensor([1, 1])) + values = torch.tensor([10.0, 20.0]) + test_runner.lower_and_run_model( + IndexPutModel(), + (torch.randn(5, 2), indices, values), + generate_random_test_inputs=False, + ) + + indices = (torch.tensor([0, 2]), 
torch.tensor([1, 1]), torch.tensor([0, 1])) + values = torch.tensor([10.0, 20.0]) + test_runner.lower_and_run_model( + IndexPutModel(), + (torch.randn(5, 3, 2), indices, values), + generate_random_test_inputs=False, + ) + + indices = ( + torch.tensor([0, 2]), + torch.tensor([1, 1]), + torch.tensor([0, 1]), + torch.tensor([2, 3]), + ) + values = torch.tensor( + [ + 10.0, + ] + ) + test_runner.lower_and_run_model( + IndexPutModel(), + (torch.randn(5, 3, 2, 4), indices, values), + generate_random_test_inputs=False, + ) + + +def test_index_put_in_place_indices(test_runner) -> None: + indices = (torch.tensor([2]),) + values = torch.tensor([10.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + (torch.randn(5, 2), indices, values), + generate_random_test_inputs=False, + ) + + indices = (torch.tensor([0, 2, 4]),) + values = torch.tensor([10.0, 20.0, 30.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + (torch.randn(5, 3), indices, values), + generate_random_test_inputs=False, + ) + + indices = (torch.tensor([1, 1, 3, 3]),) + values = torch.tensor([10.0, 20.0, 30.0, 40.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(accumulate=True), + (torch.randn(5), indices, values), + generate_random_test_inputs=False, + ) + + +def test_index_put_indices(test_runner) -> None: + indices = (torch.tensor([2]),) + values = torch.tensor([10.0]) + test_runner.lower_and_run_model( + IndexPutModel(), + (torch.randn(5, 2), indices, values), + generate_random_test_inputs=False, + ) + + indices = (torch.tensor([0, 2, 4]),) + values = torch.tensor([10.0, 20.0, 30.0]) + test_runner.lower_and_run_model( + IndexPutModel(), + (torch.randn(5, 3), indices, values), + generate_random_test_inputs=False, + ) + + indices = (torch.tensor([1, 1, 3, 3]),) + values = torch.tensor([10.0, 20.0, 30.0, 40.0]) + test_runner.lower_and_run_model( + IndexPutModel(accumulate=True), + (torch.randn(5), indices, values), + generate_random_test_inputs=False, + ) + + +def test_index_put_in_place_broadcasting(test_runner) -> None: + # Test scalar broadcasting - single value to multiple positions + indices = (torch.tensor([0, 2, 4]),) + values = torch.tensor([42.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + (torch.randn(5, 3), indices, values), + generate_random_test_inputs=False, + ) + + # Test 1D broadcasting to 2D indexed positions + indices = (torch.tensor([0, 1]), torch.tensor([1, 2])) + values = torch.tensor([10.0, 20.0]) # 1D tensor + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + (torch.randn(3, 4), indices, values), + generate_random_test_inputs=False, + ) + + # Test broadcasting with compatible shapes - 1D to multiple 2D slices + indices = (torch.tensor([0, 2]),) + values = torch.tensor([5.0, 15.0]) # Will broadcast to (2, 3) shape + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + (torch.randn(4, 2), indices, values), + generate_random_test_inputs=False, + ) + + # Test 2D values broadcasting to 3D indexed positions + indices = (torch.tensor([0, 1]),) + values = torch.tensor([[1.0, 2.0], [3.0, 4.0]]) # 2D tensor + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + (torch.randn(3, 2, 2), indices, values), + generate_random_test_inputs=False, + ) + + # Test broadcasting with accumulate=True + indices = (torch.tensor([1, 1, 1]),) + values = torch.tensor([5.0]) # Scalar will be added 3 times to same position + test_runner.lower_and_run_model( + IndexPutInPlaceModel(accumulate=True), + (torch.ones(4, 2), indices, values), + 
generate_random_test_inputs=False, + ) + + +def test_index_put_broadcasting(test_runner) -> None: + # Test scalar broadcasting - single value to multiple positions + indices = (torch.tensor([0, 2, 4]),) + values = torch.tensor([42.0]) + test_runner.lower_and_run_model( + IndexPutModel(), + (torch.randn(5, 3), indices, values), + generate_random_test_inputs=False, + ) + + # Test 1D broadcasting to 2D indexed positions + indices = (torch.tensor([0, 1]), torch.tensor([1, 2])) + values = torch.tensor([10.0, 20.0]) # 1D tensor + test_runner.lower_and_run_model( + IndexPutModel(), + (torch.randn(3, 4), indices, values), + generate_random_test_inputs=False, + ) + + # Test broadcasting with compatible shapes - 1D to multiple 2D slices + indices = (torch.tensor([0, 2]),) + values = torch.tensor([5.0, 15.0]) # Will broadcast to (2, 3) shape + test_runner.lower_and_run_model( + IndexPutModel(), + (torch.randn(4, 2), indices, values), + generate_random_test_inputs=False, + ) + + # Test 2D values broadcasting to 3D indexed positions + indices = (torch.tensor([0, 1]),) + values = torch.tensor([[1.0, 2.0], [3.0, 4.0]]) # 2D tensor + test_runner.lower_and_run_model( + IndexPutModel(), + (torch.randn(3, 2, 2), indices, values), + generate_random_test_inputs=False, + ) + + # Test broadcasting with accumulate=True + indices = (torch.tensor([1, 1, 1]),) + values = torch.tensor([5.0]) # Scalar will be added 3 times to same position + test_runner.lower_and_run_model( + IndexPutModel(accumulate=True), + (torch.ones(4, 2), indices, values), + generate_random_test_inputs=False, + ) + + +def test_index_put_in_place_two_indices(test_runner) -> None: + # Test basic two-index tensor indexing + indices = (torch.tensor([0, 1, 2]), torch.tensor([1, 0, 2])) + values = torch.tensor([10.0, 20.0, 30.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + (torch.randn(4, 3), indices, values), + generate_random_test_inputs=False, + ) + + # Test two-index with different lengths (broadcasting) + indices = (torch.tensor([0, 2]), torch.tensor([1, 1])) + values = torch.tensor([15.0, 25.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(), + (torch.randn(3, 3), indices, values), + generate_random_test_inputs=False, + ) + + # Test two-index with repeated positions and accumulate=True + indices = (torch.tensor([1, 1, 2]), torch.tensor([0, 0, 1])) + values = torch.tensor([5.0, 10.0, 15.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(accumulate=True), + (torch.zeros(3, 2), indices, values), + generate_random_test_inputs=False, + ) + + # Test two-index with repeated positions and accumulate=False + indices = (torch.tensor([1, 1, 2]), torch.tensor([0, 0, 1])) + values = torch.tensor([5.0, 10.0, 15.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(accumulate=False), + (torch.zeros(3, 2), indices, values), + generate_random_test_inputs=False, + ) + + # Test two-index with index broadcast. 
+ indices = (torch.tensor([1]), torch.tensor([0, 0, 1])) + values = torch.tensor([5.0, 10.0, 15.0]) + test_runner.lower_and_run_model( + IndexPutInPlaceModel(accumulate=False), + (torch.zeros(3, 2), indices, values), + generate_random_test_inputs=False, + ) + + +def test_index_put_two_indices(test_runner) -> None: + # Test basic two-index tensor indexing + indices = (torch.tensor([0, 1, 2]), torch.tensor([1, 0, 2])) + values = torch.tensor([10.0, 20.0, 30.0]) + test_runner.lower_and_run_model( + IndexPutModel(), + (torch.randn(4, 3), indices, values), + generate_random_test_inputs=False, + ) + + # Test two-index with different lengths (broadcasting) + indices = (torch.tensor([0, 2]), torch.tensor([1, 1])) + values = torch.tensor([15.0, 25.0]) + test_runner.lower_and_run_model( + IndexPutModel(), + (torch.randn(3, 3), indices, values), + generate_random_test_inputs=False, + ) + + # Test two-index with repeated positions and accumulate=True + indices = (torch.tensor([1, 1, 2]), torch.tensor([0, 0, 1])) + values = torch.tensor([5.0, 10.0, 15.0]) + test_runner.lower_and_run_model( + IndexPutModel(accumulate=True), + (torch.zeros(3, 2), indices, values), + generate_random_test_inputs=False, + ) + + # Test two-index with repeated positions and accumulate=False + indices = (torch.tensor([1, 1, 2]), torch.tensor([0, 0, 1])) + values = torch.tensor([5.0, 10.0, 15.0]) + test_runner.lower_and_run_model( + IndexPutModel(accumulate=False), + (torch.zeros(3, 2), indices, values), + generate_random_test_inputs=False, + ) + + # Test two-index with index broadcast. + indices = (torch.tensor([1]), torch.tensor([0, 0, 1])) + values = torch.tensor([5.0, 10.0, 15.0]) + test_runner.lower_and_run_model( + IndexPutModel(accumulate=False), + (torch.zeros(3, 2), indices, values), + generate_random_test_inputs=False, + ) diff --git a/backends/test/suite/operators/test_index_select.py b/backends/test/suite/operators/test_index_select.py index 46a8018ef93..d2e0b87aaac 100644 --- a/backends/test/suite/operators/test_index_select.py +++ b/backends/test/suite/operators/test_index_select.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class IndexSelectModel(torch.nn.Module): @@ -26,103 +21,92 @@ def forward(self, x, indices): return torch.index_select(x, self.dim, indices) -@operator_test -class IndexSelect(OperatorTest): - @dtype_test - def test_index_select_dtype(self, flow: TestFlow, dtype) -> None: - indices = torch.tensor([0, 2], dtype=torch.int64) - self._test_op( - IndexSelectModel(dim=0), - ((torch.rand(5, 3) * 100).to(dtype), indices), - flow, - generate_random_test_inputs=False, - ) - - def test_index_select_dimensions(self, flow: TestFlow) -> None: - indices = torch.tensor([0, 2], dtype=torch.int64) - self._test_op( - IndexSelectModel(dim=0), - (torch.randn(5, 3), indices), - flow, - generate_random_test_inputs=False, - ) - - indices = torch.tensor([0, 1], dtype=torch.int64) - self._test_op( - IndexSelectModel(dim=1), - (torch.randn(5, 3), indices), - flow, - generate_random_test_inputs=False, - ) - - indices = torch.tensor([0, 2], dtype=torch.int64) - self._test_op( - IndexSelectModel(dim=2), - (torch.randn(3, 4, 5), indices), - flow, - generate_random_test_inputs=False, - ) - - def test_index_select_shapes(self, flow: TestFlow) -> None: - indices = torch.tensor([0, 1], dtype=torch.int64) 
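The pinned-input pattern (generate_random_test_inputs=False) carries over to the test_index_select conversions here for the same reason: regenerated random inputs would also re-randomize the integer index tensor, which must stay within the bounds of the selected dimension. A standalone sketch of the operation under test:

import torch

x = torch.randn(5, 3)
idx = torch.tensor([0, 2, 4], dtype=torch.int64)  # valid rows for dim 0 of a 5-row tensor

rows = torch.index_select(x, 0, idx)  # shape (3, 3): rows 0, 2, 4 in order
# torch.index_select(x, 0, torch.tensor([5])) would raise an out-of-range error.
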
- - self._test_op( - IndexSelectModel(dim=0), - (torch.randn(5), indices), - flow, - generate_random_test_inputs=False, - ) - - self._test_op( - IndexSelectModel(dim=0), - (torch.randn(5, 3), indices), - flow, - generate_random_test_inputs=False, - ) - - self._test_op( - IndexSelectModel(dim=0), - (torch.randn(5, 3, 2), indices), - flow, - generate_random_test_inputs=False, - ) - - self._test_op( - IndexSelectModel(dim=0), - (torch.randn(5, 3, 2, 4), indices), - flow, - generate_random_test_inputs=False, - ) - - def test_index_select_indices(self, flow: TestFlow) -> None: - indices = torch.tensor([2], dtype=torch.int64) - self._test_op( - IndexSelectModel(dim=0), - (torch.randn(5, 3), indices), - flow, - generate_random_test_inputs=False, - ) - - indices = torch.tensor([0, 2, 4], dtype=torch.int64) - self._test_op( - IndexSelectModel(dim=0), - (torch.randn(5, 3), indices), - flow, - generate_random_test_inputs=False, - ) - - indices = torch.tensor([1, 1, 3, 3], dtype=torch.int64) - self._test_op( - IndexSelectModel(dim=0), - (torch.randn(5, 3), indices), - flow, - generate_random_test_inputs=False, - ) - - indices = torch.tensor([4, 3, 2, 1, 0], dtype=torch.int64) - self._test_op( - IndexSelectModel(dim=0), - (torch.randn(5, 3), indices), - flow, - generate_random_test_inputs=False, - ) +@parameterize_by_dtype +def test_index_select_dtype(test_runner, dtype) -> None: + indices = torch.tensor([0, 2], dtype=torch.int64) + test_runner.lower_and_run_model( + IndexSelectModel(dim=0), + ((torch.rand(5, 3) * 100).to(dtype), indices), + generate_random_test_inputs=False, + ) + + +def test_index_select_dimensions(test_runner) -> None: + indices = torch.tensor([0, 2], dtype=torch.int64) + test_runner.lower_and_run_model( + IndexSelectModel(dim=0), + (torch.randn(5, 3), indices), + generate_random_test_inputs=False, + ) + + indices = torch.tensor([0, 1], dtype=torch.int64) + test_runner.lower_and_run_model( + IndexSelectModel(dim=1), + (torch.randn(5, 3), indices), + generate_random_test_inputs=False, + ) + + indices = torch.tensor([0, 2], dtype=torch.int64) + test_runner.lower_and_run_model( + IndexSelectModel(dim=2), + (torch.randn(3, 4, 5), indices), + generate_random_test_inputs=False, + ) + + +def test_index_select_shapes(test_runner) -> None: + indices = torch.tensor([0, 1], dtype=torch.int64) + + test_runner.lower_and_run_model( + IndexSelectModel(dim=0), + (torch.randn(5), indices), + generate_random_test_inputs=False, + ) + + test_runner.lower_and_run_model( + IndexSelectModel(dim=0), + (torch.randn(5, 3), indices), + generate_random_test_inputs=False, + ) + + test_runner.lower_and_run_model( + IndexSelectModel(dim=0), + (torch.randn(5, 3, 2), indices), + generate_random_test_inputs=False, + ) + + test_runner.lower_and_run_model( + IndexSelectModel(dim=0), + (torch.randn(5, 3, 2, 4), indices), + generate_random_test_inputs=False, + ) + + +def test_index_select_indices(test_runner) -> None: + indices = torch.tensor([2], dtype=torch.int64) + test_runner.lower_and_run_model( + IndexSelectModel(dim=0), + (torch.randn(5, 3), indices), + generate_random_test_inputs=False, + ) + + indices = torch.tensor([0, 2, 4], dtype=torch.int64) + test_runner.lower_and_run_model( + IndexSelectModel(dim=0), + (torch.randn(5, 3), indices), + generate_random_test_inputs=False, + ) + + indices = torch.tensor([1, 1, 3, 3], dtype=torch.int64) + test_runner.lower_and_run_model( + IndexSelectModel(dim=0), + (torch.randn(5, 3), indices), + generate_random_test_inputs=False, + ) + + indices = torch.tensor([4, 3, 2, 1, 
0], dtype=torch.int64) + test_runner.lower_and_run_model( + IndexSelectModel(dim=0), + (torch.randn(5, 3), indices), + generate_random_test_inputs=False, + ) diff --git a/backends/test/suite/operators/test_leaky_relu.py b/backends/test/suite/operators/test_leaky_relu.py index 79ed5425623..06d0e6f2c73 100644 --- a/backends/test/suite/operators/test_leaky_relu.py +++ b/backends/test/suite/operators/test_leaky_relu.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -31,26 +26,29 @@ def forward(self, x): ) -@operator_test -class TestLeakyReLU(OperatorTest): - @dtype_test - def test_leaky_relu_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 2 - 1).to(dtype),), flow) +@parameterize_by_dtype +def test_leaky_relu_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model(Model(), ((torch.rand(2, 10) * 2 - 1).to(dtype),)) - def test_leaky_relu_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_leaky_relu_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_leaky_relu_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - def test_leaky_relu_f32_custom_slope(self, flow: TestFlow) -> None: - self._test_op(Model(negative_slope=0.1), (torch.randn(3, 4, 5),), flow) - @unittest.skip("In place activations aren't properly defunctionalized yet.") - def test_leaky_relu_f32_inplace(self, flow: TestFlow) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) +def test_leaky_relu_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(2, 3, 4, 5),)) - def test_leaky_relu_f32_boundary_values(self, flow: TestFlow) -> None: - # Test with specific positive and negative values - x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) - self._test_op(Model(), (x,), flow) + +def test_leaky_relu_f32_custom_slope(test_runner) -> None: + test_runner.lower_and_run_model(Model(negative_slope=0.1), (torch.randn(3, 4, 5),)) + + +@unittest.skip("In place activations aren't properly defunctionalized yet.") +def test_leaky_relu_f32_inplace(test_runner) -> None: + test_runner.lower_and_run_model(Model(inplace=True), (torch.randn(3, 4, 5),)) + + +def test_leaky_relu_f32_boundary_values(test_runner) -> None: + # Test with specific positive and negative values + x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) + test_runner.lower_and_run_model(Model(), (x,)) diff --git a/backends/test/suite/operators/test_linear.py b/backends/test/suite/operators/test_linear.py index 30ae963a1ba..b16a6469ac2 100644 --- a/backends/test/suite/operators/test_linear.py +++ b/backends/test/suite/operators/test_linear.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -35,90 +30,80 @@ def forward(self, x): return self.linear(x) -@operator_test -class Linear(OperatorTest): - @dtype_test - def test_linear_dtype(self, flow: TestFlow, dtype) -> None: - 
self._test_op( - Model().to(dtype), - ((torch.rand(16, 64) * 10).to(dtype),), - flow, - ) +@parameterize_by_dtype +def test_linear_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(16, 64) * 10).to(dtype),), + ) - @dtype_test - def test_linear_no_bias_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model(bias=False).to(dtype), - ((torch.rand(16, 64) * 10).to(dtype),), - flow, - ) - def test_linear_feature_sizes(self, flow: TestFlow) -> None: - self._test_op( - Model(in_features=32, out_features=16), - (torch.randn(20, 32),), - flow, - ) - self._test_op( - Model(in_features=128, out_features=64), - (torch.randn(8, 128),), - flow, - ) - self._test_op( - Model(in_features=256, out_features=1), - (torch.randn(4, 256),), - flow, - ) - self._test_op( - Model(in_features=1, out_features=512), - (torch.randn(1024, 1),), - flow, - ) +@parameterize_by_dtype +def test_linear_no_bias_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model(bias=False).to(dtype), + ((torch.rand(16, 64) * 10).to(dtype),), + ) - def test_linear_no_bias(self, flow: TestFlow) -> None: - self._test_op( - Model(bias=False), - (torch.randn(16, 64),), - flow, - ) - self._test_op( - Model(in_features=128, out_features=96, bias=False), - (torch.randn(8, 128),), - flow, - ) - def test_linear_batch_sizes(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(8, 64),), - flow, - ) - self._test_op( - Model(), - (torch.randn(32, 64),), - flow, - ) - self._test_op( - Model(), - (torch.randn(100, 64),), - flow, - ) +def test_linear_feature_sizes(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_features=32, out_features=16), + (torch.randn(20, 32),), + ) + test_runner.lower_and_run_model( + Model(in_features=128, out_features=64), + (torch.randn(8, 128),), + ) + test_runner.lower_and_run_model( + Model(in_features=256, out_features=1), + (torch.randn(4, 256),), + ) + test_runner.lower_and_run_model( + Model(in_features=1, out_features=512), + (torch.randn(1024, 1),), + ) - def test_linear_unbatched(self, flow: TestFlow) -> None: - self._test_op( - Model(in_features=512), - (torch.randn(512),), - flow, - ) - def test_linear_leading_batch(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(4, 8, 64),), - flow, - ) - self._test_op( - Model(), - (torch.randn(2, 4, 8, 64),), - flow, - ) +def test_linear_no_bias(test_runner) -> None: + test_runner.lower_and_run_model( + Model(bias=False), + (torch.randn(16, 64),), + ) + test_runner.lower_and_run_model( + Model(in_features=128, out_features=96, bias=False), + (torch.randn(8, 128),), + ) + + +def test_linear_batch_sizes(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 64),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(32, 64),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(100, 64),), + ) + + +def test_linear_unbatched(test_runner) -> None: + test_runner.lower_and_run_model( + Model(in_features=512), + (torch.randn(512),), + ) + + +def test_linear_leading_batch(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(4, 8, 64),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 4, 8, 64),), + ) diff --git a/backends/test/suite/operators/test_log.py b/backends/test/suite/operators/test_log.py index c4af1fe442b..b310feeb41e 100644 --- a/backends/test/suite/operators/test_log.py +++ b/backends/test/suite/operators/test_log.py 
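The log-family files below repeat the conversion applied throughout this patch: each @operator_test class with flow-typed methods becomes module-level pytest functions taking the test_runner fixture, and @dtype_test becomes parameterize_by_dtype. In condensed form (a sketch of the shape of the change using this suite's names; parameterize_by_dtype is assumed to expand to a pytest parameterization over the suite's dtype list):

import torch
from executorch.backends.test.suite.operators import parameterize_by_dtype


class LogModel(torch.nn.Module):
    def forward(self, x):
        return torch.log(x)


# Old style (removed): a @dtype_test method on an @operator_test class,
# receiving the flow explicitly:
#
#     def test_log_dtype(self, flow: TestFlow, dtype) -> None:
#         self._test_op(LogModel().to(dtype), (torch.rand(10, 10).to(dtype) + 0.01,), flow)


# New style (added): a plain function; the flow arrives via the test_runner
# fixture, parameterized over every registered flow.
@parameterize_by_dtype
def test_log_dtype(test_runner, dtype) -> None:
    test_runner.lower_and_run_model(LogModel().to(dtype), (torch.rand(10, 10).to(dtype) + 0.01,))

Note also that the edge-case tests keep their @unittest.skip decorators after the conversion; that still works under pytest, since the wrapped function raises unittest.SkipTest when called and pytest reports that as a skip.
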
@@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class LogModel(torch.nn.Module): @@ -27,34 +22,34 @@ def forward(self, x): return torch.log(x) -@operator_test -class TestLog(OperatorTest): - @dtype_test - def test_log_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = LogModel().to(dtype) - # Use positive values only for log - self._test_op(model, (torch.rand(10, 10).to(dtype) + 0.01,), flow) +@parameterize_by_dtype +def test_log_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = LogModel().to(dtype) + # Use positive values only for log + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype) + 0.01,)) - def test_log_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - # 1D tensor - self._test_op(LogModel(), (torch.rand(20) + 0.01,), flow) +def test_log_shapes(test_runner) -> None: + # Test with different tensor shapes - # 2D tensor - self._test_op(LogModel(), (torch.rand(5, 10) + 0.01,), flow) + # 1D tensor + test_runner.lower_and_run_model(LogModel(), (torch.rand(20) + 0.01,)) - # 3D tensor - self._test_op(LogModel(), (torch.rand(3, 4, 5) + 0.01,), flow) + # 2D tensor + test_runner.lower_and_run_model(LogModel(), (torch.rand(5, 10) + 0.01,)) - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_log_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases - # Tensor with infinity - x = torch.tensor([float("inf"), 1.0, 2.0]) - self._test_op(LogModel(), (x,), flow, generate_random_test_inputs=False) + # 3D tensor + test_runner.lower_and_run_model(LogModel(), (torch.rand(3, 4, 5) + 0.01,)) - # Tensor with NaN - x = torch.tensor([float("nan"), 1.0, 2.0]) - self._test_op(LogModel(), (x,), flow, generate_random_test_inputs=False) + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_log_edge_cases(test_runner) -> None: + # Test edge cases + # Tensor with infinity + x = torch.tensor([float("inf"), 1.0, 2.0]) + test_runner.lower_and_run_model(LogModel(), (x,), generate_random_test_inputs=False) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.0, 2.0]) + test_runner.lower_and_run_model(LogModel(), (x,), generate_random_test_inputs=False) diff --git a/backends/test/suite/operators/test_log10.py b/backends/test/suite/operators/test_log10.py index aeb97671f1b..0910290e6cf 100644 --- a/backends/test/suite/operators/test_log10.py +++ b/backends/test/suite/operators/test_log10.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Log10Model(torch.nn.Module): @@ -27,34 +22,38 @@ def forward(self, x): return torch.log10(x) -@operator_test -class TestLog10(OperatorTest): - @dtype_test - def test_log10_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = Log10Model().to(dtype) - # Use positive values only for log10 - self._test_op(model, (torch.rand(10, 10).to(dtype) + 0.01,), flow) +@parameterize_by_dtype +def test_log10_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = Log10Model().to(dtype) + # Use positive values only for 
log10 + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype) + 0.01,)) - def test_log10_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - # 1D tensor - self._test_op(Log10Model(), (torch.rand(20) + 0.01,), flow) +def test_log10_shapes(test_runner) -> None: + # Test with different tensor shapes - # 2D tensor - self._test_op(Log10Model(), (torch.rand(5, 10) + 0.01,), flow) + # 1D tensor + test_runner.lower_and_run_model(Log10Model(), (torch.rand(20) + 0.01,)) - # 3D tensor - self._test_op(Log10Model(), (torch.rand(3, 4, 5) + 0.01,), flow) + # 2D tensor + test_runner.lower_and_run_model(Log10Model(), (torch.rand(5, 10) + 0.01,)) - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_log10_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases - # Tensor with infinity - x = torch.tensor([float("inf"), 1.0, 10.0]) - self._test_op(Log10Model(), (x,), flow, generate_random_test_inputs=False) + # 3D tensor + test_runner.lower_and_run_model(Log10Model(), (torch.rand(3, 4, 5) + 0.01,)) - # Tensor with NaN - x = torch.tensor([float("nan"), 1.0, 10.0]) - self._test_op(Log10Model(), (x,), flow, generate_random_test_inputs=False) + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_log10_edge_cases(test_runner) -> None: + # Test edge cases + # Tensor with infinity + x = torch.tensor([float("inf"), 1.0, 10.0]) + test_runner.lower_and_run_model( + Log10Model(), (x,), generate_random_test_inputs=False + ) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.0, 10.0]) + test_runner.lower_and_run_model( + Log10Model(), (x,), generate_random_test_inputs=False + ) diff --git a/backends/test/suite/operators/test_log1p.py b/backends/test/suite/operators/test_log1p.py index 08a5c382076..763ae5301ae 100644 --- a/backends/test/suite/operators/test_log1p.py +++ b/backends/test/suite/operators/test_log1p.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Log1pModel(torch.nn.Module): @@ -27,34 +22,38 @@ def forward(self, x): return torch.log1p(x) -@operator_test -class TestLog1p(OperatorTest): - @dtype_test - def test_log1p_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = Log1pModel().to(dtype) - # Use values greater than -1 for log1p - self._test_op(model, (torch.rand(10, 10).to(dtype) * 2 - 0.5,), flow) +@parameterize_by_dtype +def test_log1p_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = Log1pModel().to(dtype) + # Use values greater than -1 for log1p + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype) * 2 - 0.5,)) - def test_log1p_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - # 1D tensor - self._test_op(Log1pModel(), (torch.rand(20) * 2 - 0.5,), flow) +def test_log1p_shapes(test_runner) -> None: + # Test with different tensor shapes - # 2D tensor - self._test_op(Log1pModel(), (torch.rand(5, 10) * 2 - 0.5,), flow) + # 1D tensor + test_runner.lower_and_run_model(Log1pModel(), (torch.rand(20) * 2 - 0.5,)) - # 3D tensor - self._test_op(Log1pModel(), (torch.rand(3, 4, 5) * 2 - 0.5,), flow) + # 2D tensor + test_runner.lower_and_run_model(Log1pModel(), (torch.rand(5, 10) * 2 - 0.5,)) - @unittest.skip("NaN and Inf are not enforced for backends.") - def 
test_log1p_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases - # Tensor with infinity - x = torch.tensor([float("inf"), 0.0, 1.0]) - self._test_op(Log1pModel(), (x,), flow, generate_random_test_inputs=False) + # 3D tensor + test_runner.lower_and_run_model(Log1pModel(), (torch.rand(3, 4, 5) * 2 - 0.5,)) - # Tensor with NaN - x = torch.tensor([float("nan"), 0.0, 1.0]) - self._test_op(Log1pModel(), (x,), flow, generate_random_test_inputs=False) + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_log1p_edge_cases(test_runner) -> None: + # Test edge cases + # Tensor with infinity + x = torch.tensor([float("inf"), 0.0, 1.0]) + test_runner.lower_and_run_model( + Log1pModel(), (x,), generate_random_test_inputs=False + ) + + # Tensor with NaN + x = torch.tensor([float("nan"), 0.0, 1.0]) + test_runner.lower_and_run_model( + Log1pModel(), (x,), generate_random_test_inputs=False + ) diff --git a/backends/test/suite/operators/test_log2.py b/backends/test/suite/operators/test_log2.py index 16161d334f6..a0762e25959 100644 --- a/backends/test/suite/operators/test_log2.py +++ b/backends/test/suite/operators/test_log2.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Log2Model(torch.nn.Module): @@ -27,34 +22,38 @@ def forward(self, x): return torch.log2(x) -@operator_test -class TestLog2(OperatorTest): - @dtype_test - def test_log2_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = Log2Model().to(dtype) - # Use positive values only for log2 - self._test_op(model, (torch.rand(10, 10).to(dtype) + 0.01,), flow) +@parameterize_by_dtype +def test_log2_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = Log2Model().to(dtype) + # Use positive values only for log2 + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype) + 0.01,)) - def test_log2_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - # 1D tensor - self._test_op(Log2Model(), (torch.rand(20) + 0.01,), flow) +def test_log2_shapes(test_runner) -> None: + # Test with different tensor shapes - # 2D tensor - self._test_op(Log2Model(), (torch.rand(5, 10) + 0.01,), flow) + # 1D tensor + test_runner.lower_and_run_model(Log2Model(), (torch.rand(20) + 0.01,)) - # 3D tensor - self._test_op(Log2Model(), (torch.rand(3, 4, 5) + 0.01,), flow) + # 2D tensor + test_runner.lower_and_run_model(Log2Model(), (torch.rand(5, 10) + 0.01,)) - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_log2_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases - # Tensor with infinity - x = torch.tensor([float("inf"), 1.0, 2.0]) - self._test_op(Log2Model(), (x,), flow, generate_random_test_inputs=False) + # 3D tensor + test_runner.lower_and_run_model(Log2Model(), (torch.rand(3, 4, 5) + 0.01,)) - # Tensor with NaN - x = torch.tensor([float("nan"), 1.0, 2.0]) - self._test_op(Log2Model(), (x,), flow, generate_random_test_inputs=False) + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_log2_edge_cases(test_runner) -> None: + # Test edge cases + # Tensor with infinity + x = torch.tensor([float("inf"), 1.0, 2.0]) + test_runner.lower_and_run_model( + Log2Model(), (x,), generate_random_test_inputs=False + ) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.0, 2.0]) 
+ test_runner.lower_and_run_model( + Log2Model(), (x,), generate_random_test_inputs=False + ) diff --git a/backends/test/suite/operators/test_logsigmoid.py b/backends/test/suite/operators/test_logsigmoid.py index 1df1d11546f..f0841b28a6f 100644 --- a/backends/test/suite/operators/test_logsigmoid.py +++ b/backends/test/suite/operators/test_logsigmoid.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -22,19 +17,20 @@ def forward(self, x): return torch.nn.functional.logsigmoid(x) -@operator_test -class TestLogSigmoid(OperatorTest): - @dtype_test - def test_logsigmoid_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow) +@parameterize_by_dtype +def test_logsigmoid_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),)) - def test_logsigmoid_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_logsigmoid_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_logsigmoid_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - def test_logsigmoid_f32_boundary_values(self, flow: TestFlow) -> None: - # Test with specific values spanning negative and positive ranges - x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) - self._test_op(Model(), (x,), flow) + +def test_logsigmoid_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(2, 3, 4, 5),)) + + +def test_logsigmoid_f32_boundary_values(test_runner) -> None: + # Test with specific values spanning negative and positive ranges + x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) + test_runner.lower_and_run_model(Model(), (x,)) diff --git a/backends/test/suite/operators/test_lstm.py b/backends/test/suite/operators/test_lstm.py index 91dd73c9052..c703804abe8 100644 --- a/backends/test/suite/operators/test_lstm.py +++ b/backends/test/suite/operators/test_lstm.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -43,166 +38,155 @@ def forward(self, x): return self.lstm(x)[0] # Return only the output, not the hidden states -@operator_test -class LSTM(OperatorTest): - @dtype_test - def test_lstm_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model(num_layers=2).to(dtype), - ((torch.rand(1, 10, 64) * 10).to(dtype),), # (batch=1, seq_len, input_size) - flow, - ) - - @dtype_test - def test_lstm_no_bias_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model(num_layers=2, bias=False).to(dtype), - ((torch.rand(1, 10, 64) * 10).to(dtype),), - flow, - ) - - def test_lstm_feature_sizes(self, flow: TestFlow) -> None: - self._test_op( - Model(input_size=32, hidden_size=16), - (torch.randn(1, 8, 32),), # (batch=1, seq_len, input_size) - flow, - ) - self._test_op( - Model(input_size=128, hidden_size=64), - (torch.randn(1, 12, 128),), - flow, - ) - self._test_op( - Model(input_size=256, hidden_size=128), 
- (torch.randn(1, 6, 256),), - flow, - ) - self._test_op( - Model(input_size=16, hidden_size=32), - (torch.randn(1, 5, 16),), - flow, - ) - - def test_lstm_batch_sizes(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(8, 10, 64),), - flow, - ) - self._test_op( - Model(), - (torch.randn(32, 10, 64),), - flow, - ) - self._test_op( - Model(), - (torch.randn(100, 10, 64),), - flow, - ) - - def test_lstm_seq_lengths(self, flow: TestFlow) -> None: - self._test_op( - Model(), - (torch.randn(1, 5, 64),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 20, 64),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 50, 64),), - flow, - ) - - def test_lstm_batch_first_false(self, flow: TestFlow) -> None: - self._test_op( - Model(batch_first=False), - (torch.randn(10, 1, 64),), # (seq_len, batch=1, input_size) - flow, - ) - - def test_lstm_num_layers(self, flow: TestFlow) -> None: - self._test_op( - Model(num_layers=2), - (torch.randn(1, 10, 64),), - flow, - ) - self._test_op( - Model(num_layers=3), - (torch.randn(1, 10, 64),), - flow, - ) - - def test_lstm_bidirectional(self, flow: TestFlow) -> None: - self._test_op( - Model(bidirectional=True), - (torch.randn(1, 10, 64),), - flow, - ) - - def test_lstm_with_dropout(self, flow: TestFlow) -> None: - # Note: Dropout is only effective with num_layers > 1 - self._test_op( - Model(num_layers=2, dropout=0.2), - (torch.randn(1, 10, 64),), - flow, - ) - - def test_lstm_with_initial_states(self, flow: TestFlow) -> None: - # Create a model that accepts initial states - class ModelWithStates(torch.nn.Module): - def __init__(self): - super().__init__() - self.lstm = torch.nn.LSTM( - input_size=64, - hidden_size=32, - num_layers=2, - batch_first=True, - ) - - def forward(self, x, h0, c0): - return self.lstm(x, (h0, c0))[0] # Return only the output - - batch_size = 1 - num_layers = 2 - hidden_size = 32 - - self._test_op( - ModelWithStates(), - ( - torch.randn(batch_size, 10, 64), # input - torch.randn(num_layers, batch_size, hidden_size), # h0 - torch.randn(num_layers, batch_size, hidden_size), # c0 - ), - flow, - ) - - def test_lstm_return_hidden_states(self, flow: TestFlow) -> None: - # Create a model that returns both output and hidden states - class ModelWithHiddenStates(torch.nn.Module): - def __init__(self): - super().__init__() - self.lstm = torch.nn.LSTM( - input_size=64, - hidden_size=32, - num_layers=2, - batch_first=True, - ) - - def forward(self, x): - # Return the complete output tuple: (output, (h_n, c_n)) - output, (h_n, c_n) = self.lstm(x) - return output, h_n, c_n - - batch_size = 1 - seq_len = 10 - input_size = 64 - - self._test_op( - ModelWithHiddenStates(), - (torch.randn(batch_size, seq_len, input_size),), - flow, - ) +@parameterize_by_dtype +def test_lstm_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model(num_layers=2).to(dtype), + ((torch.rand(1, 10, 64) * 10).to(dtype),), # (batch=1, seq_len, input_size) + ) + + +@parameterize_by_dtype +def test_lstm_no_bias_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model(num_layers=2, bias=False).to(dtype), + ((torch.rand(1, 10, 64) * 10).to(dtype),), + ) + + +def test_lstm_feature_sizes(test_runner) -> None: + test_runner.lower_and_run_model( + Model(input_size=32, hidden_size=16), + (torch.randn(1, 8, 32),), # (batch=1, seq_len, input_size) + ) + test_runner.lower_and_run_model( + Model(input_size=128, hidden_size=64), + (torch.randn(1, 12, 128),), + ) + test_runner.lower_and_run_model( + 
Model(input_size=256, hidden_size=128), + (torch.randn(1, 6, 256),), + ) + test_runner.lower_and_run_model( + Model(input_size=16, hidden_size=32), + (torch.randn(1, 5, 16),), + ) + + +def test_lstm_batch_sizes(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 10, 64),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(32, 10, 64),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(100, 10, 64),), + ) + + +def test_lstm_seq_lengths(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 5, 64),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 20, 64),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 50, 64),), + ) + + +def test_lstm_batch_first_false(test_runner) -> None: + test_runner.lower_and_run_model( + Model(batch_first=False), + (torch.randn(10, 1, 64),), # (seq_len, batch=1, input_size) + ) + + +def test_lstm_num_layers(test_runner) -> None: + test_runner.lower_and_run_model( + Model(num_layers=2), + (torch.randn(1, 10, 64),), + ) + test_runner.lower_and_run_model( + Model(num_layers=3), + (torch.randn(1, 10, 64),), + ) + + +def test_lstm_bidirectional(test_runner) -> None: + test_runner.lower_and_run_model( + Model(bidirectional=True), + (torch.randn(1, 10, 64),), + ) + + +def test_lstm_with_dropout(test_runner) -> None: + # Note: Dropout is only effective with num_layers > 1 + test_runner.lower_and_run_model( + Model(num_layers=2, dropout=0.2), + (torch.randn(1, 10, 64),), + ) + + +def test_lstm_with_initial_states(test_runner) -> None: + # Create a model that accepts initial states + class ModelWithStates(torch.nn.Module): + def __init__(self): + super().__init__() + self.lstm = torch.nn.LSTM( + input_size=64, + hidden_size=32, + num_layers=2, + batch_first=True, + ) + + def forward(self, x, h0, c0): + return self.lstm(x, (h0, c0))[0] # Return only the output + + batch_size = 1 + num_layers = 2 + hidden_size = 32 + + test_runner.lower_and_run_model( + ModelWithStates(), + ( + torch.randn(batch_size, 10, 64), # input + torch.randn(num_layers, batch_size, hidden_size), # h0 + torch.randn(num_layers, batch_size, hidden_size), # c0 + ), + ) + + +def test_lstm_return_hidden_states(test_runner) -> None: + # Create a model that returns both output and hidden states + class ModelWithHiddenStates(torch.nn.Module): + def __init__(self): + super().__init__() + self.lstm = torch.nn.LSTM( + input_size=64, + hidden_size=32, + num_layers=2, + batch_first=True, + ) + + def forward(self, x): + # Return the complete output tuple: (output, (h_n, c_n)) + output, (h_n, c_n) = self.lstm(x) + return output, h_n, c_n + + batch_size = 1 + seq_len = 10 + input_size = 64 + + test_runner.lower_and_run_model( + ModelWithHiddenStates(), + (torch.randn(batch_size, seq_len, input_size),), + ) diff --git a/backends/test/suite/operators/test_masked_fill.py b/backends/test/suite/operators/test_masked_fill.py index 68dccba69f3..6b41b79fb42 100644 --- a/backends/test/suite/operators/test_masked_fill.py +++ b/backends/test/suite/operators/test_masked_fill.py @@ -9,13 +9,8 @@ from typing import Union import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class MaskedFillModel(torch.nn.Module): @@ -27,75 +22,69 @@ def forward(self, x, mask): return x.masked_fill(mask, self.value) 
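One detail the broadcast case below depends on: masked_fill broadcasts the boolean mask against the input, so a (32,) mask applied to a (16, 32) tensor fills the same columns in every row. A standalone sketch of the semantics being exercised:

import torch

x = torch.randn(16, 32)
mask = torch.randint(0, 2, (32,), dtype=torch.bool)  # broadcasts across dim 0

filled = x.masked_fill(mask, 0.0)
assert (filled[:, mask] == 0.0).all()  # masked columns zeroed in every row
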
-@operator_test -class MaskedFill(OperatorTest): - @dtype_test - def test_masked_fill_dtype(self, flow: TestFlow, dtype) -> None: - mask = torch.randint(0, 2, (16, 32), dtype=torch.bool) - self._test_op( - MaskedFillModel(value=0.0), - ( - torch.rand(16, 32).to(dtype), - mask, - ), - flow, - ) - - def test_masked_fill_different_values(self, flow: TestFlow) -> None: - mask = torch.randint(0, 2, (16, 32), dtype=torch.bool) - - self._test_op( - MaskedFillModel(value=5.0), - ( - torch.randn(16, 32), - mask, - ), - flow, - ) - - self._test_op( - MaskedFillModel(value=-5.0), - ( - torch.randn(16, 32), - mask, - ), - flow, - ) - - self._test_op( - MaskedFillModel(value=1), - ( - torch.randn(16, 32), - mask, - ), - flow, - ) - - def test_masked_fill_different_shapes(self, flow: TestFlow) -> None: - self._test_op( - MaskedFillModel(value=0.0), - ( - torch.randn(512), - torch.randint(0, 2, (512,), dtype=torch.bool), - ), - flow, - ) - - self._test_op( - MaskedFillModel(value=0.0), - ( - torch.randn(4, 8, 16), - torch.randint(0, 2, (4, 8, 16), dtype=torch.bool), - ), - flow, - ) - - def test_masked_fill_broadcast(self, flow: TestFlow) -> None: - self._test_op( - MaskedFillModel(value=0.0), - ( - torch.randn(16, 32), - torch.randint(0, 2, (32,), dtype=torch.bool), - ), - flow, - ) +@parameterize_by_dtype +def test_masked_fill_dtype(test_runner, dtype) -> None: + mask = torch.randint(0, 2, (16, 32), dtype=torch.bool) + test_runner.lower_and_run_model( + MaskedFillModel(value=0.0), + ( + torch.rand(16, 32).to(dtype), + mask, + ), + ) + + +def test_masked_fill_different_values(test_runner) -> None: + mask = torch.randint(0, 2, (16, 32), dtype=torch.bool) + + test_runner.lower_and_run_model( + MaskedFillModel(value=5.0), + ( + torch.randn(16, 32), + mask, + ), + ) + + test_runner.lower_and_run_model( + MaskedFillModel(value=-5.0), + ( + torch.randn(16, 32), + mask, + ), + ) + + test_runner.lower_and_run_model( + MaskedFillModel(value=1), + ( + torch.randn(16, 32), + mask, + ), + ) + + +def test_masked_fill_different_shapes(test_runner) -> None: + test_runner.lower_and_run_model( + MaskedFillModel(value=0.0), + ( + torch.randn(512), + torch.randint(0, 2, (512,), dtype=torch.bool), + ), + ) + + test_runner.lower_and_run_model( + MaskedFillModel(value=0.0), + ( + torch.randn(4, 8, 16), + torch.randint(0, 2, (4, 8, 16), dtype=torch.bool), + ), + ) + + +def test_masked_fill_broadcast(test_runner) -> None: + test_runner.lower_and_run_model( + MaskedFillModel(value=0.0), + ( + torch.randn(16, 32), + torch.randint(0, 2, (32,), dtype=torch.bool), + ), + ) diff --git a/backends/test/suite/operators/test_maxpool1d.py b/backends/test/suite/operators/test_maxpool1d.py index e6de4dee2b7..966e26f16f2 100644 --- a/backends/test/suite/operators/test_maxpool1d.py +++ b/backends/test/suite/operators/test_maxpool1d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -41,145 +36,133 @@ def forward(self, x): return self.maxpool(x) -@operator_test -class MaxPool1d(OperatorTest): - @dtype_test - def test_maxpool1d_dtype(self, flow: TestFlow, dtype) -> None: - # Input shape: (batch_size, channels, length) - self._test_op( - Model().to(dtype), - ((torch.rand(1, 8, 100) * 10).to(dtype),), - flow, - ) - - def test_maxpool1d_kernel_size(self, flow: TestFlow) -> None: - # Test with 
different kernel sizes - self._test_op( - Model(kernel_size=1), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(kernel_size=5), - (torch.randn(1, 8, 100),), - flow, - ) - - def test_maxpool1d_stride(self, flow: TestFlow) -> None: - # Test with different stride values - self._test_op( - Model(stride=2), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(stride=3), - (torch.randn(1, 8, 100),), - flow, - ) - - def test_maxpool1d_padding(self, flow: TestFlow) -> None: - # Test with different padding values - self._test_op( - Model(padding=1), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(padding=2), - (torch.randn(1, 8, 100),), - flow, - ) - - def test_maxpool1d_dilation(self, flow: TestFlow) -> None: - # Test with different dilation values - self._test_op( - Model(dilation=2), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(dilation=3), - (torch.randn(1, 8, 100),), - flow, - ) - - def test_maxpool1d_ceil_mode(self, flow: TestFlow) -> None: - # Test with ceil_mode=True - self._test_op( - Model(ceil_mode=True), - (torch.randn(1, 8, 100),), - flow, - ) - - def test_maxpool1d_return_indices(self, flow: TestFlow) -> None: - # Test with return_indices=True - class ModelWithIndices(torch.nn.Module): - def __init__(self): - super().__init__() - self.maxpool = torch.nn.MaxPool1d( - kernel_size=3, - stride=2, - padding=1, - return_indices=True, - ) - - def forward(self, x): - return self.maxpool(x) - - input_tensor = torch.randn(1, 8, 100) - - self._test_op( - Model(kernel_size=3, stride=2, padding=1), - (input_tensor,), - flow, - ) - - def test_maxpool1d_batch_sizes(self, flow: TestFlow) -> None: - # Test with batch inputs - self._test_op( - Model(), - (torch.randn(2, 8, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(8, 8, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(16, 8, 100),), - flow, - ) - - def test_maxpool1d_input_sizes(self, flow: TestFlow) -> None: - # Test with different input sizes - self._test_op( - Model(), - (torch.randn(1, 4, 100),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 16, 100),), - flow, - ) - - def test_maxpool1d_combinations(self, flow: TestFlow) -> None: - # Test with combinations of parameters - self._test_op( - Model(kernel_size=2, stride=2, padding=1), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), - (torch.randn(1, 8, 100),), - flow, - ) - self._test_op( - Model(kernel_size=2, stride=2, padding=1, dilation=2), - (torch.randn(1, 8, 100),), - flow, - ) +@parameterize_by_dtype +def test_maxpool1d_dtype(test_runner, dtype) -> None: + # Input shape: (batch_size, channels, length) + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(1, 8, 100) * 10).to(dtype),), + ) + + +def test_maxpool1d_kernel_size(test_runner) -> None: + # Test with different kernel sizes + test_runner.lower_and_run_model( + Model(kernel_size=1), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=5), + (torch.randn(1, 8, 100),), + ) + + +def test_maxpool1d_stride(test_runner) -> None: + # Test with different stride values + test_runner.lower_and_run_model( + Model(stride=2), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(stride=3), + (torch.randn(1, 8, 100),), + ) + + +def test_maxpool1d_padding(test_runner) -> None: + # Test with different padding values + test_runner.lower_and_run_model( + Model(padding=1), + (torch.randn(1, 8, 
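+            # (batch=1, channels=8, length=100)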
100),), + ) + test_runner.lower_and_run_model( + Model(padding=2), + (torch.randn(1, 8, 100),), + ) + + +def test_maxpool1d_dilation(test_runner) -> None: + # Test with different dilation values + test_runner.lower_and_run_model( + Model(dilation=2), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(dilation=3), + (torch.randn(1, 8, 100),), + ) + + +def test_maxpool1d_ceil_mode(test_runner) -> None: + # Test with ceil_mode=True + test_runner.lower_and_run_model( + Model(ceil_mode=True), + (torch.randn(1, 8, 100),), + ) + + +def test_maxpool1d_return_indices(test_runner) -> None: + # Test with return_indices=True + class ModelWithIndices(torch.nn.Module): + def __init__(self): + super().__init__() + self.maxpool = torch.nn.MaxPool1d( + kernel_size=3, + stride=2, + padding=1, + return_indices=True, + ) + + def forward(self, x): + return self.maxpool(x) + + input_tensor = torch.randn(1, 8, 100) + + test_runner.lower_and_run_model( + Model(kernel_size=3, stride=2, padding=1), + (input_tensor,), + ) + + +def test_maxpool1d_batch_sizes(test_runner) -> None: + # Test with batch inputs + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(16, 8, 100),), + ) + + +def test_maxpool1d_input_sizes(test_runner) -> None: + # Test with different input sizes + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 100),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 16, 100),), + ) + + +def test_maxpool1d_combinations(test_runner) -> None: + # Test with combinations of parameters + test_runner.lower_and_run_model( + Model(kernel_size=2, stride=2, padding=1), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), + (torch.randn(1, 8, 100),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=2, stride=2, padding=1, dilation=2), + (torch.randn(1, 8, 100),), + ) diff --git a/backends/test/suite/operators/test_maxpool2d.py b/backends/test/suite/operators/test_maxpool2d.py index f8112d3b7da..750c497de9f 100644 --- a/backends/test/suite/operators/test_maxpool2d.py +++ b/backends/test/suite/operators/test_maxpool2d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -41,151 +36,138 @@ def forward(self, x): return self.maxpool(x) -@operator_test -class MaxPool2d(OperatorTest): - @dtype_test - def test_maxpool2d_dtype(self, flow: TestFlow, dtype) -> None: - # Input shape: (batch_size, channels, height, width) - self._test_op( - Model().to(dtype), - ((torch.rand(1, 8, 20, 20) * 10).to(dtype),), - flow, - ) - - def test_maxpool2d_kernel_size(self, flow: TestFlow) -> None: - # Test with different kernel sizes - self._test_op( - Model(kernel_size=1), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(kernel_size=5), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(kernel_size=(3, 2)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - - def test_maxpool2d_stride(self, flow: TestFlow) -> None: - # Test with different stride values - self._test_op( - Model(stride=2), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - 
Model(stride=(2, 1)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - - def test_maxpool2d_padding(self, flow: TestFlow) -> None: - # Test with different padding values - self._test_op( - Model(padding=1), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(padding=(1, 2)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - - def test_maxpool2d_dilation(self, flow: TestFlow) -> None: - # Test with different dilation values - self._test_op( - Model(dilation=2), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(dilation=(2, 1)), - (torch.randn(1, 8, 20, 20),), - flow, - ) - - def test_maxpool2d_ceil_mode(self, flow: TestFlow) -> None: - # Test with ceil_mode=True - self._test_op( - Model(ceil_mode=True), - (torch.randn(1, 8, 20, 20),), - flow, - ) - - def test_maxpool2d_return_indices(self, flow: TestFlow) -> None: - # Test with return_indices=True - class ModelWithIndices(torch.nn.Module): - def __init__(self): - super().__init__() - self.maxpool = torch.nn.MaxPool2d( - kernel_size=3, - stride=2, - padding=1, - return_indices=True, - ) - - def forward(self, x): - return self.maxpool(x) - - # Create a test input tensor - input_tensor = torch.randn(1, 8, 20, 20) - - self._test_op( - Model(kernel_size=3, stride=2, padding=1), - (input_tensor,), - flow, - ) - - def test_maxpool2d_batch_sizes(self, flow: TestFlow) -> None: - # Test with batch inputs - self._test_op( - Model(), - (torch.randn(2, 8, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(8, 8, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(16, 8, 20, 20),), - flow, - ) - - def test_maxpool2d_input_sizes(self, flow: TestFlow) -> None: - # Test with different input sizes - self._test_op( - Model(), - (torch.randn(1, 4, 20, 20),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 16, 20, 20),), - flow, - ) - - def test_maxpool2d_combinations(self, flow: TestFlow) -> None: - # Test with combinations of parameters - self._test_op( - Model(kernel_size=2, stride=2, padding=1), - (torch.randn(1, 8, 20, 20),), - flow, - ) - self._test_op( - Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), - (torch.randn(1, 8, 21, 21),), - flow, - ) - self._test_op( - Model(kernel_size=(2, 3), stride=(2, 1), padding=(1, 0), dilation=2), - (torch.randn(1, 8, 20, 20),), - flow, - ) +@parameterize_by_dtype +def test_maxpool2d_dtype(test_runner, dtype) -> None: + # Input shape: (batch_size, channels, height, width) + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(1, 8, 20, 20) * 10).to(dtype),), + ) + + +def test_maxpool2d_kernel_size(test_runner) -> None: + # Test with different kernel sizes + test_runner.lower_and_run_model( + Model(kernel_size=1), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=5), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=(3, 2)), + (torch.randn(1, 8, 20, 20),), + ) + + +def test_maxpool2d_stride(test_runner) -> None: + # Test with different stride values + test_runner.lower_and_run_model( + Model(stride=2), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(stride=(2, 1)), + (torch.randn(1, 8, 20, 20),), + ) + + +def test_maxpool2d_padding(test_runner) -> None: + # Test with different padding values + test_runner.lower_and_run_model( + Model(padding=1), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(padding=(1, 2)), + (torch.randn(1, 8, 20, 20),), + ) + + +def 
test_maxpool2d_dilation(test_runner) -> None: + # Test with different dilation values + test_runner.lower_and_run_model( + Model(dilation=2), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(dilation=(2, 1)), + (torch.randn(1, 8, 20, 20),), + ) + + +def test_maxpool2d_ceil_mode(test_runner) -> None: + # Test with ceil_mode=True + test_runner.lower_and_run_model( + Model(ceil_mode=True), + (torch.randn(1, 8, 20, 20),), + ) + + +def test_maxpool2d_return_indices(test_runner) -> None: + # Test with return_indices=True + class ModelWithIndices(torch.nn.Module): + def __init__(self): + super().__init__() + self.maxpool = torch.nn.MaxPool2d( + kernel_size=3, + stride=2, + padding=1, + return_indices=True, + ) + + def forward(self, x): + return self.maxpool(x) + + # Create a test input tensor + input_tensor = torch.randn(1, 8, 20, 20) + + test_runner.lower_and_run_model( + Model(kernel_size=3, stride=2, padding=1), + (input_tensor,), + ) + + +def test_maxpool2d_batch_sizes(test_runner) -> None: + # Test with batch inputs + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(16, 8, 20, 20),), + ) + + +def test_maxpool2d_input_sizes(test_runner) -> None: + # Test with different input sizes + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 4, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 16, 20, 20),), + ) + + +def test_maxpool2d_combinations(test_runner) -> None: + # Test with combinations of parameters + test_runner.lower_and_run_model( + Model(kernel_size=2, stride=2, padding=1), + (torch.randn(1, 8, 20, 20),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), + (torch.randn(1, 8, 21, 21),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=(2, 3), stride=(2, 1), padding=(1, 0), dilation=2), + (torch.randn(1, 8, 20, 20),), + ) diff --git a/backends/test/suite/operators/test_maxpool3d.py b/backends/test/suite/operators/test_maxpool3d.py index 3b231169371..b5a356a97aa 100644 --- a/backends/test/suite/operators/test_maxpool3d.py +++ b/backends/test/suite/operators/test_maxpool3d.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -41,149 +36,135 @@ def forward(self, x): return self.maxpool(x) -@operator_test -class MaxPool3d(OperatorTest): - @dtype_test - def test_maxpool3d_dtype(self, flow: TestFlow, dtype) -> None: - # Input shape: (batch_size, channels, depth, height, width) - self._test_op( - Model().to(dtype), - ((torch.rand(1, 4, 8, 8, 8) * 10).to(dtype),), - flow, - ) - - def test_maxpool3d_kernel_size(self, flow: TestFlow) -> None: - # Test with different kernel sizes - self._test_op( - Model(kernel_size=1), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(kernel_size=(1, 2, 2)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - - def test_maxpool3d_stride(self, flow: TestFlow) -> None: - # Test with different stride values - self._test_op( - Model(stride=2), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(stride=(1, 2, 2)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - - def 
test_maxpool3d_padding(self, flow: TestFlow) -> None: - # Test with different padding values - self._test_op( - Model(padding=1), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(padding=(0, 1, 1)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - - def test_maxpool3d_dilation(self, flow: TestFlow) -> None: - # Test with different dilation values - self._test_op( - Model(dilation=2), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(dilation=(1, 2, 2)), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - - def test_maxpool3d_ceil_mode(self, flow: TestFlow) -> None: - # Test with ceil_mode=True - self._test_op( - Model(ceil_mode=True), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - - def test_maxpool3d_return_indices(self, flow: TestFlow) -> None: - # Test with return_indices=True - class ModelWithIndices(torch.nn.Module): - def __init__(self): - super().__init__() - self.maxpool = torch.nn.MaxPool3d( - kernel_size=3, - stride=2, - padding=1, - return_indices=True, - ) - - def forward(self, x): - # Return both output and indices - return self.maxpool(x) - - # Create a test input tensor - input_tensor = torch.randn(1, 4, 8, 8, 8) - - self._test_op( - Model(kernel_size=3, stride=2, padding=1), - (input_tensor,), - flow, - ) - - def test_maxpool3d_batch_sizes(self, flow: TestFlow) -> None: - # Test with batch inputs - self._test_op( - Model(), - (torch.randn(2, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(8, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(16, 4, 8, 8, 8),), - flow, - ) - - def test_maxpool3d_input_sizes(self, flow: TestFlow) -> None: - # Test with different input sizes - self._test_op( - Model(), - (torch.randn(1, 2, 8, 8, 8),), - flow, - ) - self._test_op( - Model(), - (torch.randn(1, 8, 8, 8, 8),), - flow, - ) - - def test_maxpool3d_combinations(self, flow: TestFlow) -> None: - # Test with combinations of parameters - self._test_op( - Model(kernel_size=2, stride=2, padding=1), - (torch.randn(1, 4, 8, 8, 8),), - flow, - ) - self._test_op( - Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), - (torch.randn(1, 4, 10, 10, 10),), - flow, - ) - self._test_op( - Model( - kernel_size=(2, 2, 2), stride=(1, 2, 2), padding=(0, 1, 1), dilation=2 - ), - (torch.randn(1, 4, 8, 10, 10),), - flow, - ) +@parameterize_by_dtype +def test_maxpool3d_dtype(test_runner, dtype) -> None: + # Input shape: (batch_size, channels, depth, height, width) + test_runner.lower_and_run_model( + Model().to(dtype), + ((torch.rand(1, 4, 8, 8, 8) * 10).to(dtype),), + ) + + +def test_maxpool3d_kernel_size(test_runner) -> None: + # Test with different kernel sizes + test_runner.lower_and_run_model( + Model(kernel_size=1), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=(1, 2, 2)), + (torch.randn(1, 4, 8, 8, 8),), + ) + + +def test_maxpool3d_stride(test_runner) -> None: + # Test with different stride values + test_runner.lower_and_run_model( + Model(stride=2), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(stride=(1, 2, 2)), + (torch.randn(1, 4, 8, 8, 8),), + ) + + +def test_maxpool3d_padding(test_runner) -> None: + # Test with different padding values + test_runner.lower_and_run_model( + Model(padding=1), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(padding=(0, 1, 1)), + (torch.randn(1, 4, 8, 8, 8),), + ) + + +def test_maxpool3d_dilation(test_runner) -> None: + # Test with different dilation values + 
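+    # The effective window span per dimension is dilation * (kernel_size - 1) + 1,
+    # e.g. dilation=2 with kernel_size=3 covers 5 input elements per window.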
test_runner.lower_and_run_model( + Model(dilation=2), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(dilation=(1, 2, 2)), + (torch.randn(1, 4, 8, 8, 8),), + ) + + +def test_maxpool3d_ceil_mode(test_runner) -> None: + # Test with ceil_mode=True + test_runner.lower_and_run_model( + Model(ceil_mode=True), + (torch.randn(1, 4, 8, 8, 8),), + ) + + +def test_maxpool3d_return_indices(test_runner) -> None: + # Test with return_indices=True + class ModelWithIndices(torch.nn.Module): + def __init__(self): + super().__init__() + self.maxpool = torch.nn.MaxPool3d( + kernel_size=3, + stride=2, + padding=1, + return_indices=True, + ) + + def forward(self, x): + # Return both output and indices + return self.maxpool(x) + + # Create a test input tensor + input_tensor = torch.randn(1, 4, 8, 8, 8) + + test_runner.lower_and_run_model( + Model(kernel_size=3, stride=2, padding=1), + (input_tensor,), + ) + + +def test_maxpool3d_batch_sizes(test_runner) -> None: + # Test with batch inputs + test_runner.lower_and_run_model( + Model(), + (torch.randn(2, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(8, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(16, 4, 8, 8, 8),), + ) + + +def test_maxpool3d_input_sizes(test_runner) -> None: + # Test with different input sizes + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 2, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(), + (torch.randn(1, 8, 8, 8, 8),), + ) + + +def test_maxpool3d_combinations(test_runner) -> None: + # Test with combinations of parameters + test_runner.lower_and_run_model( + Model(kernel_size=2, stride=2, padding=1), + (torch.randn(1, 4, 8, 8, 8),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=3, stride=2, padding=1, ceil_mode=True), + (torch.randn(1, 4, 10, 10, 10),), + ) + test_runner.lower_and_run_model( + Model(kernel_size=(2, 2, 2), stride=(1, 2, 2), padding=(0, 1, 1), dilation=2), + (torch.randn(1, 4, 8, 10, 10),), + ) diff --git a/backends/test/suite/operators/test_mean.py b/backends/test/suite/operators/test_mean.py index 6c5c779364b..465d3b6d8e7 100644 --- a/backends/test/suite/operators/test_mean.py +++ b/backends/test/suite/operators/test_mean.py @@ -10,13 +10,8 @@ from typing import List, Optional, Tuple, Union import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class MeanModel(torch.nn.Module): @@ -35,271 +30,235 @@ def forward(self, x): return torch.mean(x, dim=self.dim, keepdim=self.keepdim, dtype=self.dtype) -@operator_test -class Mean(OperatorTest): - @dtype_test - def test_mean_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - MeanModel().to(dtype), - (torch.rand(10, 10).to(dtype),), - flow, - ) - - def test_mean_basic(self, flow: TestFlow) -> None: - self._test_op( - MeanModel(), - (torch.randn(10, 10),), - flow, - ) - - def test_mean_dim(self, flow: TestFlow) -> None: - self._test_op( - MeanModel(dim=0), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - MeanModel(dim=1), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - MeanModel(dim=0), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=1), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=2), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=1), - 
(torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=-1), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=-2), - (torch.randn(3, 4, 5),), - flow, - ) - - def test_mean_multi_dim(self, flow: TestFlow) -> None: - self._test_op( - MeanModel(dim=(0, 1)), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=(0, 2)), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=(1, 2)), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=(1, 3)), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=(0, 2)), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=(-1, -3)), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=(0, 1, 2, 3)), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - def test_mean_keepdim(self, flow: TestFlow) -> None: - self._test_op( - MeanModel(dim=0, keepdim=True), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - MeanModel(dim=1, keepdim=True), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - MeanModel(dim=1, keepdim=True), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=2, keepdim=True), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(dim=(1, 2), keepdim=True), - (torch.randn(3, 4, 5),), - flow, - ) - - def test_mean_output_dtype(self, flow: TestFlow) -> None: - self._test_op( - MeanModel(dtype=torch.float32), - (torch.randint(0, 10, (5, 10)),), - flow, - ) - - self._test_op( - MeanModel(dtype=torch.float64), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - MeanModel(dim=1, dtype=torch.float64), - (torch.randn(5, 10),), - flow, - ) - - def test_mean_shapes(self, flow: TestFlow) -> None: - self._test_op( - MeanModel(), - (torch.randn(20),), - flow, - ) - self._test_op( - MeanModel(dim=0), - (torch.randn(20),), - flow, - ) - - self._test_op( - MeanModel(), - (torch.randn(5, 10),), - flow, - ) - - self._test_op( - MeanModel(), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(), - (torch.randn(2, 3, 4, 5),), - flow, - ) - - self._test_op( - MeanModel(), - (torch.randn(2, 2, 3, 4, 5),), - flow, - ) - - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_mean_edge_cases(self, flow: TestFlow) -> None: - x = torch.tensor([[1.0, float("inf"), 3.0], [4.0, 5.0, float("inf")]]) - self._test_op( - MeanModel(), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - MeanModel(dim=0), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - MeanModel(dim=1), - (x,), - flow, - generate_random_test_inputs=False, - ) - - x = torch.tensor([[1.0, float("-inf"), 3.0], [4.0, 5.0, float("-inf")]]) - self._test_op( - MeanModel(), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - MeanModel(dim=0), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - MeanModel(dim=1), - (x,), - flow, - generate_random_test_inputs=False, - ) - - x = torch.tensor([[1.0, float("nan"), 3.0], [4.0, 5.0, float("nan")]]) - self._test_op( - MeanModel(), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - MeanModel(dim=0), - (x,), - flow, - generate_random_test_inputs=False, - ) - self._test_op( - MeanModel(dim=1), - (x,), - flow, - generate_random_test_inputs=False, - ) - - def test_mean_scalar(self, flow: TestFlow) -> None: - self._test_op( - MeanModel(), - (torch.tensor([5.0]),), - flow, - ) - self._test_op( - 
MeanModel(dim=0), - (torch.tensor([5.0]),), - flow, - ) +@parameterize_by_dtype +def test_mean_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + MeanModel().to(dtype), + (torch.rand(10, 10).to(dtype),), + ) + + +def test_mean_basic(test_runner) -> None: + test_runner.lower_and_run_model( + MeanModel(), + (torch.randn(10, 10),), + ) + + +def test_mean_dim(test_runner) -> None: + test_runner.lower_and_run_model( + MeanModel(dim=0), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=1), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=0), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=2), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=1), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=-1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=-2), + (torch.randn(3, 4, 5),), + ) + + +def test_mean_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model( + MeanModel(dim=(0, 1)), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=(0, 2)), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=(1, 2)), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=(1, 3)), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=(0, 2)), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=(-1, -3)), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=(0, 1, 2, 3)), + (torch.randn(2, 3, 4, 5),), + ) + + +def test_mean_keepdim(test_runner) -> None: + test_runner.lower_and_run_model( + MeanModel(dim=0, keepdim=True), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=1, keepdim=True), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=1, keepdim=True), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=2, keepdim=True), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=(1, 2), keepdim=True), + (torch.randn(3, 4, 5),), + ) + + +def test_mean_output_dtype(test_runner) -> None: + test_runner.lower_and_run_model( + MeanModel(dtype=torch.float32), + (torch.randint(0, 10, (5, 10)),), + ) + + test_runner.lower_and_run_model( + MeanModel(dtype=torch.float64), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + MeanModel(dim=1, dtype=torch.float64), + (torch.randn(5, 10),), + ) + + +def test_mean_shapes(test_runner) -> None: + test_runner.lower_and_run_model( + MeanModel(), + (torch.randn(20),), + ) + test_runner.lower_and_run_model( + MeanModel(dim=0), + (torch.randn(20),), + ) + + test_runner.lower_and_run_model( + MeanModel(), + (torch.randn(5, 10),), + ) + + test_runner.lower_and_run_model( + MeanModel(), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(), + (torch.randn(2, 3, 4, 5),), + ) + + test_runner.lower_and_run_model( + MeanModel(), + (torch.randn(2, 2, 3, 4, 5),), + ) + + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_mean_edge_cases(test_runner) -> None: + x = torch.tensor([[1.0, float("inf"), 3.0], [4.0, 5.0, float("inf")]]) + test_runner.lower_and_run_model( + MeanModel(), + (x,), + 
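+        # Use the crafted Inf inputs verbatim; random test inputs would bypass
+        # the edge case under test.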
generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + MeanModel(dim=0), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + MeanModel(dim=1), + (x,), + generate_random_test_inputs=False, + ) + + x = torch.tensor([[1.0, float("-inf"), 3.0], [4.0, 5.0, float("-inf")]]) + test_runner.lower_and_run_model( + MeanModel(), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + MeanModel(dim=0), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + MeanModel(dim=1), + (x,), + generate_random_test_inputs=False, + ) + + x = torch.tensor([[1.0, float("nan"), 3.0], [4.0, 5.0, float("nan")]]) + test_runner.lower_and_run_model( + MeanModel(), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + MeanModel(dim=0), + (x,), + generate_random_test_inputs=False, + ) + test_runner.lower_and_run_model( + MeanModel(dim=1), + (x,), + generate_random_test_inputs=False, + ) + + +def test_mean_scalar(test_runner) -> None: + test_runner.lower_and_run_model( + MeanModel(), + (torch.tensor([5.0]),), + ) + test_runner.lower_and_run_model( + MeanModel(dim=0), + (torch.tensor([5.0]),), + ) diff --git a/backends/test/suite/operators/test_median.py b/backends/test/suite/operators/test_median.py index 0b515d68efd..d2b8a2a1a31 100644 --- a/backends/test/suite/operators/test_median.py +++ b/backends/test/suite/operators/test_median.py @@ -10,13 +10,8 @@ from typing import Optional import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class MedianModel(torch.nn.Module): @@ -44,145 +39,171 @@ def forward(self, x): return torch.median(x) -@operator_test -class Median(OperatorTest): - @dtype_test - def test_median_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes (global reduction) - model = MedianValueOnlyModel().to(dtype) - self._test_op(model, (torch.rand(10, 10).to(dtype),), flow) +@parameterize_by_dtype +def test_median_dtype(test_runner, dtype) -> None: + # Test with different dtypes (global reduction) + model = MedianValueOnlyModel().to(dtype) + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype),)) - def test_median_basic(self, flow: TestFlow) -> None: - # Basic test with default parameters (global reduction) - self._test_op(MedianValueOnlyModel(), (torch.randn(10, 10),), flow) - def test_median_dim(self, flow: TestFlow) -> None: - # Test with different dimensions (values only) +def test_median_basic(test_runner) -> None: + # Basic test with default parameters (global reduction) + test_runner.lower_and_run_model(MedianValueOnlyModel(), (torch.randn(10, 10),)) - # 2D tensor, dim=0 - self._test_op(MedianValueOnlyModel(dim=0), (torch.randn(5, 10),), flow) - # 2D tensor, dim=1 - self._test_op(MedianValueOnlyModel(dim=1), (torch.randn(5, 10),), flow) +def test_median_dim(test_runner) -> None: + # Test with different dimensions (values only) - # 3D tensor, dim=0 - self._test_op(MedianValueOnlyModel(dim=0), (torch.randn(3, 4, 5),), flow) + # 2D tensor, dim=0 + test_runner.lower_and_run_model(MedianValueOnlyModel(dim=0), (torch.randn(5, 10),)) - # 3D tensor, dim=1 - self._test_op(MedianValueOnlyModel(dim=1), (torch.randn(3, 4, 5),), flow) + # 2D tensor, dim=1 + test_runner.lower_and_run_model(MedianValueOnlyModel(dim=1), (torch.randn(5, 10),)) - 
# 3D tensor, dim=2 - self._test_op(MedianValueOnlyModel(dim=2), (torch.randn(3, 4, 5),), flow) + # 3D tensor, dim=0 + test_runner.lower_and_run_model( + MedianValueOnlyModel(dim=0), (torch.randn(3, 4, 5),) + ) - # 4D tensor, dim=1 - self._test_op(MedianValueOnlyModel(dim=1), (torch.randn(2, 3, 4, 5),), flow) + # 3D tensor, dim=1 + test_runner.lower_and_run_model( + MedianValueOnlyModel(dim=1), (torch.randn(3, 4, 5),) + ) - # Negative dim (last dimension) - self._test_op(MedianValueOnlyModel(dim=-1), (torch.randn(3, 4, 5),), flow) + # 3D tensor, dim=2 + test_runner.lower_and_run_model( + MedianValueOnlyModel(dim=2), (torch.randn(3, 4, 5),) + ) - # Negative dim (second-to-last dimension) - self._test_op(MedianValueOnlyModel(dim=-2), (torch.randn(3, 4, 5),), flow) + # 4D tensor, dim=1 + test_runner.lower_and_run_model( + MedianValueOnlyModel(dim=1), (torch.randn(2, 3, 4, 5),) + ) - def test_median_with_indices(self, flow: TestFlow) -> None: - # Test with different dimensions (values and indices) + # Negative dim (last dimension) + test_runner.lower_and_run_model( + MedianValueOnlyModel(dim=-1), (torch.randn(3, 4, 5),) + ) - # 2D tensor, dim=0 - self._test_op(MedianModel(dim=0), (torch.randn(5, 10),), flow) + # Negative dim (second-to-last dimension) + test_runner.lower_and_run_model( + MedianValueOnlyModel(dim=-2), (torch.randn(3, 4, 5),) + ) - # 2D tensor, dim=1 - self._test_op(MedianModel(dim=1), (torch.randn(5, 10),), flow) - # 3D tensor, dim=0 - self._test_op(MedianModel(dim=0), (torch.randn(3, 4, 5),), flow) +def test_median_with_indices(test_runner) -> None: + # Test with different dimensions (values and indices) - # 3D tensor, dim=1 - self._test_op(MedianModel(dim=1), (torch.randn(3, 4, 5),), flow) + # 2D tensor, dim=0 + test_runner.lower_and_run_model(MedianModel(dim=0), (torch.randn(5, 10),)) - # 3D tensor, dim=2 - self._test_op(MedianModel(dim=2), (torch.randn(3, 4, 5),), flow) + # 2D tensor, dim=1 + test_runner.lower_and_run_model(MedianModel(dim=1), (torch.randn(5, 10),)) - # 4D tensor, dim=1 - self._test_op(MedianModel(dim=1), (torch.randn(2, 3, 4, 5),), flow) + # 3D tensor, dim=0 + test_runner.lower_and_run_model(MedianModel(dim=0), (torch.randn(3, 4, 5),)) - # Negative dim (last dimension) - self._test_op(MedianModel(dim=-1), (torch.randn(3, 4, 5),), flow) + # 3D tensor, dim=1 + test_runner.lower_and_run_model(MedianModel(dim=1), (torch.randn(3, 4, 5),)) - # Negative dim (second-to-last dimension) - self._test_op(MedianModel(dim=-2), (torch.randn(3, 4, 5),), flow) + # 3D tensor, dim=2 + test_runner.lower_and_run_model(MedianModel(dim=2), (torch.randn(3, 4, 5),)) - def test_median_keepdim(self, flow: TestFlow) -> None: - # Test with keepdim=True (values only) + # 4D tensor, dim=1 + test_runner.lower_and_run_model(MedianModel(dim=1), (torch.randn(2, 3, 4, 5),)) - # 2D tensor, dim=0, keepdim=True - self._test_op( - MedianValueOnlyModel(dim=0, keepdim=True), (torch.randn(5, 10),), flow - ) + # Negative dim (last dimension) + test_runner.lower_and_run_model(MedianModel(dim=-1), (torch.randn(3, 4, 5),)) - # 2D tensor, dim=1, keepdim=True - self._test_op( - MedianValueOnlyModel(dim=1, keepdim=True), (torch.randn(5, 10),), flow - ) + # Negative dim (second-to-last dimension) + test_runner.lower_and_run_model(MedianModel(dim=-2), (torch.randn(3, 4, 5),)) - # 3D tensor, dim=1, keepdim=True - self._test_op( - MedianValueOnlyModel(dim=1, keepdim=True), (torch.randn(3, 4, 5),), flow - ) - # 4D tensor, dim=2, keepdim=True - self._test_op( - MedianValueOnlyModel(dim=2, keepdim=True), 
(torch.randn(2, 3, 4, 5),), flow - ) +def test_median_keepdim(test_runner) -> None: + # Test with keepdim=True (values only) - def test_median_keepdim_with_indices(self, flow: TestFlow) -> None: - # Test with keepdim=True (values and indices) + # 2D tensor, dim=0, keepdim=True + test_runner.lower_and_run_model( + MedianValueOnlyModel(dim=0, keepdim=True), (torch.randn(5, 10),) + ) - # 2D tensor, dim=0, keepdim=True - self._test_op(MedianModel(dim=0, keepdim=True), (torch.randn(5, 10),), flow) + # 2D tensor, dim=1, keepdim=True + test_runner.lower_and_run_model( + MedianValueOnlyModel(dim=1, keepdim=True), (torch.randn(5, 10),) + ) - # 2D tensor, dim=1, keepdim=True - self._test_op(MedianModel(dim=1, keepdim=True), (torch.randn(5, 10),), flow) + # 3D tensor, dim=1, keepdim=True + test_runner.lower_and_run_model( + MedianValueOnlyModel(dim=1, keepdim=True), (torch.randn(3, 4, 5),) + ) - # 3D tensor, dim=1, keepdim=True - self._test_op(MedianModel(dim=1, keepdim=True), (torch.randn(3, 4, 5),), flow) + # 4D tensor, dim=2, keepdim=True + test_runner.lower_and_run_model( + MedianValueOnlyModel(dim=2, keepdim=True), (torch.randn(2, 3, 4, 5),) + ) - # 4D tensor, dim=2, keepdim=True - self._test_op( - MedianModel(dim=2, keepdim=True), (torch.randn(2, 3, 4, 5),), flow - ) - def test_median_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes (global reduction) +def test_median_keepdim_with_indices(test_runner) -> None: + # Test with keepdim=True (values and indices) - # 1D tensor - self._test_op(MedianValueOnlyModel(), (torch.randn(20),), flow) + # 2D tensor, dim=0, keepdim=True + test_runner.lower_and_run_model( + MedianModel(dim=0, keepdim=True), (torch.randn(5, 10),) + ) - # 2D tensor - self._test_op(MedianValueOnlyModel(), (torch.randn(5, 10),), flow) + # 2D tensor, dim=1, keepdim=True + test_runner.lower_and_run_model( + MedianModel(dim=1, keepdim=True), (torch.randn(5, 10),) + ) - # 3D tensor - self._test_op(MedianValueOnlyModel(), (torch.randn(3, 4, 5),), flow) + # 3D tensor, dim=1, keepdim=True + test_runner.lower_and_run_model( + MedianModel(dim=1, keepdim=True), (torch.randn(3, 4, 5),) + ) - # 4D tensor - self._test_op(MedianValueOnlyModel(), (torch.randn(2, 3, 4, 5),), flow) + # 4D tensor, dim=2, keepdim=True + test_runner.lower_and_run_model( + MedianModel(dim=2, keepdim=True), (torch.randn(2, 3, 4, 5),) + ) - # 5D tensor - self._test_op(MedianValueOnlyModel(), (torch.randn(2, 2, 3, 4, 5),), flow) - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_median_edge_cases(self, flow: TestFlow) -> None: - # Tensor with NaN (NaN should be propagated) - x = torch.tensor([[1.0, float("nan"), 3.0], [4.0, 5.0, float("nan")]]) - self._test_op( - MedianValueOnlyModel(), (x,), flow, generate_random_test_inputs=False - ) - self._test_op( - MedianValueOnlyModel(dim=0), (x,), flow, generate_random_test_inputs=False - ) - self._test_op( - MedianValueOnlyModel(dim=1), (x,), flow, generate_random_test_inputs=False - ) +def test_median_shapes(test_runner) -> None: + # Test with different tensor shapes (global reduction) - def test_median_scalar(self, flow: TestFlow) -> None: - # Test with scalar input (1-element tensor) - self._test_op(MedianValueOnlyModel(), (torch.tensor([5.0]),), flow) - self._test_op(MedianValueOnlyModel(dim=0), (torch.tensor([5.0]),), flow) + # 1D tensor + test_runner.lower_and_run_model(MedianValueOnlyModel(), (torch.randn(20),)) + + # 2D tensor + test_runner.lower_and_run_model(MedianValueOnlyModel(), (torch.randn(5, 10),)) + + # 3D tensor + 
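+    # Global reduction: with no dim argument, torch.median reduces over all
+    # 60 elements of the 3x4x5 tensor to a single value.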
test_runner.lower_and_run_model(MedianValueOnlyModel(), (torch.randn(3, 4, 5),)) + + # 4D tensor + test_runner.lower_and_run_model(MedianValueOnlyModel(), (torch.randn(2, 3, 4, 5),)) + + # 5D tensor + test_runner.lower_and_run_model( + MedianValueOnlyModel(), (torch.randn(2, 2, 3, 4, 5),) + ) + + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_median_edge_cases(test_runner) -> None: + # Tensor with NaN (NaN should be propagated) + x = torch.tensor([[1.0, float("nan"), 3.0], [4.0, 5.0, float("nan")]]) + test_runner.lower_and_run_model( + MedianValueOnlyModel(), (x,), generate_random_test_inputs=False + ) + test_runner.lower_and_run_model( + MedianValueOnlyModel(dim=0), (x,), generate_random_test_inputs=False + ) + test_runner.lower_and_run_model( + MedianValueOnlyModel(dim=1), (x,), generate_random_test_inputs=False + ) + + +def test_median_scalar(test_runner) -> None: + # Test with scalar input (1-element tensor) + test_runner.lower_and_run_model(MedianValueOnlyModel(), (torch.tensor([5.0]),)) + test_runner.lower_and_run_model(MedianValueOnlyModel(dim=0), (torch.tensor([5.0]),)) diff --git a/backends/test/suite/operators/test_mul.py b/backends/test/suite/operators/test_mul.py index ceadc1edf7a..6f901a82421 100644 --- a/backends/test/suite/operators/test_mul.py +++ b/backends/test/suite/operators/test_mul.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -22,45 +17,42 @@ def forward(self, x, y): return x * y -@operator_test -class Multiply(OperatorTest): - @dtype_test - def test_multiply_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model(), - ( - (torch.rand(2, 10) * 100).to(dtype), - (torch.rand(2, 10) * 100).to(dtype), - ), - flow, - ) - - def test_multiply_f32_bcast_first(self, flow: TestFlow) -> None: - self._test_op( - Model(), - ( - torch.randn(5), - torch.randn(1, 5, 1, 5), - ), - flow, - ) - - def test_multiply_f32_bcast_second(self, flow: TestFlow) -> None: - self._test_op( - Model(), - ( - torch.randn(4, 4, 2, 7), - torch.randn(2, 7), - ), - flow, - ) - - def test_multiply_f32_bcast_unary(self, flow: TestFlow) -> None: - self._test_op( - Model(), - ( - torch.randn(5), - torch.randn(1, 1, 5), - ), - flow, - ) +@parameterize_by_dtype +def test_multiply_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model(), + ( + (torch.rand(2, 10) * 100).to(dtype), + (torch.rand(2, 10) * 100).to(dtype), + ), + ) + + +def test_multiply_f32_bcast_first(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(5), + torch.randn(1, 5, 1, 5), + ), + ) + + +def test_multiply_f32_bcast_second(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(4, 4, 2, 7), + torch.randn(2, 7), + ), + ) + + +def test_multiply_f32_bcast_unary(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(5), + torch.randn(1, 1, 5), + ), + ) diff --git a/backends/test/suite/operators/test_neg.py b/backends/test/suite/operators/test_neg.py index bc1adede877..72c9b610a84 100644 --- a/backends/test/suite/operators/test_neg.py +++ b/backends/test/suite/operators/test_neg.py @@ -9,13 +9,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import 
( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class NegModel(torch.nn.Module): @@ -26,45 +21,44 @@ def forward(self, x): return torch.neg(x) -@operator_test -class TestNeg(OperatorTest): - @dtype_test - def test_neg_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = NegModel().to(dtype) - self._test_op( - model, - (torch.rand(10, 10).to(dtype) * 2 - 1,), - flow, - generate_random_test_inputs=False, - ) - - def test_neg_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - - # 1D tensor - self._test_op( - NegModel(), (torch.randn(20),), flow, generate_random_test_inputs=False - ) - - # 2D tensor - self._test_op( - NegModel(), (torch.randn(5, 10),), flow, generate_random_test_inputs=False - ) - - # 3D tensor - self._test_op( - NegModel(), (torch.randn(3, 4, 5),), flow, generate_random_test_inputs=False - ) - - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_neg_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases - - # Tensor with infinity - x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) - self._test_op(NegModel(), (x,), flow, generate_random_test_inputs=False) - - # Tensor with NaN - x = torch.tensor([float("nan"), 1.0, -1.0]) - self._test_op(NegModel(), (x,), flow, generate_random_test_inputs=False) +@parameterize_by_dtype +def test_neg_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = NegModel().to(dtype) + test_runner.lower_and_run_model( + model, + (torch.rand(10, 10).to(dtype) * 2 - 1,), + generate_random_test_inputs=False, + ) + + +def test_neg_shapes(test_runner) -> None: + # Test with different tensor shapes + + # 1D tensor + test_runner.lower_and_run_model( + NegModel(), (torch.randn(20),), generate_random_test_inputs=False + ) + + # 2D tensor + test_runner.lower_and_run_model( + NegModel(), (torch.randn(5, 10),), generate_random_test_inputs=False + ) + + # 3D tensor + test_runner.lower_and_run_model( + NegModel(), (torch.randn(3, 4, 5),), generate_random_test_inputs=False + ) + + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_neg_edge_cases(test_runner) -> None: + # Test edge cases + + # Tensor with infinity + x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) + test_runner.lower_and_run_model(NegModel(), (x,), generate_random_test_inputs=False) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.0, -1.0]) + test_runner.lower_and_run_model(NegModel(), (x,), generate_random_test_inputs=False) diff --git a/backends/test/suite/operators/test_permute.py b/backends/test/suite/operators/test_permute.py index bc79a63d610..98d1f6d3b54 100644 --- a/backends/test/suite/operators/test_permute.py +++ b/backends/test/suite/operators/test_permute.py @@ -9,13 +9,8 @@ from typing import List import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class PermuteModel(torch.nn.Module): @@ -27,83 +22,74 @@ def forward(self, x): return x.permute(self.dims) -@operator_test -class Permute(OperatorTest): - @dtype_test - def test_permute_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - PermuteModel(dims=[1, 0]), - (torch.rand(20, 32).to(dtype),), - flow, - ) - - def test_permute_3d(self, flow: TestFlow) -> None: - self._test_op( - PermuteModel(dims=[2, 
0, 1]), - (torch.randn(8, 10, 12),), - flow, - ) - - self._test_op( - PermuteModel(dims=[1, 2, 0]), - (torch.randn(8, 10, 12),), - flow, - ) - - self._test_op( - PermuteModel(dims=[0, 2, 1]), - (torch.randn(8, 10, 12),), - flow, - ) - - def test_permute_4d(self, flow: TestFlow) -> None: - self._test_op( - PermuteModel(dims=[3, 2, 1, 0]), - (torch.randn(4, 6, 8, 10),), - flow, - ) - - self._test_op( - PermuteModel(dims=[0, 2, 1, 3]), - (torch.randn(4, 6, 8, 10),), - flow, - ) - - def test_permute_identity(self, flow: TestFlow) -> None: - self._test_op( - PermuteModel(dims=[0, 1]), - (torch.randn(20, 32),), - flow, - ) - - self._test_op( - PermuteModel(dims=[0, 1, 2]), - (torch.randn(8, 10, 12),), - flow, - ) - - def test_permute_negative_dims(self, flow: TestFlow) -> None: - self._test_op( - PermuteModel(dims=[-1, -3, -2, -4]), - (torch.randn(4, 6, 8, 10),), - flow, - ) - - self._test_op( - PermuteModel(dims=[-4, -2, -3, -1]), - (torch.randn(4, 6, 8, 10),), - flow, - ) - - def test_permute_different_shapes(self, flow: TestFlow) -> None: - self._test_op( - PermuteModel(dims=[0]), - (torch.randn(512),), - flow, - ) - - self._test_op( - PermuteModel(dims=[4, 3, 2, 1, 0]), - (torch.randn(2, 3, 4, 5, 6),), - flow, - ) +@parameterize_by_dtype +def test_permute_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + PermuteModel(dims=[1, 0]), + (torch.rand(20, 32).to(dtype),), + ) + + +def test_permute_3d(test_runner) -> None: + test_runner.lower_and_run_model( + PermuteModel(dims=[2, 0, 1]), + (torch.randn(8, 10, 12),), + ) + + test_runner.lower_and_run_model( + PermuteModel(dims=[1, 2, 0]), + (torch.randn(8, 10, 12),), + ) + + test_runner.lower_and_run_model( + PermuteModel(dims=[0, 2, 1]), + (torch.randn(8, 10, 12),), + ) + + +def test_permute_4d(test_runner) -> None: + test_runner.lower_and_run_model( + PermuteModel(dims=[3, 2, 1, 0]), + (torch.randn(4, 6, 8, 10),), + ) + + test_runner.lower_and_run_model( + PermuteModel(dims=[0, 2, 1, 3]), + (torch.randn(4, 6, 8, 10),), + ) + + +def test_permute_identity(test_runner) -> None: + test_runner.lower_and_run_model( + PermuteModel(dims=[0, 1]), + (torch.randn(20, 32),), + ) + + test_runner.lower_and_run_model( + PermuteModel(dims=[0, 1, 2]), + (torch.randn(8, 10, 12),), + ) + + +def test_permute_negative_dims(test_runner) -> None: + test_runner.lower_and_run_model( + PermuteModel(dims=[-1, -3, -2, -4]), + (torch.randn(4, 6, 8, 10),), + ) + + test_runner.lower_and_run_model( + PermuteModel(dims=[-4, -2, -3, -1]), + (torch.randn(4, 6, 8, 10),), + ) + + +def test_permute_different_shapes(test_runner) -> None: + test_runner.lower_and_run_model( + PermuteModel(dims=[0]), + (torch.randn(512),), + ) + + test_runner.lower_and_run_model( + PermuteModel(dims=[4, 3, 2, 1, 0]), + (torch.randn(2, 3, 4, 5, 6),), + ) diff --git a/backends/test/suite/operators/test_pow.py b/backends/test/suite/operators/test_pow.py index 3082ad6ebaf..b77f7f2bd8e 100644 --- a/backends/test/suite/operators/test_pow.py +++ b/backends/test/suite/operators/test_pow.py @@ -9,13 +9,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class PowModel(torch.nn.Module): @@ -37,113 +32,112 @@ def forward(self, x, y): return torch.pow(x, y) -@operator_test -class TestPow(OperatorTest): - @dtype_test - def test_pow_dtype(self, flow: TestFlow, dtype) -> None: - # 
Test with different dtypes - model = PowModel(2).to(dtype) - # Use positive values to avoid complex results with fractional powers - self._test_op( - model, - (torch.rand(10, 10).to(dtype) + 0.1,), - flow, - generate_random_test_inputs=False, - ) - - def test_pow_scalar_exponents(self, flow: TestFlow) -> None: - # Test with different scalar exponents - - # Power of 0 (should return 1 for all inputs) - self._test_op( - PowModel(0), - (torch.rand(10, 10) + 0.1,), - flow, - generate_random_test_inputs=False, - ) - - # Power of 1 (should return the input unchanged) - self._test_op( - PowModel(1), - (torch.rand(10, 10) + 0.1,), - flow, - generate_random_test_inputs=False, - ) - - # Power of 2 (squaring) - self._test_op( - PowModel(2), - (torch.rand(10, 10) + 0.1,), - flow, - generate_random_test_inputs=False, - ) - - # Power of 3 (cubing) - self._test_op( - PowModel(3), - (torch.rand(10, 10) + 0.1,), - flow, - generate_random_test_inputs=False, - ) - - # Negative power (-1, reciprocal) - self._test_op( - PowModel(-1), - (torch.rand(10, 10) + 0.1,), - flow, - generate_random_test_inputs=False, - ) - - # Fractional power (square root) - self._test_op( - PowModel(0.5), - (torch.rand(10, 10) + 0.1,), - flow, - generate_random_test_inputs=False, - ) - - # Large power - self._test_op( - PowModel(10), - (torch.rand(10, 10) * 0.5 + 0.5,), - flow, - generate_random_test_inputs=False, - ) - - def test_pow_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - model = PowModel(2) # Square the input - - # 1D tensor - self._test_op( - model, (torch.rand(20) + 0.1,), flow, generate_random_test_inputs=False - ) - - # 2D tensor - self._test_op( - model, (torch.rand(5, 10) + 0.1,), flow, generate_random_test_inputs=False - ) - - # 3D tensor - self._test_op( - model, (torch.rand(3, 4, 5) + 0.1,), flow, generate_random_test_inputs=False - ) - - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_pow_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases - - # 0^0 = 1 (by convention) - x = torch.zeros(1) - y = torch.zeros(1) - self._test_op(PowTensorModel(), (x, y), flow, generate_random_test_inputs=False) - - # Tensor with infinity - x = torch.tensor([float("inf"), 2.0, 3.0]) - y = torch.tensor([2.0, 2.0, 2.0]) - self._test_op(PowTensorModel(), (x, y), flow, generate_random_test_inputs=False) - - # Tensor with NaN - x = torch.tensor([float("nan"), 2.0, 3.0]) - y = torch.tensor([2.0, 2.0, 2.0]) - self._test_op(PowTensorModel(), (x, y), flow, generate_random_test_inputs=False) +@parameterize_by_dtype +def test_pow_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = PowModel(2).to(dtype) + # Use positive values to avoid complex results with fractional powers + test_runner.lower_and_run_model( + model, + (torch.rand(10, 10).to(dtype) + 0.1,), + generate_random_test_inputs=False, + ) + + +def test_pow_scalar_exponents(test_runner) -> None: + # Test with different scalar exponents + + # Power of 0 (should return 1 for all inputs) + test_runner.lower_and_run_model( + PowModel(0), + (torch.rand(10, 10) + 0.1,), + generate_random_test_inputs=False, + ) + + # Power of 1 (should return the input unchanged) + test_runner.lower_and_run_model( + PowModel(1), + (torch.rand(10, 10) + 0.1,), + generate_random_test_inputs=False, + ) + + # Power of 2 (squaring) + test_runner.lower_and_run_model( + PowModel(2), + (torch.rand(10, 10) + 0.1,), + generate_random_test_inputs=False, + ) + + # Power of 3 (cubing) + test_runner.lower_and_run_model( + PowModel(3), + 
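+        # torch.rand is in [0, 1), so the +0.1 shift keeps every base strictly positive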
(torch.rand(10, 10) + 0.1,), + generate_random_test_inputs=False, + ) + + # Negative power (-1, reciprocal) + test_runner.lower_and_run_model( + PowModel(-1), + (torch.rand(10, 10) + 0.1,), + generate_random_test_inputs=False, + ) + + # Fractional power (square root) + test_runner.lower_and_run_model( + PowModel(0.5), + (torch.rand(10, 10) + 0.1,), + generate_random_test_inputs=False, + ) + + # Large power + test_runner.lower_and_run_model( + PowModel(10), + (torch.rand(10, 10) * 0.5 + 0.5,), + generate_random_test_inputs=False, + ) + + +def test_pow_shapes(test_runner) -> None: + # Test with different tensor shapes + model = PowModel(2) # Square the input + + # 1D tensor + test_runner.lower_and_run_model( + model, (torch.rand(20) + 0.1,), generate_random_test_inputs=False + ) + + # 2D tensor + test_runner.lower_and_run_model( + model, (torch.rand(5, 10) + 0.1,), generate_random_test_inputs=False + ) + + # 3D tensor + test_runner.lower_and_run_model( + model, (torch.rand(3, 4, 5) + 0.1,), generate_random_test_inputs=False + ) + + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_pow_edge_cases(test_runner) -> None: + # Test edge cases + + # 0^0 = 1 (by convention) + x = torch.zeros(1) + y = torch.zeros(1) + test_runner.lower_and_run_model( + PowTensorModel(), (x, y), generate_random_test_inputs=False + ) + + # Tensor with infinity + x = torch.tensor([float("inf"), 2.0, 3.0]) + y = torch.tensor([2.0, 2.0, 2.0]) + test_runner.lower_and_run_model( + PowTensorModel(), (x, y), generate_random_test_inputs=False + ) + + # Tensor with NaN + x = torch.tensor([float("nan"), 2.0, 3.0]) + y = torch.tensor([2.0, 2.0, 2.0]) + test_runner.lower_and_run_model( + PowTensorModel(), (x, y), generate_random_test_inputs=False + ) diff --git a/backends/test/suite/operators/test_prelu.py b/backends/test/suite/operators/test_prelu.py index c02fc5692a5..f4d29b581c2 100644 --- a/backends/test/suite/operators/test_prelu.py +++ b/backends/test/suite/operators/test_prelu.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -26,30 +21,36 @@ def forward(self, x): return self.prelu(x) -@operator_test -class TestPReLU(OperatorTest): - @dtype_test - def test_prelu_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model().to(dtype), ((torch.rand(2, 10) * 2 - 1).to(dtype),), flow) +@parameterize_by_dtype +def test_prelu_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model().to(dtype), ((torch.rand(2, 10) * 2 - 1).to(dtype),) + ) - def test_prelu_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_prelu_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_prelu_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - def test_prelu_f32_custom_init(self, flow: TestFlow) -> None: - self._test_op(Model(init=0.1), (torch.randn(3, 4, 5),), flow) - def test_prelu_f32_channel_shared(self, flow: TestFlow) -> None: - # Default num_parameters=1 means the parameter is shared across all channels - self._test_op(Model(num_parameters=1), (torch.randn(2, 3, 4, 5),), flow) +def test_prelu_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), 
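+        # 4D input; with the default num_parameters=1, the single PReLU weight
+        # is shared across all channels (see the channel-shared case below).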
(torch.randn(2, 3, 4, 5),)) - def test_prelu_f32_per_channel_parameter(self, flow: TestFlow) -> None: - # num_parameters=3 means each channel has its own parameter (for dim=1) - self._test_op(Model(num_parameters=3), (torch.randn(2, 3, 4, 5),), flow) - def test_prelu_f32_boundary_values(self, flow: TestFlow) -> None: - # Test with specific positive and negative values - x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) - self._test_op(Model(), (x,), flow) +def test_prelu_f32_custom_init(test_runner) -> None: + test_runner.lower_and_run_model(Model(init=0.1), (torch.randn(3, 4, 5),)) + + +def test_prelu_f32_channel_shared(test_runner) -> None: + # Default num_parameters=1 means the parameter is shared across all channels + test_runner.lower_and_run_model(Model(num_parameters=1), (torch.randn(2, 3, 4, 5),)) + + +def test_prelu_f32_per_channel_parameter(test_runner) -> None: + # num_parameters=3 means each channel has its own parameter (for dim=1) + test_runner.lower_and_run_model(Model(num_parameters=3), (torch.randn(2, 3, 4, 5),)) + + +def test_prelu_f32_boundary_values(test_runner) -> None: + # Test with specific positive and negative values + x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) + test_runner.lower_and_run_model(Model(), (x,)) diff --git a/backends/test/suite/operators/test_relu.py b/backends/test/suite/operators/test_relu.py index 3c4ef2a98d0..6b0eb9a7118 100644 --- a/backends/test/suite/operators/test_relu.py +++ b/backends/test/suite/operators/test_relu.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -28,18 +23,19 @@ def forward(self, x): return torch.nn.functional.relu(x, self.inplace) -@operator_test -class TestReLU(OperatorTest): - @dtype_test - def test_relu_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 100).to(dtype),), flow) +@parameterize_by_dtype +def test_relu_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model(Model(), ((torch.rand(2, 10) * 100).to(dtype),)) - def test_relu_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_relu_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_relu_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - @unittest.skip("In place activations aren't properly defunctionalized yet.") - def test_relu_f32_inplace(self, flow: TestFlow) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) + +def test_relu_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(2, 3, 4, 5),)) + + +@unittest.skip("In place activations aren't properly defunctionalized yet.") +def test_relu_f32_inplace(test_runner) -> None: + test_runner.lower_and_run_model(Model(inplace=True), (torch.randn(3, 4, 5),)) diff --git a/backends/test/suite/operators/test_reshape.py b/backends/test/suite/operators/test_reshape.py index 8bb75ac7844..b35ef50bc89 100644 --- a/backends/test/suite/operators/test_reshape.py +++ b/backends/test/suite/operators/test_reshape.py @@ -9,13 +9,8 @@ from typing import List import torch -from executorch.backends.test.suite.flow import TestFlow -from 
executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class ReshapeModel(torch.nn.Module): @@ -27,56 +22,48 @@ def forward(self, x): return torch.reshape(x, self.shape) -@operator_test -class Reshape(OperatorTest): - @dtype_test - def test_reshape_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - ReshapeModel(shape=[3, 5]), - (torch.rand(15).to(dtype),), - flow, - ) - - def test_reshape_dimensions(self, flow: TestFlow) -> None: - self._test_op( - ReshapeModel(shape=[3, 5]), - (torch.randn(15),), - flow, - ) - - self._test_op( - ReshapeModel(shape=[20]), - (torch.randn(4, 5),), - flow, - ) - - self._test_op( - ReshapeModel(shape=[2, 2, 5]), - (torch.randn(4, 5),), - flow, - ) - - self._test_op( - ReshapeModel(shape=[6, 4]), - (torch.randn(3, 2, 4),), - flow, - ) - - def test_reshape_inferred_dimension(self, flow: TestFlow) -> None: - self._test_op( - ReshapeModel(shape=[3, -1]), - (torch.randn(15),), - flow, - ) - - self._test_op( - ReshapeModel(shape=[-1, 5]), - (torch.randn(15),), - flow, - ) - - self._test_op( - ReshapeModel(shape=[2, -1, 3]), - (torch.randn(24),), - flow, - ) +@parameterize_by_dtype +def test_reshape_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + ReshapeModel(shape=[3, 5]), + (torch.rand(15).to(dtype),), + ) + + +def test_reshape_dimensions(test_runner) -> None: + test_runner.lower_and_run_model( + ReshapeModel(shape=[3, 5]), + (torch.randn(15),), + ) + + test_runner.lower_and_run_model( + ReshapeModel(shape=[20]), + (torch.randn(4, 5),), + ) + + test_runner.lower_and_run_model( + ReshapeModel(shape=[2, 2, 5]), + (torch.randn(4, 5),), + ) + + test_runner.lower_and_run_model( + ReshapeModel(shape=[6, 4]), + (torch.randn(3, 2, 4),), + ) + + +def test_reshape_inferred_dimension(test_runner) -> None: + test_runner.lower_and_run_model( + ReshapeModel(shape=[3, -1]), + (torch.randn(15),), + ) + + test_runner.lower_and_run_model( + ReshapeModel(shape=[-1, 5]), + (torch.randn(15),), + ) + + test_runner.lower_and_run_model( + ReshapeModel(shape=[2, -1, 3]), + (torch.randn(24),), + ) diff --git a/backends/test/suite/operators/test_round.py b/backends/test/suite/operators/test_round.py index 3a3577bea32..9a760ff6e46 100644 --- a/backends/test/suite/operators/test_round.py +++ b/backends/test/suite/operators/test_round.py @@ -9,13 +9,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class RoundModel(torch.nn.Module): @@ -29,101 +24,114 @@ def forward(self, x): return torch.round(x) -@operator_test -class TestRound(OperatorTest): - @dtype_test - def test_round_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = RoundModel().to(dtype) - self._test_op(model, (torch.rand(10, 10).to(dtype) * 10 - 5,), flow) - - def test_round_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - - # 1D tensor - self._test_op(RoundModel(), (torch.randn(20) * 5,), flow) - - # 2D tensor - self._test_op(RoundModel(), (torch.randn(5, 10) * 5,), flow) - - # 3D tensor - self._test_op(RoundModel(), (torch.randn(3, 4, 5) * 5,), flow) - - def test_round_values(self, flow: TestFlow) -> None: - # Values with specific fractional parts - x = torch.arange(-5, 5, 0.5) # [-5.0, -4.5, 
-4.0, ..., 4.0, 4.5] - self._test_op(RoundModel(), (x,), flow, generate_random_test_inputs=False) - - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_round_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases - - # Values exactly halfway between integers (should round to even) - x = torch.tensor([-2.5, -1.5, -0.5, 0.5, 1.5, 2.5]) - self._test_op(RoundModel(), (x,), flow, generate_random_test_inputs=False) - - # Tensor with infinity - x = torch.tensor([float("inf"), float("-inf"), 1.4, -1.4]) - self._test_op(RoundModel(), (x,), flow, generate_random_test_inputs=False) - - # Tensor with NaN - x = torch.tensor([float("nan"), 1.4, -1.4]) - self._test_op(RoundModel(), (x,), flow, generate_random_test_inputs=False) - - # Very large values (where fractional part becomes insignificant) - x = torch.tensor([1e10, 1e10 + 0.4, 1e10 + 0.6]) - self._test_op(RoundModel(), (x,), flow, generate_random_test_inputs=False) - - def test_round_decimals(self, flow: TestFlow) -> None: - # Test with different decimal places - - # Round to 1 decimal place - x = torch.tensor([1.44, 1.45, 1.46, -1.44, -1.45, -1.46]) - self._test_op( - RoundModel(decimals=1), (x,), flow, generate_random_test_inputs=False - ) - - # Round to 2 decimal places - x = torch.tensor([1.444, 1.445, 1.446, -1.444, -1.445, -1.446]) - self._test_op( - RoundModel(decimals=2), (x,), flow, generate_random_test_inputs=False - ) - - # Round to negative decimal places (tens) - x = torch.tensor([14.4, 15.5, 16.6, -14.4, -15.5, -16.6]) - self._test_op( - RoundModel(decimals=-1), (x,), flow, generate_random_test_inputs=False - ) - - # Round to negative decimal places (hundreds) - x = torch.tensor([144.4, 155.5, 166.6, -144.4, -155.5, -166.6]) - self._test_op( - RoundModel(decimals=-2), (x,), flow, generate_random_test_inputs=False - ) - - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_round_decimals_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases with decimal places - - # Infinity and NaN with various decimal places - x = torch.tensor([float("inf"), float("-inf"), float("nan")]) - self._test_op( - RoundModel(decimals=2), (x,), flow, generate_random_test_inputs=False - ) - self._test_op( - RoundModel(decimals=-2), (x,), flow, generate_random_test_inputs=False - ) - - # Values exactly at the rounding threshold for different decimal places - x = torch.tensor([0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95]) - self._test_op( - RoundModel(decimals=1), (x,), flow, generate_random_test_inputs=False - ) - - # Negative values exactly at the rounding threshold - x = torch.tensor( - [-0.05, -0.15, -0.25, -0.35, -0.45, -0.55, -0.65, -0.75, -0.85, -0.95] - ) - self._test_op( - RoundModel(decimals=1), (x,), flow, generate_random_test_inputs=False - ) +@parameterize_by_dtype +def test_round_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = RoundModel().to(dtype) + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype) * 10 - 5,)) + + +def test_round_shapes(test_runner) -> None: + # Test with different tensor shapes + + # 1D tensor + test_runner.lower_and_run_model(RoundModel(), (torch.randn(20) * 5,)) + + # 2D tensor + test_runner.lower_and_run_model(RoundModel(), (torch.randn(5, 10) * 5,)) + + # 3D tensor + test_runner.lower_and_run_model(RoundModel(), (torch.randn(3, 4, 5) * 5,)) + + +def test_round_values(test_runner) -> None: + # Values with specific fractional parts + x = torch.arange(-5, 5, 0.5) # [-5.0, -4.5, -4.0, ..., 4.0, 4.5] + 
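The `@parameterize_by_dtype` decorator used by the converted tests in these hunks expands a single test function into one pytest case per dtype, replacing the old `@dtype_test`/`OperatorTest` machinery. The real decorator is imported from `executorch.backends.test.suite.operators`; the sketch below is only a plausible shape for it, and the dtype list is an assumption for illustration.

    import pytest
    import torch

    # Assumed dtype coverage for illustration; the suite defines its own list.
    _DTYPES = [torch.float16, torch.float32]

    def parameterize_by_dtype(func):
        # Expand the decorated test into one pytest case per dtype, with
        # readable case ids ("float16", "float32") derived from the dtype name.
        ids = [str(d).removeprefix("torch.") for d in _DTYPES]
        return pytest.mark.parametrize("dtype", _DTYPES, ids=ids)(func)
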
test_runner.lower_and_run_model( + RoundModel(), (x,), generate_random_test_inputs=False + ) + + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_round_edge_cases(test_runner) -> None: + # Test edge cases + + # Values exactly halfway between integers (should round to even) + x = torch.tensor([-2.5, -1.5, -0.5, 0.5, 1.5, 2.5]) + test_runner.lower_and_run_model( + RoundModel(), (x,), generate_random_test_inputs=False + ) + + # Tensor with infinity + x = torch.tensor([float("inf"), float("-inf"), 1.4, -1.4]) + test_runner.lower_and_run_model( + RoundModel(), (x,), generate_random_test_inputs=False + ) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.4, -1.4]) + test_runner.lower_and_run_model( + RoundModel(), (x,), generate_random_test_inputs=False + ) + + # Very large values (where fractional part becomes insignificant) + x = torch.tensor([1e10, 1e10 + 0.4, 1e10 + 0.6]) + test_runner.lower_and_run_model( + RoundModel(), (x,), generate_random_test_inputs=False + ) + + +def test_round_decimals(test_runner) -> None: + # Test with different decimal places + + # Round to 1 decimal place + x = torch.tensor([1.44, 1.45, 1.46, -1.44, -1.45, -1.46]) + test_runner.lower_and_run_model( + RoundModel(decimals=1), (x,), generate_random_test_inputs=False + ) + + # Round to 2 decimal places + x = torch.tensor([1.444, 1.445, 1.446, -1.444, -1.445, -1.446]) + test_runner.lower_and_run_model( + RoundModel(decimals=2), (x,), generate_random_test_inputs=False + ) + + # Round to negative decimal places (tens) + x = torch.tensor([14.4, 15.5, 16.6, -14.4, -15.5, -16.6]) + test_runner.lower_and_run_model( + RoundModel(decimals=-1), (x,), generate_random_test_inputs=False + ) + + # Round to negative decimal places (hundreds) + x = torch.tensor([144.4, 155.5, 166.6, -144.4, -155.5, -166.6]) + test_runner.lower_and_run_model( + RoundModel(decimals=-2), (x,), generate_random_test_inputs=False + ) + + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_round_decimals_edge_cases(test_runner) -> None: + # Test edge cases with decimal places + + # Infinity and NaN with various decimal places + x = torch.tensor([float("inf"), float("-inf"), float("nan")]) + test_runner.lower_and_run_model( + RoundModel(decimals=2), (x,), generate_random_test_inputs=False + ) + test_runner.lower_and_run_model( + RoundModel(decimals=-2), (x,), generate_random_test_inputs=False + ) + + # Values exactly at the rounding threshold for different decimal places + x = torch.tensor([0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95]) + test_runner.lower_and_run_model( + RoundModel(decimals=1), (x,), generate_random_test_inputs=False + ) + + # Negative values exactly at the rounding threshold + x = torch.tensor( + [-0.05, -0.15, -0.25, -0.35, -0.45, -0.55, -0.65, -0.75, -0.85, -0.95] + ) + test_runner.lower_and_run_model( + RoundModel(decimals=1), (x,), generate_random_test_inputs=False + ) diff --git a/backends/test/suite/operators/test_rsqrt.py b/backends/test/suite/operators/test_rsqrt.py index 705833194fb..1154332463a 100644 --- a/backends/test/suite/operators/test_rsqrt.py +++ b/backends/test/suite/operators/test_rsqrt.py @@ -9,13 +9,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class RsqrtModel(torch.nn.Module): @@ -26,33 +21,37 @@ def forward(self, x): return 
torch.rsqrt(x) -@operator_test -class TestRsqrt(OperatorTest): - @dtype_test - def test_rsqrt_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = RsqrtModel().to(dtype) - # Use positive values only for rsqrt to avoid division by zero - self._test_op(model, (torch.rand(10, 10).to(dtype) + 0.01,), flow) +@parameterize_by_dtype +def test_rsqrt_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = RsqrtModel().to(dtype) + # Use positive values only for rsqrt to avoid division by zero + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype) + 0.01,)) - def test_rsqrt_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - # 1D tensor - self._test_op(RsqrtModel(), (torch.rand(20) + 0.01,), flow) +def test_rsqrt_shapes(test_runner) -> None: + # Test with different tensor shapes - # 2D tensor - self._test_op(RsqrtModel(), (torch.rand(5, 10) + 0.01,), flow) + # 1D tensor + test_runner.lower_and_run_model(RsqrtModel(), (torch.rand(20) + 0.01,)) - # 3D tensor - self._test_op(RsqrtModel(), (torch.rand(3, 4, 5) + 0.01,), flow) + # 2D tensor + test_runner.lower_and_run_model(RsqrtModel(), (torch.rand(5, 10) + 0.01,)) - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_rsqrt_edge_cases(self, flow: TestFlow) -> None: - # Tensor with infinity - x = torch.tensor([float("inf"), 1.0, 4.0]) - self._test_op(RsqrtModel(), (x,), flow, generate_random_test_inputs=False) + # 3D tensor + test_runner.lower_and_run_model(RsqrtModel(), (torch.rand(3, 4, 5) + 0.01,)) - # Tensor with NaN - x = torch.tensor([float("nan"), 1.0, 4.0]) - self._test_op(RsqrtModel(), (x,), flow, generate_random_test_inputs=False) + +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_rsqrt_edge_cases(test_runner) -> None: + # Tensor with infinity + x = torch.tensor([float("inf"), 1.0, 4.0]) + test_runner.lower_and_run_model( + RsqrtModel(), (x,), generate_random_test_inputs=False + ) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.0, 4.0]) + test_runner.lower_and_run_model( + RsqrtModel(), (x,), generate_random_test_inputs=False + ) diff --git a/backends/test/suite/operators/test_select.py b/backends/test/suite/operators/test_select.py index a230f786463..d25c8f02029 100644 --- a/backends/test/suite/operators/test_select.py +++ b/backends/test/suite/operators/test_select.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class SelectModel(torch.nn.Module): @@ -27,63 +22,55 @@ def forward(self, x): return torch.select(x, dim=self.dim, index=self.index) -@operator_test -class Select(OperatorTest): - @dtype_test - def test_select_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - SelectModel(dim=0, index=0), - (torch.rand(3, 4, 5).to(dtype),), - flow, - ) - - def test_select_dimensions(self, flow: TestFlow) -> None: - self._test_op( - SelectModel(dim=0, index=1), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - SelectModel(dim=1, index=2), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - SelectModel(dim=2, index=3), - (torch.randn(3, 4, 5),), - flow, - ) - - def test_select_negative_dim(self, flow: TestFlow) -> None: - self._test_op( - SelectModel(dim=-1, index=2), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - SelectModel(dim=-2, index=1), - 
(torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - SelectModel(dim=-3, index=0), - (torch.randn(3, 4, 5),), - flow, - ) - - def test_select_different_shapes(self, flow: TestFlow) -> None: - self._test_op( - SelectModel(dim=0, index=1), - (torch.randn(3, 4),), - flow, - ) - - self._test_op( - SelectModel(dim=1, index=1), - (torch.randn(2, 3, 4, 5),), - flow, - ) +@parameterize_by_dtype +def test_select_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + SelectModel(dim=0, index=0), + (torch.rand(3, 4, 5).to(dtype),), + ) + + +def test_select_dimensions(test_runner) -> None: + test_runner.lower_and_run_model( + SelectModel(dim=0, index=1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + SelectModel(dim=1, index=2), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + SelectModel(dim=2, index=3), + (torch.randn(3, 4, 5),), + ) + + +def test_select_negative_dim(test_runner) -> None: + test_runner.lower_and_run_model( + SelectModel(dim=-1, index=2), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + SelectModel(dim=-2, index=1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + SelectModel(dim=-3, index=0), + (torch.randn(3, 4, 5),), + ) + + +def test_select_different_shapes(test_runner) -> None: + test_runner.lower_and_run_model( + SelectModel(dim=0, index=1), + (torch.randn(3, 4),), + ) + + test_runner.lower_and_run_model( + SelectModel(dim=1, index=1), + (torch.randn(2, 3, 4, 5),), + ) diff --git a/backends/test/suite/operators/test_sigmoid.py b/backends/test/suite/operators/test_sigmoid.py index df083218884..98942548ee1 100644 --- a/backends/test/suite/operators/test_sigmoid.py +++ b/backends/test/suite/operators/test_sigmoid.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -22,19 +17,20 @@ def forward(self, x): return torch.nn.functional.sigmoid(x) -@operator_test -class TestSigmoid(OperatorTest): - @dtype_test - def test_sigmoid_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow) +@parameterize_by_dtype +def test_sigmoid_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),)) - def test_sigmoid_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_sigmoid_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_sigmoid_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - def test_sigmoid_f32_boundary_values(self, flow: TestFlow) -> None: - # Test with specific values spanning negative and positive ranges - x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) - self._test_op(Model(), (x,), flow) + +def test_sigmoid_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(2, 3, 4, 5),)) + + +def test_sigmoid_f32_boundary_values(test_runner) -> None: + # Test with specific values spanning negative and positive ranges + x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) + test_runner.lower_and_run_model(Model(), (x,)) diff --git a/backends/test/suite/operators/test_silu.py 
b/backends/test/suite/operators/test_silu.py index cf6d343f271..4e436c33e84 100644 --- a/backends/test/suite/operators/test_silu.py +++ b/backends/test/suite/operators/test_silu.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -28,23 +23,25 @@ def forward(self, x): return torch.nn.functional.silu(x, inplace=self.inplace) -@operator_test -class TestSiLU(OperatorTest): - @dtype_test - def test_silu_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model(), ((torch.randn(2, 10) * 100).to(dtype),), flow) +@parameterize_by_dtype +def test_silu_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model(Model(), ((torch.randn(2, 10) * 100).to(dtype),)) - def test_silu_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_silu_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_silu_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - @unittest.skip("In place activations aren't properly defunctionalized yet.") - def test_silu_f32_inplace(self, flow: TestFlow) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) - def test_silu_f32_boundary_values(self, flow: TestFlow) -> None: - # Test with specific values spanning negative and positive ranges - x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) - self._test_op(Model(), (x,), flow) +def test_silu_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(2, 3, 4, 5),)) + + +@unittest.skip("In place activations aren't properly defunctionalized yet.") +def test_silu_f32_inplace(test_runner) -> None: + test_runner.lower_and_run_model(Model(inplace=True), (torch.randn(3, 4, 5),)) + + +def test_silu_f32_boundary_values(test_runner) -> None: + # Test with specific values spanning negative and positive ranges + x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) + test_runner.lower_and_run_model(Model(), (x,)) diff --git a/backends/test/suite/operators/test_slice.py b/backends/test/suite/operators/test_slice.py index e39f451268e..76ba655e146 100644 --- a/backends/test/suite/operators/test_slice.py +++ b/backends/test/suite/operators/test_slice.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class SliceSimple(torch.nn.Module): @@ -52,95 +47,86 @@ def forward(self, x): return x[1, 2:8, 3:15] -@operator_test -class Slice(OperatorTest): - @dtype_test - def test_slice_simple_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - SliceSimple().to(dtype), - (torch.rand(8, 16, 20).to(dtype),), - flow, - ) - - def test_slice_range(self, flow: TestFlow) -> None: - self._test_op( - SliceRange(), - (torch.rand(8, 32, 16),), - flow, - ) - - def test_slice_multi_dimensions(self, flow: TestFlow) -> None: - # Test 2D multi-dimensional slicing - self._test_op( - SliceMultiDim2D(), - (torch.randn(12, 20),), - flow, - ) - - # Test 3D multi-dimensional slicing - self._test_op( - SliceMultiDim3D(), - (torch.randn(8, 12, 
20),), - flow, - ) - - # Test 4D multi-dimensional slicing - self._test_op( - SliceMultiDim4D(), - (torch.randn(4, 8, 12, 16),), - flow, - ) - - # Test mixed slicing (single index + ranges) - self._test_op( - SliceMultiDimMixed(), - (torch.randn(4, 12, 20),), - flow, - ) - - def test_slice_different_patterns(self, flow: TestFlow) -> None: - # Test various slicing patterns on larger tensors - - # Pattern 1: Start from beginning - class SliceFromStart(torch.nn.Module): - def forward(self, x): - return x[:4, :8, 2:16] - - self._test_op( - SliceFromStart(), - (torch.randn(8, 12, 20),), - flow, - ) - - # Pattern 2: Slice to end - class SliceToEnd(torch.nn.Module): - def forward(self, x): - return x[2:, 4:, 1:] - - self._test_op( - SliceToEnd(), - (torch.randn(8, 12, 16),), - flow, - ) - - # Pattern 3: Step slicing on multiple dimensions - class SliceWithStep(torch.nn.Module): - def forward(self, x): - return x[::2, 1::2, 2::3] - - self._test_op( - SliceWithStep(), - (torch.randn(12, 16, 24),), - flow, - ) - - # Pattern 4: Negative indices - class SliceNegative(torch.nn.Module): - def forward(self, x): - return x[-6:-2, -12:-4, -16:-2] - - self._test_op( - SliceNegative(), - (torch.randn(10, 16, 20),), - flow, - ) +@parameterize_by_dtype +def test_slice_simple_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + SliceSimple().to(dtype), + (torch.rand(8, 16, 20).to(dtype),), + ) + + +def test_slice_range(test_runner) -> None: + test_runner.lower_and_run_model( + SliceRange(), + (torch.rand(8, 32, 16),), + ) + + +def test_slice_multi_dimensions(test_runner) -> None: + # Test 2D multi-dimensional slicing + test_runner.lower_and_run_model( + SliceMultiDim2D(), + (torch.randn(12, 20),), + ) + + # Test 3D multi-dimensional slicing + test_runner.lower_and_run_model( + SliceMultiDim3D(), + (torch.randn(8, 12, 20),), + ) + + # Test 4D multi-dimensional slicing + test_runner.lower_and_run_model( + SliceMultiDim4D(), + (torch.randn(4, 8, 12, 16),), + ) + + # Test mixed slicing (single index + ranges) + test_runner.lower_and_run_model( + SliceMultiDimMixed(), + (torch.randn(4, 12, 20),), + ) + + +def test_slice_different_patterns(test_runner) -> None: + # Test various slicing patterns on larger tensors + + # Pattern 1: Start from beginning + class SliceFromStart(torch.nn.Module): + def forward(self, x): + return x[:4, :8, 2:16] + + test_runner.lower_and_run_model( + SliceFromStart(), + (torch.randn(8, 12, 20),), + ) + + # Pattern 2: Slice to end + class SliceToEnd(torch.nn.Module): + def forward(self, x): + return x[2:, 4:, 1:] + + test_runner.lower_and_run_model( + SliceToEnd(), + (torch.randn(8, 12, 16),), + ) + + # Pattern 3: Step slicing on multiple dimensions + class SliceWithStep(torch.nn.Module): + def forward(self, x): + return x[::2, 1::2, 2::3] + + test_runner.lower_and_run_model( + SliceWithStep(), + (torch.randn(12, 16, 24),), + ) + + # Pattern 4: Negative indices + class SliceNegative(torch.nn.Module): + def forward(self, x): + return x[-6:-2, -12:-4, -16:-2] + + test_runner.lower_and_run_model( + SliceNegative(), + (torch.randn(10, 16, 20),), + ) diff --git a/backends/test/suite/operators/test_split.py b/backends/test/suite/operators/test_split.py index 6b5ce5f37b7..c87f4282348 100644 --- a/backends/test/suite/operators/test_split.py +++ b/backends/test/suite/operators/test_split.py @@ -9,13 +9,8 @@ from typing import List import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - 
operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class SplitSizeModel(torch.nn.Module): @@ -38,82 +33,72 @@ def forward(self, x): return torch.split(x, self.sections, dim=self.dim) -@operator_test -class Split(OperatorTest): - @dtype_test - def test_split_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - SplitSizeModel(split_size=2), - (torch.rand(6, 4).to(dtype),), - flow, - ) - - def test_split_size_dimensions(self, flow: TestFlow) -> None: - self._test_op( - SplitSizeModel(split_size=2, dim=0), - (torch.randn(6, 4),), - flow, - ) - - self._test_op( - SplitSizeModel(split_size=2, dim=1), - (torch.randn(4, 6),), - flow, - ) - - self._test_op( - SplitSizeModel(split_size=2, dim=2), - (torch.randn(3, 4, 6),), - flow, - ) - - def test_split_size_uneven(self, flow: TestFlow) -> None: - self._test_op( - SplitSizeModel(split_size=3), - (torch.randn(7, 4),), - flow, - ) - - self._test_op( - SplitSizeModel(split_size=3, dim=1), - (torch.randn(4, 7),), - flow, - ) - - def test_split_sections_dimensions(self, flow: TestFlow) -> None: - self._test_op( - SplitSectionsModel(sections=[2, 3, 1], dim=0), - (torch.randn(6, 4),), - flow, - ) - - self._test_op( - SplitSectionsModel(sections=[2, 3, 1], dim=1), - (torch.randn(4, 6),), - flow, - ) - - self._test_op( - SplitSectionsModel(sections=[2, 3, 1], dim=2), - (torch.randn(3, 4, 6),), - flow, - ) - - def test_split_negative_dim(self, flow: TestFlow) -> None: - self._test_op( - SplitSizeModel(split_size=2, dim=-1), - (torch.randn(4, 6),), - flow, - ) - - self._test_op( - SplitSizeModel(split_size=2, dim=-2), - (torch.randn(4, 6),), - flow, - ) - - self._test_op( - SplitSectionsModel(sections=[2, 3, 1], dim=-1), - (torch.randn(4, 6),), - flow, - ) +@parameterize_by_dtype +def test_split_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + SplitSizeModel(split_size=2), + (torch.rand(6, 4).to(dtype),), + ) + + +def test_split_size_dimensions(test_runner) -> None: + test_runner.lower_and_run_model( + SplitSizeModel(split_size=2, dim=0), + (torch.randn(6, 4),), + ) + + test_runner.lower_and_run_model( + SplitSizeModel(split_size=2, dim=1), + (torch.randn(4, 6),), + ) + + test_runner.lower_and_run_model( + SplitSizeModel(split_size=2, dim=2), + (torch.randn(3, 4, 6),), + ) + + +def test_split_size_uneven(test_runner) -> None: + test_runner.lower_and_run_model( + SplitSizeModel(split_size=3), + (torch.randn(7, 4),), + ) + + test_runner.lower_and_run_model( + SplitSizeModel(split_size=3, dim=1), + (torch.randn(4, 7),), + ) + + +def test_split_sections_dimensions(test_runner) -> None: + test_runner.lower_and_run_model( + SplitSectionsModel(sections=[2, 3, 1], dim=0), + (torch.randn(6, 4),), + ) + + test_runner.lower_and_run_model( + SplitSectionsModel(sections=[2, 3, 1], dim=1), + (torch.randn(4, 6),), + ) + + test_runner.lower_and_run_model( + SplitSectionsModel(sections=[2, 3, 1], dim=2), + (torch.randn(3, 4, 6),), + ) + + +def test_split_negative_dim(test_runner) -> None: + test_runner.lower_and_run_model( + SplitSizeModel(split_size=2, dim=-1), + (torch.randn(4, 6),), + ) + + test_runner.lower_and_run_model( + SplitSizeModel(split_size=2, dim=-2), + (torch.randn(4, 6),), + ) + + test_runner.lower_and_run_model( + SplitSectionsModel(sections=[2, 3, 1], dim=-1), + (torch.randn(4, 6),), + ) diff --git a/backends/test/suite/operators/test_sqrt.py b/backends/test/suite/operators/test_sqrt.py index 3d327ade6a5..bc08f72b44a 100644 --- 
a/backends/test/suite/operators/test_sqrt.py +++ b/backends/test/suite/operators/test_sqrt.py @@ -9,13 +9,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class SqrtModel(torch.nn.Module): @@ -26,35 +21,39 @@ def forward(self, x): return torch.sqrt(x) -@operator_test -class TestSqrt(OperatorTest): - @dtype_test - def test_sqrt_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = SqrtModel().to(dtype) - # Use non-negative values only for sqrt - self._test_op(model, (torch.rand(10, 10).to(dtype),), flow) +@parameterize_by_dtype +def test_sqrt_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = SqrtModel().to(dtype) + # Use non-negative values only for sqrt + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype),)) - def test_sqrt_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - # 1D tensor - self._test_op(SqrtModel(), (torch.rand(20),), flow) +def test_sqrt_shapes(test_runner) -> None: + # Test with different tensor shapes - # 2D tensor - self._test_op(SqrtModel(), (torch.rand(5, 10),), flow) + # 1D tensor + test_runner.lower_and_run_model(SqrtModel(), (torch.rand(20),)) - # 3D tensor - self._test_op(SqrtModel(), (torch.rand(3, 4, 5),), flow) + # 2D tensor + test_runner.lower_and_run_model(SqrtModel(), (torch.rand(5, 10),)) - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_sqrt_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases + # 3D tensor + test_runner.lower_and_run_model(SqrtModel(), (torch.rand(3, 4, 5),)) - # Tensor with infinity - x = torch.tensor([float("inf"), 1.0, 4.0]) - self._test_op(SqrtModel(), (x,), flow, generate_random_test_inputs=False) - # Tensor with NaN - x = torch.tensor([float("nan"), 1.0, 4.0]) - self._test_op(SqrtModel(), (x,), flow, generate_random_test_inputs=False) +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_sqrt_edge_cases(test_runner) -> None: + # Test edge cases + + # Tensor with infinity + x = torch.tensor([float("inf"), 1.0, 4.0]) + test_runner.lower_and_run_model( + SqrtModel(), (x,), generate_random_test_inputs=False + ) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.0, 4.0]) + test_runner.lower_and_run_model( + SqrtModel(), (x,), generate_random_test_inputs=False + ) diff --git a/backends/test/suite/operators/test_square.py b/backends/test/suite/operators/test_square.py index 39ed212e426..896210a9601 100644 --- a/backends/test/suite/operators/test_square.py +++ b/backends/test/suite/operators/test_square.py @@ -9,13 +9,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class SquareModel(torch.nn.Module): @@ -26,42 +21,50 @@ def forward(self, x): return torch.square(x) -@operator_test -class TestSquare(OperatorTest): - @dtype_test - def test_square_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = SquareModel().to(dtype) - self._test_op(model, (torch.rand(10, 10).to(dtype) * 2 - 1,), flow) +@parameterize_by_dtype +def test_square_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = 
SquareModel().to(dtype) + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype) * 2 - 1,)) - def test_square_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - # 1D tensor - self._test_op(SquareModel(), (torch.randn(20),), flow) +def test_square_shapes(test_runner) -> None: + # Test with different tensor shapes - # 2D tensor - self._test_op(SquareModel(), (torch.randn(5, 10),), flow) + # 1D tensor + test_runner.lower_and_run_model(SquareModel(), (torch.randn(20),)) - # 3D tensor - self._test_op(SquareModel(), (torch.randn(3, 4, 5),), flow) + # 2D tensor + test_runner.lower_and_run_model(SquareModel(), (torch.randn(5, 10),)) - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_square_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases + # 3D tensor + test_runner.lower_and_run_model(SquareModel(), (torch.randn(3, 4, 5),)) - # Tensor with infinity - x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) - self._test_op(SquareModel(), (x,), flow, generate_random_test_inputs=False) - # Tensor with NaN - x = torch.tensor([float("nan"), 1.0, -1.0]) - self._test_op(SquareModel(), (x,), flow, generate_random_test_inputs=False) +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_square_edge_cases(test_runner) -> None: + # Test edge cases - # Very large values (close to overflow for some dtypes) - x = torch.tensor([1e10, -1e10]) - self._test_op(SquareModel(), (x,), flow, generate_random_test_inputs=False) + # Tensor with infinity + x = torch.tensor([float("inf"), float("-inf"), 1.0, -1.0]) + test_runner.lower_and_run_model( + SquareModel(), (x,), generate_random_test_inputs=False + ) - # Very small values (close to underflow) - x = torch.tensor([1e-10, -1e-10]) - self._test_op(SquareModel(), (x,), flow, generate_random_test_inputs=False) + # Tensor with NaN + x = torch.tensor([float("nan"), 1.0, -1.0]) + test_runner.lower_and_run_model( + SquareModel(), (x,), generate_random_test_inputs=False + ) + + # Very large values (close to overflow for some dtypes) + x = torch.tensor([1e10, -1e10]) + test_runner.lower_and_run_model( + SquareModel(), (x,), generate_random_test_inputs=False + ) + + # Very small values (close to underflow) + x = torch.tensor([1e-10, -1e-10]) + test_runner.lower_and_run_model( + SquareModel(), (x,), generate_random_test_inputs=False + ) diff --git a/backends/test/suite/operators/test_squeeze.py b/backends/test/suite/operators/test_squeeze.py index 5ab6333162d..e07228ada6f 100644 --- a/backends/test/suite/operators/test_squeeze.py +++ b/backends/test/suite/operators/test_squeeze.py @@ -7,13 +7,8 @@ # pyre-unsafe import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class SqueezeModel(torch.nn.Module): @@ -30,57 +25,50 @@ def forward(self, x): return torch.squeeze(x, dim=self.dim) -@operator_test -class Squeeze(OperatorTest): - @dtype_test - def test_squeeze_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - SqueezeModel(), - (torch.rand(1, 3, 1, 5).to(dtype),), - flow, - ) - - def test_squeeze_specific_dimension(self, flow: TestFlow) -> None: - self._test_op( - SqueezeDimModel(dim=0), - (torch.randn(1, 3, 5),), - flow, - ) - - self._test_op( - SqueezeDimModel(dim=2), - (torch.randn(3, 4, 1, 5),), - flow, - ) - - self._test_op( - SqueezeDimModel(dim=-1), - (torch.randn(3, 4, 5, 
1),), - flow, - ) - - def test_squeeze_no_effect(self, flow: TestFlow) -> None: - self._test_op( - SqueezeDimModel(dim=1), - (torch.randn(3, 4, 5),), - flow, - ) - - self._test_op( - SqueezeModel(), - (torch.randn(3, 4, 5),), - flow, - ) - - def test_squeeze_multiple_dims(self, flow: TestFlow) -> None: - self._test_op( - SqueezeModel(), - (torch.randn(1, 3, 1, 5, 1),), - flow, - ) - - self._test_op( - SqueezeDimModel(dim=(0, 1)), - (torch.randn(1, 1, 1),), - flow, - ) +@parameterize_by_dtype +def test_squeeze_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + SqueezeModel(), + (torch.rand(1, 3, 1, 5).to(dtype),), + ) + + +def test_squeeze_specific_dimension(test_runner) -> None: + test_runner.lower_and_run_model( + SqueezeDimModel(dim=0), + (torch.randn(1, 3, 5),), + ) + + test_runner.lower_and_run_model( + SqueezeDimModel(dim=2), + (torch.randn(3, 4, 1, 5),), + ) + + test_runner.lower_and_run_model( + SqueezeDimModel(dim=-1), + (torch.randn(3, 4, 5, 1),), + ) + + +def test_squeeze_no_effect(test_runner) -> None: + test_runner.lower_and_run_model( + SqueezeDimModel(dim=1), + (torch.randn(3, 4, 5),), + ) + + test_runner.lower_and_run_model( + SqueezeModel(), + (torch.randn(3, 4, 5),), + ) + + +def test_squeeze_multiple_dims(test_runner) -> None: + test_runner.lower_and_run_model( + SqueezeModel(), + (torch.randn(1, 3, 1, 5, 1),), + ) + + test_runner.lower_and_run_model( + SqueezeDimModel(dim=(0, 1)), + (torch.randn(1, 1, 1),), + ) diff --git a/backends/test/suite/operators/test_stack.py b/backends/test/suite/operators/test_stack.py index 14fefa82c6e..78cb1f555cc 100644 --- a/backends/test/suite/operators/test_stack.py +++ b/backends/test/suite/operators/test_stack.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class StackModel(torch.nn.Module): @@ -26,89 +21,82 @@ def forward(self, x1, x2, x3): return torch.stack([x1, x2, x3], dim=self.dim) -@operator_test -class Stack(OperatorTest): - @dtype_test - def test_stack_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - StackModel(), - ( - torch.rand(3, 4).to(dtype), - torch.rand(3, 4).to(dtype), - torch.rand(3, 4).to(dtype), - ), - flow, - ) - - def test_stack_dimensions(self, flow: TestFlow) -> None: - self._test_op( - StackModel(dim=0), - ( - torch.randn(3, 4), - torch.randn(3, 4), - torch.randn(3, 4), - ), - flow, - ) - - self._test_op( - StackModel(dim=1), - ( - torch.randn(3, 4), - torch.randn(3, 4), - torch.randn(3, 4), - ), - flow, - ) - - self._test_op( - StackModel(dim=2), - ( - torch.randn(3, 4), - torch.randn(3, 4), - torch.randn(3, 4), - ), - flow, - ) - - def test_stack_negative_dim(self, flow: TestFlow) -> None: - self._test_op( - StackModel(dim=-1), - ( - torch.randn(3, 4), - torch.randn(3, 4), - torch.randn(3, 4), - ), - flow, - ) - - self._test_op( - StackModel(dim=-2), - ( - torch.randn(3, 4), - torch.randn(3, 4), - torch.randn(3, 4), - ), - flow, - ) - - def test_stack_different_shapes(self, flow: TestFlow) -> None: - self._test_op( - StackModel(), - ( - torch.randn(5), - torch.randn(5), - torch.randn(5), - ), - flow, - ) - - self._test_op( - StackModel(), - ( - torch.randn(2, 3, 4), - torch.randn(2, 3, 4), - torch.randn(2, 3, 4), - ), - flow, - ) +@parameterize_by_dtype +def test_stack_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + StackModel(), + 
( + torch.rand(3, 4).to(dtype), + torch.rand(3, 4).to(dtype), + torch.rand(3, 4).to(dtype), + ), + ) + + +def test_stack_dimensions(test_runner) -> None: + test_runner.lower_and_run_model( + StackModel(dim=0), + ( + torch.randn(3, 4), + torch.randn(3, 4), + torch.randn(3, 4), + ), + ) + + test_runner.lower_and_run_model( + StackModel(dim=1), + ( + torch.randn(3, 4), + torch.randn(3, 4), + torch.randn(3, 4), + ), + ) + + test_runner.lower_and_run_model( + StackModel(dim=2), + ( + torch.randn(3, 4), + torch.randn(3, 4), + torch.randn(3, 4), + ), + ) + + +def test_stack_negative_dim(test_runner) -> None: + test_runner.lower_and_run_model( + StackModel(dim=-1), + ( + torch.randn(3, 4), + torch.randn(3, 4), + torch.randn(3, 4), + ), + ) + + test_runner.lower_and_run_model( + StackModel(dim=-2), + ( + torch.randn(3, 4), + torch.randn(3, 4), + torch.randn(3, 4), + ), + ) + + +def test_stack_different_shapes(test_runner) -> None: + test_runner.lower_and_run_model( + StackModel(), + ( + torch.randn(5), + torch.randn(5), + torch.randn(5), + ), + ) + + test_runner.lower_and_run_model( + StackModel(), + ( + torch.randn(2, 3, 4), + torch.randn(2, 3, 4), + torch.randn(2, 3, 4), + ), + ) diff --git a/backends/test/suite/operators/test_sub.py b/backends/test/suite/operators/test_sub.py index 2243eb6ee71..37c00e2904a 100644 --- a/backends/test/suite/operators/test_sub.py +++ b/backends/test/suite/operators/test_sub.py @@ -7,13 +7,8 @@ # pyre-unsafe import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -30,55 +25,52 @@ def forward(self, x, y): return torch.sub(x, y, alpha=self.alpha) -@operator_test -class Subtract(OperatorTest): - @dtype_test - def test_subtract_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - Model(), - ( - (torch.rand(2, 10) * 100).to(dtype), - (torch.rand(2, 10) * 100).to(dtype), - ), - flow, - ) - - def test_subtract_f32_bcast_first(self, flow: TestFlow) -> None: - self._test_op( - Model(), - ( - torch.randn(5), - torch.randn(1, 5, 1, 5), - ), - flow, - ) - - def test_subtract_f32_bcast_second(self, flow: TestFlow) -> None: - self._test_op( - Model(), - ( - torch.randn(4, 4, 2, 7), - torch.randn(2, 7), - ), - flow, - ) - - def test_subtract_f32_bcast_unary(self, flow: TestFlow) -> None: - self._test_op( - Model(), - ( - torch.randn(5), - torch.randn(1, 1, 5), - ), - flow, - ) - - def test_subtract_f32_alpha(self, flow: TestFlow) -> None: - self._test_op( - ModelAlpha(alpha=2), - ( - torch.randn(1, 25), - torch.randn(1, 25), - ), - flow, - ) +@parameterize_by_dtype +def test_subtract_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + Model(), + ( + (torch.rand(2, 10) * 100).to(dtype), + (torch.rand(2, 10) * 100).to(dtype), + ), + ) + + +def test_subtract_f32_bcast_first(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(5), + torch.randn(1, 5, 1, 5), + ), + ) + + +def test_subtract_f32_bcast_second(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(4, 4, 2, 7), + torch.randn(2, 7), + ), + ) + + +def test_subtract_f32_bcast_unary(test_runner) -> None: + test_runner.lower_and_run_model( + Model(), + ( + torch.randn(5), + torch.randn(1, 1, 5), + ), + ) + + +def test_subtract_f32_alpha(test_runner) -> None: + test_runner.lower_and_run_model( + 
ModelAlpha(alpha=2), + ( + torch.randn(1, 25), + torch.randn(1, 25), + ), + ) diff --git a/backends/test/suite/operators/test_tanh.py b/backends/test/suite/operators/test_tanh.py index 7f961493ce9..c90f3d05287 100644 --- a/backends/test/suite/operators/test_tanh.py +++ b/backends/test/suite/operators/test_tanh.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -22,19 +17,20 @@ def forward(self, x): return torch.nn.functional.tanh(x) -@operator_test -class TestTanh(OperatorTest): - @dtype_test - def test_tanh_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow) +@parameterize_by_dtype +def test_tanh_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),)) - def test_tanh_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_tanh_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_tanh_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - def test_tanh_f32_boundary_values(self, flow: TestFlow) -> None: - # Test with specific values spanning negative and positive ranges - x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) - self._test_op(Model(), (x,), flow) + +def test_tanh_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(2, 3, 4, 5),)) + + +def test_tanh_f32_boundary_values(test_runner) -> None: + # Test with specific values spanning negative and positive ranges + x = torch.tensor([-10.0, -5.0, -1.0, 0.0, 1.0, 5.0, 10.0]) + test_runner.lower_and_run_model(Model(), (x,)) diff --git a/backends/test/suite/operators/test_threshold.py b/backends/test/suite/operators/test_threshold.py index 3f69a9f41fe..dd92e8ee016 100644 --- a/backends/test/suite/operators/test_threshold.py +++ b/backends/test/suite/operators/test_threshold.py @@ -10,13 +10,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class Model(torch.nn.Module): @@ -32,40 +27,47 @@ def forward(self, x): ) -@operator_test -class TestThreshold(OperatorTest): - @dtype_test - def test_threshold_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),), flow) +@parameterize_by_dtype +def test_threshold_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model(Model(), ((torch.rand(2, 10) * 10 - 5).to(dtype),)) - def test_threshold_f32_single_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(20),), flow) - def test_threshold_f32_multi_dim(self, flow: TestFlow) -> None: - self._test_op(Model(), (torch.randn(2, 3, 4, 5),), flow) +def test_threshold_f32_single_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(20),)) - def test_threshold_f32_custom_threshold(self, flow: TestFlow) -> None: - self._test_op(Model(threshold=1.0), (torch.randn(3, 4, 5),), flow) - def test_threshold_f32_custom_value(self, flow: TestFlow) -> None: 
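For reference while reading the threshold cases in this hunk: `torch.nn.functional.threshold(x, threshold, value)` keeps elements strictly greater than `threshold` and replaces all others with `value`, which is what the boundary-value test probes with inputs on both sides of the threshold. A quick check of that behavior:

    import torch

    x = torch.tensor([-1.0, 0.5, 1.0, 2.0])
    y = torch.nn.functional.threshold(x, 1.0, 0.0)
    # Only the element strictly greater than 1.0 passes through:
    # y == tensor([0., 0., 0., 2.])
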
- self._test_op(Model(value=2.0), (torch.randn(3, 4, 5),), flow) +def test_threshold_f32_multi_dim(test_runner) -> None: + test_runner.lower_and_run_model(Model(), (torch.randn(2, 3, 4, 5),)) - def test_threshold_f32_custom_threshold_value(self, flow: TestFlow) -> None: - self._test_op(Model(threshold=0.5, value=1.0), (torch.randn(3, 4, 5),), flow) - @unittest.skip("In place activations aren't properly defunctionalized yet.") - def test_threshold_f32_inplace(self, flow: TestFlow) -> None: - self._test_op(Model(inplace=True), (torch.randn(3, 4, 5),), flow) +def test_threshold_f32_custom_threshold(test_runner) -> None: + test_runner.lower_and_run_model(Model(threshold=1.0), (torch.randn(3, 4, 5),)) - def test_threshold_f32_boundary_values(self, flow: TestFlow) -> None: - # Test with specific values around the threshold - x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) - self._test_op(Model(), (x,), flow) - def test_threshold_f32_all_params(self, flow: TestFlow) -> None: - # Test with all parameters customized - self._test_op( - Model(threshold=0.5, value=3.0, inplace=True), - (torch.randn(3, 4, 5),), - flow, - ) +def test_threshold_f32_custom_value(test_runner) -> None: + test_runner.lower_and_run_model(Model(value=2.0), (torch.randn(3, 4, 5),)) + + +def test_threshold_f32_custom_threshold_value(test_runner) -> None: + test_runner.lower_and_run_model( + Model(threshold=0.5, value=1.0), (torch.randn(3, 4, 5),) + ) + + +@unittest.skip("In place activations aren't properly defunctionalized yet.") +def test_threshold_f32_inplace(test_runner) -> None: + test_runner.lower_and_run_model(Model(inplace=True), (torch.randn(3, 4, 5),)) + + +def test_threshold_f32_boundary_values(test_runner) -> None: + # Test with specific values around the threshold + x = torch.tensor([-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]) + test_runner.lower_and_run_model(Model(), (x,)) + + +def test_threshold_f32_all_params(test_runner) -> None: + # Test with all parameters customized + test_runner.lower_and_run_model( + Model(threshold=0.5, value=3.0, inplace=True), + (torch.randn(3, 4, 5),), + ) diff --git a/backends/test/suite/operators/test_transpose.py b/backends/test/suite/operators/test_transpose.py index 77f5e62cb18..1670dd89912 100644 --- a/backends/test/suite/operators/test_transpose.py +++ b/backends/test/suite/operators/test_transpose.py @@ -8,13 +8,8 @@ import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class TransposeModel(torch.nn.Module): @@ -27,117 +22,103 @@ def forward(self, x): return torch.transpose(x, self.dim0, self.dim1) -@operator_test -class Transpose(OperatorTest): - @dtype_test - def test_transpose_dtype(self, flow: TestFlow, dtype) -> None: - self._test_op( - TransposeModel(dim0=0, dim1=1), - (torch.rand(20, 32).to(dtype),), - flow, - ) - - def test_transpose_basic(self, flow: TestFlow) -> None: - self._test_op( - TransposeModel(dim0=0, dim1=1), - (torch.randn(20, 32),), - flow, - ) - - def test_transpose_3d(self, flow: TestFlow) -> None: - self._test_op( - TransposeModel(dim0=0, dim1=1), - (torch.randn(8, 10, 12),), - flow, - ) - - self._test_op( - TransposeModel(dim0=0, dim1=2), - (torch.randn(8, 10, 12),), - flow, - ) - - self._test_op( - TransposeModel(dim0=1, dim1=2), - (torch.randn(8, 10, 12),), - flow, - ) - - def test_transpose_4d(self, flow: TestFlow) -> None: - self._test_op( - 
TransposeModel(dim0=0, dim1=3), - (torch.randn(4, 6, 8, 10),), - flow, - ) - - self._test_op( - TransposeModel(dim0=1, dim1=2), - (torch.randn(4, 6, 8, 10),), - flow, - ) - - def test_transpose_identity(self, flow: TestFlow) -> None: - self._test_op( - TransposeModel(dim0=0, dim1=0), - (torch.randn(20, 32),), - flow, - ) - self._test_op( - TransposeModel(dim0=1, dim1=1), - (torch.randn(20, 32),), - flow, - ) - - self._test_op( - TransposeModel(dim0=0, dim1=0), - (torch.randn(8, 10, 12),), - flow, - ) - self._test_op( - TransposeModel(dim0=1, dim1=1), - (torch.randn(8, 10, 12),), - flow, - ) - self._test_op( - TransposeModel(dim0=2, dim1=2), - (torch.randn(8, 10, 12),), - flow, - ) - - def test_transpose_negative_dims(self, flow: TestFlow) -> None: - self._test_op( - TransposeModel(dim0=-3, dim1=-1), - (torch.randn(8, 10, 12),), - flow, - ) - - self._test_op( - TransposeModel(dim0=-2, dim1=-1), - (torch.randn(8, 10, 12),), - flow, - ) - - def test_transpose_different_shapes(self, flow: TestFlow) -> None: - self._test_op( - TransposeModel(dim0=0, dim1=1), - (torch.randn(20, 32),), - flow, - ) - - self._test_op( - TransposeModel(dim0=0, dim1=2), - (torch.randn(8, 10, 12),), - flow, - ) - - self._test_op( - TransposeModel(dim0=1, dim1=3), - (torch.randn(4, 6, 8, 10),), - flow, - ) - - self._test_op( - TransposeModel(dim0=0, dim1=4), - (torch.randn(2, 3, 4, 5, 6),), - flow, - ) +@parameterize_by_dtype +def test_transpose_dtype(test_runner, dtype) -> None: + test_runner.lower_and_run_model( + TransposeModel(dim0=0, dim1=1), + (torch.rand(20, 32).to(dtype),), + ) + + +def test_transpose_basic(test_runner) -> None: + test_runner.lower_and_run_model( + TransposeModel(dim0=0, dim1=1), + (torch.randn(20, 32),), + ) + + +def test_transpose_3d(test_runner) -> None: + test_runner.lower_and_run_model( + TransposeModel(dim0=0, dim1=1), + (torch.randn(8, 10, 12),), + ) + + test_runner.lower_and_run_model( + TransposeModel(dim0=0, dim1=2), + (torch.randn(8, 10, 12),), + ) + + test_runner.lower_and_run_model( + TransposeModel(dim0=1, dim1=2), + (torch.randn(8, 10, 12),), + ) + + +def test_transpose_4d(test_runner) -> None: + test_runner.lower_and_run_model( + TransposeModel(dim0=0, dim1=3), + (torch.randn(4, 6, 8, 10),), + ) + + test_runner.lower_and_run_model( + TransposeModel(dim0=1, dim1=2), + (torch.randn(4, 6, 8, 10),), + ) + + +def test_transpose_identity(test_runner) -> None: + test_runner.lower_and_run_model( + TransposeModel(dim0=0, dim1=0), + (torch.randn(20, 32),), + ) + test_runner.lower_and_run_model( + TransposeModel(dim0=1, dim1=1), + (torch.randn(20, 32),), + ) + + test_runner.lower_and_run_model( + TransposeModel(dim0=0, dim1=0), + (torch.randn(8, 10, 12),), + ) + test_runner.lower_and_run_model( + TransposeModel(dim0=1, dim1=1), + (torch.randn(8, 10, 12),), + ) + test_runner.lower_and_run_model( + TransposeModel(dim0=2, dim1=2), + (torch.randn(8, 10, 12),), + ) + + +def test_transpose_negative_dims(test_runner) -> None: + test_runner.lower_and_run_model( + TransposeModel(dim0=-3, dim1=-1), + (torch.randn(8, 10, 12),), + ) + + test_runner.lower_and_run_model( + TransposeModel(dim0=-2, dim1=-1), + (torch.randn(8, 10, 12),), + ) + + +def test_transpose_different_shapes(test_runner) -> None: + test_runner.lower_and_run_model( + TransposeModel(dim0=0, dim1=1), + (torch.randn(20, 32),), + ) + + test_runner.lower_and_run_model( + TransposeModel(dim0=0, dim1=2), + (torch.randn(8, 10, 12),), + ) + + test_runner.lower_and_run_model( + TransposeModel(dim0=1, dim1=3), + (torch.randn(4, 6, 8, 10),), + ) 
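One detail of the identity cases converted above: `torch.transpose` with `dim0 == dim1` swaps a dimension with itself, so those tests effectively verify that a backend passes the tensor through unchanged. For example:

    import torch

    x = torch.randn(8, 10, 12)
    # Swapping a dimension with itself is a no-op on the data.
    assert torch.equal(torch.transpose(x, 1, 1), x)
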
+ + test_runner.lower_and_run_model( + TransposeModel(dim0=0, dim1=4), + (torch.randn(2, 3, 4, 5, 6),), + ) diff --git a/backends/test/suite/operators/test_trunc.py b/backends/test/suite/operators/test_trunc.py index 71dcbf59176..6c385016ffc 100644 --- a/backends/test/suite/operators/test_trunc.py +++ b/backends/test/suite/operators/test_trunc.py @@ -9,13 +9,8 @@ import unittest import torch -from executorch.backends.test.suite.flow import TestFlow -from executorch.backends.test.suite.operators import ( - dtype_test, - operator_test, - OperatorTest, -) +from executorch.backends.test.suite.operators import parameterize_by_dtype class TruncModel(torch.nn.Module): @@ -26,48 +21,53 @@ def forward(self, x): return torch.trunc(x) -@operator_test -class TestTrunc(OperatorTest): - @dtype_test - def test_trunc_dtype(self, flow: TestFlow, dtype) -> None: - # Test with different dtypes - model = TruncModel().to(dtype) - self._test_op(model, (torch.rand(10, 10).to(dtype) * 10 - 5,), flow) +@parameterize_by_dtype +def test_trunc_dtype(test_runner, dtype) -> None: + # Test with different dtypes + model = TruncModel().to(dtype) + test_runner.lower_and_run_model(model, (torch.rand(10, 10).to(dtype) * 10 - 5,)) - def test_trunc_shapes(self, flow: TestFlow) -> None: - # Test with different tensor shapes - # 1D tensor - self._test_op(TruncModel(), (torch.randn(20) * 5,), flow) +def test_trunc_shapes(test_runner) -> None: + # Test with different tensor shapes - # 2D tensor - self._test_op(TruncModel(), (torch.randn(5, 10) * 5,), flow) + # 1D tensor + test_runner.lower_and_run_model(TruncModel(), (torch.randn(20) * 5,)) - # 3D tensor - self._test_op(TruncModel(), (torch.randn(3, 4, 5) * 5,), flow) + # 2D tensor + test_runner.lower_and_run_model(TruncModel(), (torch.randn(5, 10) * 5,)) - @unittest.skip("NaN and Inf are not enforced for backends.") - def test_trunc_edge_cases(self, flow: TestFlow) -> None: - # Test edge cases + # 3D tensor + test_runner.lower_and_run_model(TruncModel(), (torch.randn(3, 4, 5) * 5,)) - # Integer values (should remain unchanged) - self._test_op( - TruncModel(), - (torch.arange(-5, 6).float(),), - flow, - generate_random_test_inputs=False, - ) - # Values with different fractional parts - x = torch.tensor( - [-2.9, -2.5, -2.1, -0.9, -0.5, -0.1, 0.0, 0.1, 0.5, 0.9, 2.1, 2.5, 2.9] - ) - self._test_op(TruncModel(), (x,), flow, generate_random_test_inputs=False) +@unittest.skip("NaN and Inf are not enforced for backends.") +def test_trunc_edge_cases(test_runner) -> None: + # Test edge cases - # Tensor with infinity - x = torch.tensor([float("inf"), float("-inf"), 1.4, -1.4]) - self._test_op(TruncModel(), (x,), flow, generate_random_test_inputs=False) + # Integer values (should remain unchanged) + test_runner.lower_and_run_model( + TruncModel(), + (torch.arange(-5, 6).float(),), + generate_random_test_inputs=False, + ) - # Tensor with NaN - x = torch.tensor([float("nan"), 1.4, -1.4]) - self._test_op(TruncModel(), (x,), flow, generate_random_test_inputs=False) + # Values with different fractional parts + x = torch.tensor( + [-2.9, -2.5, -2.1, -0.9, -0.5, -0.1, 0.0, 0.1, 0.5, 0.9, 2.1, 2.5, 2.9] + ) + test_runner.lower_and_run_model( + TruncModel(), (x,), generate_random_test_inputs=False + ) + + # Tensor with infinity + x = torch.tensor([float("inf"), float("-inf"), 1.4, -1.4]) + test_runner.lower_and_run_model( + TruncModel(), (x,), generate_random_test_inputs=False + ) + + # Tensor with NaN + x = torch.tensor([float("nan"), 1.4, -1.4]) + test_runner.lower_and_run_model( + TruncModel(), 
diff --git a/backends/test/suite/operators/test_unsqueeze.py b/backends/test/suite/operators/test_unsqueeze.py
index 00951b4656c..c5d1ebe3987 100644
--- a/backends/test/suite/operators/test_unsqueeze.py
+++ b/backends/test/suite/operators/test_unsqueeze.py
@@ -8,13 +8,8 @@
 
 import torch
-from executorch.backends.test.suite.flow import TestFlow
-from executorch.backends.test.suite.operators import (
-    dtype_test,
-    operator_test,
-    OperatorTest,
-)
+from executorch.backends.test.suite.operators import parameterize_by_dtype
 
 
 class UnsqueezeModel(torch.nn.Module):
@@ -26,85 +21,74 @@ def forward(self, x):
         return torch.unsqueeze(x, self.dim)
 
 
-@operator_test
-class Unsqueeze(OperatorTest):
-    @dtype_test
-    def test_unsqueeze_dtype(self, flow: TestFlow, dtype) -> None:
-        self._test_op(
-            UnsqueezeModel(dim=1),
-            (torch.rand(3, 5).to(dtype),),
-            flow,
-        )
-
-    def test_unsqueeze_basic(self, flow: TestFlow) -> None:
-        self._test_op(
-            UnsqueezeModel(dim=1),
-            (torch.randn(3, 5),),
-            flow,
-        )
-
-    def test_unsqueeze_positions(self, flow: TestFlow) -> None:
-        self._test_op(
-            UnsqueezeModel(dim=0),
-            (torch.randn(3, 5),),
-            flow,
-        )
-
-        self._test_op(
-            UnsqueezeModel(dim=1),
-            (torch.randn(3, 5),),
-            flow,
-        )
-
-        self._test_op(
-            UnsqueezeModel(dim=2),
-            (torch.randn(3, 5),),
-            flow,
-        )
-
-    def test_unsqueeze_negative_dim(self, flow: TestFlow) -> None:
-        self._test_op(
-            UnsqueezeModel(dim=-1),
-            (torch.randn(3, 5),),
-            flow,
-        )
-
-        self._test_op(
-            UnsqueezeModel(dim=-2),
-            (torch.randn(3, 5),),
-            flow,
-        )
-
-        self._test_op(
-            UnsqueezeModel(dim=-3),
-            (torch.randn(3, 5),),
-            flow,
-        )
-
-    def test_unsqueeze_different_shapes(self, flow: TestFlow) -> None:
-        self._test_op(
-            UnsqueezeModel(dim=0),
-            (torch.randn(5),),
-            flow,
-        )
-        self._test_op(
-            UnsqueezeModel(dim=1),
-            (torch.randn(5),),
-            flow,
-        )
-
-        self._test_op(
-            UnsqueezeModel(dim=0),
-            (torch.randn(3, 4, 5),),
-            flow,
-        )
-        self._test_op(
-            UnsqueezeModel(dim=2),
-            (torch.randn(3, 4, 5),),
-            flow,
-        )
-        self._test_op(
-            UnsqueezeModel(dim=3),
-            (torch.randn(3, 4, 5),),
-            flow,
-        )
+@parameterize_by_dtype
+def test_unsqueeze_dtype(test_runner, dtype) -> None:
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=1),
+        (torch.rand(3, 5).to(dtype),),
+    )
+
+
+def test_unsqueeze_basic(test_runner) -> None:
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=1),
+        (torch.randn(3, 5),),
+    )
+
+
+def test_unsqueeze_positions(test_runner) -> None:
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=0),
+        (torch.randn(3, 5),),
+    )
+
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=1),
+        (torch.randn(3, 5),),
+    )
+
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=2),
+        (torch.randn(3, 5),),
+    )
+
+
+def test_unsqueeze_negative_dim(test_runner) -> None:
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=-1),
+        (torch.randn(3, 5),),
+    )
+
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=-2),
+        (torch.randn(3, 5),),
+    )
+
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=-3),
+        (torch.randn(3, 5),),
+    )
+
+
+def test_unsqueeze_different_shapes(test_runner) -> None:
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=0),
+        (torch.randn(5),),
+    )
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=1),
+        (torch.randn(5),),
+    )
+
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=0),
+        (torch.randn(3, 4, 5),),
+    )
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=2),
+        (torch.randn(3, 4, 5),),
+    )
+    test_runner.lower_and_run_model(
+        UnsqueezeModel(dim=3),
+        (torch.randn(3, 4, 5),),
+    )
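For reference, the `dim` values these cases exercise follow `torch.unsqueeze` semantics: for an n-dimensional input, valid positions run from -(n + 1) to n, and negative values index from the end of the result. A quick illustration, plain PyTorch and independent of the suite:

    import torch

    x = torch.randn(3, 5)
    torch.unsqueeze(x, 0).shape   # torch.Size([1, 3, 5])
    torch.unsqueeze(x, 2).shape   # torch.Size([3, 5, 1])
    torch.unsqueeze(x, -1).shape  # torch.Size([3, 5, 1])
    torch.unsqueeze(x, -3).shape  # torch.Size([1, 3, 5])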
diff --git a/backends/test/suite/operators/test_upsample_bilinear2d.py b/backends/test/suite/operators/test_upsample_bilinear2d.py
index 010712b2e5c..34f32b09150 100644
--- a/backends/test/suite/operators/test_upsample_bilinear2d.py
+++ b/backends/test/suite/operators/test_upsample_bilinear2d.py
@@ -9,13 +9,8 @@
 from typing import Optional, Tuple, Union
 
 import torch
-from executorch.backends.test.suite.flow import TestFlow
-from executorch.backends.test.suite.operators import (
-    dtype_test,
-    operator_test,
-    OperatorTest,
-)
+from executorch.backends.test.suite.operators import parameterize_by_dtype
 
 
 class ModelWithSize(torch.nn.Module):
@@ -53,221 +48,192 @@ def forward(self, x):
         )
 
 
-@operator_test
-class TestUpsampleBilinear2d(OperatorTest):
-    @dtype_test
-    def test_upsample_bilinear2d_dtype(self, flow: TestFlow, dtype) -> None:
-        # Input shape: (batch_size, channels, height, width)
-        model = ModelWithSize(size=(10, 10), align_corners=False).to(dtype)
-        self._test_op(model, (torch.rand(2, 3, 5, 5).to(dtype),), flow)
-
-    def test_upsample_bilinear2d_sizes(self, flow: TestFlow) -> None:
-        # Test with different input and output sizes
-
-        # Small input, larger output
-        self._test_op(
-            ModelWithSize(size=(8, 8), align_corners=False),
-            (torch.randn(1, 2, 4, 4),),
-            flow,
-        )
-        self._test_op(
-            ModelWithSize(size=(8, 8), align_corners=True),
-            (torch.randn(1, 2, 4, 4),),
-            flow,
-        )
-
-        # Larger input, even larger output
-        self._test_op(
-            ModelWithSize(size=(16, 16), align_corners=False),
-            (torch.randn(1, 2, 8, 8),),
-            flow,
-        )
-        self._test_op(
-            ModelWithSize(size=(16, 16), align_corners=True),
-            (torch.randn(1, 2, 8, 8),),
-            flow,
-        )
-
-        # Different height and width
-        self._test_op(
-            ModelWithSize(size=(16, 8), align_corners=False),
-            (torch.randn(1, 2, 8, 4),),
-            flow,
-        )
-        self._test_op(
-            ModelWithSize(size=(16, 8), align_corners=True),
-            (torch.randn(1, 2, 8, 4),),
-            flow,
-        )
-
-        # Asymmetric upsampling
-        self._test_op(
-            ModelWithSize(size=(20, 10), align_corners=False),
-            (torch.randn(1, 2, 5, 5),),
-            flow,
-        )
-        self._test_op(
-            ModelWithSize(size=(20, 10), align_corners=True),
-            (torch.randn(1, 2, 5, 5),),
-            flow,
-        )
-
-    def test_upsample_bilinear2d_scale_factors(self, flow: TestFlow) -> None:
-        # Test with different scale factors
-
-        # Scale by 2
-        self._test_op(
-            ModelWithScale(scale_factor=2.0, align_corners=False),
-            (torch.randn(1, 2, 5, 5),),
-            flow,
-        )
-        self._test_op(
-            ModelWithScale(scale_factor=2.0, align_corners=True),
-            (torch.randn(1, 2, 5, 5),),
-            flow,
-        )
-
-        # Scale by 3
-        self._test_op(
-            ModelWithScale(scale_factor=3.0, align_corners=False),
-            (torch.randn(1, 2, 5, 5),),
-            flow,
-        )
-        self._test_op(
-            ModelWithScale(scale_factor=3.0, align_corners=True),
-            (torch.randn(1, 2, 5, 5),),
-            flow,
-        )
-
-        # Scale by 1.5
-        self._test_op(
-            ModelWithScale(scale_factor=1.5, align_corners=False),
-            (torch.randn(1, 2, 6, 6),),
-            flow,
-        )
-        self._test_op(
-            ModelWithScale(scale_factor=1.5, align_corners=True),
-            (torch.randn(1, 2, 6, 6),),
-            flow,
-        )
-
-        # Different scales for height and width
-        self._test_op(
-            ModelWithScale(scale_factor=(2.0, 1.5), align_corners=False),
-            (torch.randn(1, 2, 5, 6),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-        self._test_op(
-            ModelWithScale(scale_factor=(2.0, 1.5), align_corners=True),
-            (torch.randn(1, 2, 5, 6),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-
-    def test_upsample_bilinear2d_batch_sizes(self, flow: TestFlow) -> None:
-        # Test with different batch sizes
-        self._test_op(
-            ModelWithSize(size=(10, 10), align_corners=False),
-            (torch.randn(1, 3, 5, 5),),
-            flow,
-        )
-        self._test_op(
-            ModelWithSize(size=(10, 10), align_corners=False),
-            (torch.randn(4, 3, 5, 5),),
-            flow,
-        )
-        self._test_op(
-            ModelWithSize(size=(10, 10), align_corners=False),
-            (torch.randn(8, 3, 5, 5),),
-            flow,
-        )
-
-    def test_upsample_bilinear2d_channels(self, flow: TestFlow) -> None:
-        # Test with different numbers of channels
-        self._test_op(
-            ModelWithSize(size=(10, 10), align_corners=False),
-            (torch.randn(2, 1, 5, 5),),
-            flow,
-        )  # Grayscale
-        self._test_op(
-            ModelWithSize(size=(10, 10), align_corners=False),
-            (torch.randn(2, 3, 5, 5),),
-            flow,
-        )  # RGB
-        self._test_op(
-            ModelWithSize(size=(10, 10), align_corners=False),
-            (torch.randn(2, 4, 5, 5),),
-            flow,
-        )  # RGBA
-        self._test_op(
-            ModelWithSize(size=(10, 10), align_corners=False),
-            (torch.randn(2, 16, 5, 5),),
-            flow,
-        )  # Multi-channel
-
-    def test_upsample_bilinear2d_same_size(self, flow: TestFlow) -> None:
-        # Test with output size same as input size (should be identity)
-        self._test_op(
-            ModelWithSize(size=(5, 5), align_corners=False),
-            (torch.randn(2, 3, 5, 5),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-        self._test_op(
-            ModelWithSize(size=(5, 5), align_corners=True),
-            (torch.randn(2, 3, 5, 5),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-        self._test_op(
-            ModelWithScale(scale_factor=1.0, align_corners=False),
-            (torch.randn(2, 3, 5, 5),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-        self._test_op(
-            ModelWithScale(scale_factor=1.0, align_corners=True),
-            (torch.randn(2, 3, 5, 5),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-
-    def test_upsample_bilinear2d_downsampling(self, flow: TestFlow) -> None:
-        # Test downsampling
-        self._test_op(
-            ModelWithSize(size=(4, 4), align_corners=False),
-            (torch.randn(2, 3, 8, 8),),
-            flow,
-        )
-        self._test_op(
-            ModelWithSize(size=(4, 4), align_corners=True),
-            (torch.randn(2, 3, 8, 8),),
-            flow,
-        )
-        self._test_op(
-            ModelWithScale(scale_factor=0.5, align_corners=False),
-            (torch.randn(2, 3, 8, 8),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-        self._test_op(
-            ModelWithScale(scale_factor=0.5, align_corners=True),
-            (torch.randn(2, 3, 8, 8),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-
-        # Test with non-integer downsampling factor
-        self._test_op(
-            ModelWithScale(scale_factor=0.75, align_corners=False),
-            (torch.randn(2, 3, 8, 8),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-        self._test_op(
-            ModelWithScale(scale_factor=0.75, align_corners=True),
-            (torch.randn(2, 3, 8, 8),),
-            flow,
-            generate_random_test_inputs=False,
-        )
+@parameterize_by_dtype
+def test_upsample_bilinear2d_dtype(test_runner, dtype) -> None:
+    # Input shape: (batch_size, channels, height, width)
+    model = ModelWithSize(size=(10, 10), align_corners=False).to(dtype)
+    test_runner.lower_and_run_model(model, (torch.rand(2, 3, 5, 5).to(dtype),))
+
+
+def test_upsample_bilinear2d_sizes(test_runner) -> None:
+    # Test with different input and output sizes
+
+    # Small input, larger output
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(8, 8), align_corners=False),
+        (torch.randn(1, 2, 4, 4),),
+    )
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(8, 8), align_corners=True),
+        (torch.randn(1, 2, 4, 4),),
+    )
+
+    # Larger input, even larger output
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(16, 16), align_corners=False),
+        (torch.randn(1, 2, 8, 8),),
+    )
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(16, 16), align_corners=True),
+        (torch.randn(1, 2, 8, 8),),
+    )
+
+    # Different height and width
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(16, 8), align_corners=False),
+        (torch.randn(1, 2, 8, 4),),
+    )
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(16, 8), align_corners=True),
+        (torch.randn(1, 2, 8, 4),),
+    )
+
+    # Asymmetric upsampling
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(20, 10), align_corners=False),
+        (torch.randn(1, 2, 5, 5),),
+    )
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(20, 10), align_corners=True),
+        (torch.randn(1, 2, 5, 5),),
+    )
+
+
+def test_upsample_bilinear2d_scale_factors(test_runner) -> None:
+    # Test with different scale factors
+
+    # Scale by 2
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=2.0, align_corners=False),
+        (torch.randn(1, 2, 5, 5),),
+    )
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=2.0, align_corners=True),
+        (torch.randn(1, 2, 5, 5),),
+    )
+
+    # Scale by 3
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=3.0, align_corners=False),
+        (torch.randn(1, 2, 5, 5),),
+    )
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=3.0, align_corners=True),
+        (torch.randn(1, 2, 5, 5),),
+    )
+
+    # Scale by 1.5
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=1.5, align_corners=False),
+        (torch.randn(1, 2, 6, 6),),
+    )
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=1.5, align_corners=True),
+        (torch.randn(1, 2, 6, 6),),
+    )
+
+    # Different scales for height and width
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=(2.0, 1.5), align_corners=False),
+        (torch.randn(1, 2, 5, 6),),
+        generate_random_test_inputs=False,
+    )
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=(2.0, 1.5), align_corners=True),
+        (torch.randn(1, 2, 5, 6),),
+        generate_random_test_inputs=False,
+    )
+
+
+def test_upsample_bilinear2d_batch_sizes(test_runner) -> None:
+    # Test with different batch sizes
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10), align_corners=False),
+        (torch.randn(1, 3, 5, 5),),
+    )
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10), align_corners=False),
+        (torch.randn(4, 3, 5, 5),),
+    )
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10), align_corners=False),
+        (torch.randn(8, 3, 5, 5),),
+    )
+
+
+def test_upsample_bilinear2d_channels(test_runner) -> None:
+    # Test with different numbers of channels
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10), align_corners=False),
+        (torch.randn(2, 1, 5, 5),),
+    )  # Grayscale
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10), align_corners=False),
+        (torch.randn(2, 3, 5, 5),),
+    )  # RGB
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10), align_corners=False),
+        (torch.randn(2, 4, 5, 5),),
+    )  # RGBA
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10), align_corners=False),
+        (torch.randn(2, 16, 5, 5),),
+    )  # Multi-channel
+
+
+def test_upsample_bilinear2d_same_size(test_runner) -> None:
+    # Test with output size same as input size (should be identity)
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(5, 5), align_corners=False),
+        (torch.randn(2, 3, 5, 5),),
+        generate_random_test_inputs=False,
+    )
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(5, 5), align_corners=True),
+        (torch.randn(2, 3, 5, 5),),
+        generate_random_test_inputs=False,
+    )
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=1.0, align_corners=False),
+        (torch.randn(2, 3, 5, 5),),
+        generate_random_test_inputs=False,
+    )
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=1.0, align_corners=True),
+        (torch.randn(2, 3, 5, 5),),
+        generate_random_test_inputs=False,
+    )
+
+
+def test_upsample_bilinear2d_downsampling(test_runner) -> None:
+    # Test downsampling
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(4, 4), align_corners=False),
+        (torch.randn(2, 3, 8, 8),),
+    )
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(4, 4), align_corners=True),
+        (torch.randn(2, 3, 8, 8),),
+    )
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=0.5, align_corners=False),
+        (torch.randn(2, 3, 8, 8),),
+        generate_random_test_inputs=False,
+    )
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=0.5, align_corners=True),
+        (torch.randn(2, 3, 8, 8),),
+        generate_random_test_inputs=False,
+    )
+
+    # Test with non-integer downsampling factor
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=0.75, align_corners=False),
+        (torch.randn(2, 3, 8, 8),),
+        generate_random_test_inputs=False,
+    )
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=0.75, align_corners=True),
+        (torch.randn(2, 3, 8, 8),),
+        generate_random_test_inputs=False,
+    )
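The paired `align_corners=False`/`True` cases above are not redundant: the flag changes how output pixels map back to input coordinates, so the two modes produce different values for the same input. A standalone check, outside the suite:

    import torch
    import torch.nn.functional as F

    x = torch.arange(4, dtype=torch.float32).reshape(1, 1, 2, 2)
    a = F.interpolate(x, size=(4, 4), mode="bilinear", align_corners=False)
    b = F.interpolate(x, size=(4, 4), mode="bilinear", align_corners=True)
    print(torch.allclose(a, b))  # False: the two sampling grids differ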
diff --git a/backends/test/suite/operators/test_upsample_nearest2d.py b/backends/test/suite/operators/test_upsample_nearest2d.py
index 3ae877b5f4f..ee9ee3594aa 100644
--- a/backends/test/suite/operators/test_upsample_nearest2d.py
+++ b/backends/test/suite/operators/test_upsample_nearest2d.py
@@ -9,13 +9,8 @@
 from typing import Optional, Tuple, Union
 
 import torch
-from executorch.backends.test.suite.flow import TestFlow
-from executorch.backends.test.suite.operators import (
-    dtype_test,
-    operator_test,
-    OperatorTest,
-)
+from executorch.backends.test.suite.operators import parameterize_by_dtype
 
 
 class ModelWithSize(torch.nn.Module):
@@ -44,94 +39,105 @@ def forward(self, x):
         )
 
 
-@operator_test
-class TestUpsampleNearest2d(OperatorTest):
-    @dtype_test
-    def test_upsample_nearest2d_dtype(self, flow: TestFlow, dtype) -> None:
-        # Input shape: (batch_size, channels, height, width)
-        model = ModelWithSize(size=(10, 10)).to(dtype)
-        self._test_op(model, (torch.rand(2, 3, 5, 5).to(dtype),), flow)
-
-    def test_upsample_nearest2d_sizes(self, flow: TestFlow) -> None:
-        # Test with different input and output sizes
-
-        # Small input, larger output
-        self._test_op(ModelWithSize(size=(8, 8)), (torch.randn(1, 2, 4, 4),), flow)
-
-        # Larger input, even larger output
-        self._test_op(ModelWithSize(size=(16, 16)), (torch.randn(1, 2, 8, 8),), flow)
-
-        # Different height and width
-        self._test_op(ModelWithSize(size=(16, 8)), (torch.randn(1, 2, 8, 4),), flow)
-
-        # Asymmetric upsampling
-        self._test_op(ModelWithSize(size=(20, 10)), (torch.randn(1, 2, 5, 5),), flow)
-
-    def test_upsample_nearest2d_scale_factors(self, flow: TestFlow) -> None:
-        # Test with different scale factors
-
-        # Scale by 2
-        self._test_op(
-            ModelWithScale(scale_factor=2.0),
-            (torch.randn(1, 2, 5, 5),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-
-        # Scale by 3
-        self._test_op(
-            ModelWithScale(scale_factor=3.0),
-            (torch.randn(1, 2, 5, 5),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-
-        # Scale by 1.5
-        self._test_op(
-            ModelWithScale(scale_factor=1.5),
-            (torch.randn(1, 2, 6, 6),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-
-        # Different scales for height and width
-        self._test_op(
-            ModelWithScale(scale_factor=(2.0, 1.5)),
-            (torch.randn(1, 2, 5, 6),),
-            flow,
-            generate_random_test_inputs=False,
-        )
-
-    def test_upsample_nearest2d_batch_sizes(self, flow: TestFlow) -> None:
-        # Test with different batch sizes
-        self._test_op(ModelWithSize(size=(10, 10)), (torch.randn(1, 3, 5, 5),), flow)
-        self._test_op(ModelWithSize(size=(10, 10)), (torch.randn(4, 3, 5, 5),), flow)
-        self._test_op(ModelWithSize(size=(10, 10)), (torch.randn(8, 3, 5, 5),), flow)
-
-    def test_upsample_nearest2d_channels(self, flow: TestFlow) -> None:
-        # Test with different numbers of channels
-        self._test_op(
-            ModelWithSize(size=(10, 10)), (torch.randn(2, 1, 5, 5),), flow
-        )  # Grayscale
-        self._test_op(
-            ModelWithSize(size=(10, 10)), (torch.randn(2, 3, 5, 5),), flow
-        )  # RGB
-        self._test_op(
-            ModelWithSize(size=(10, 10)), (torch.randn(2, 4, 5, 5),), flow
-        )  # RGBA
-        self._test_op(
-            ModelWithSize(size=(10, 10)), (torch.randn(2, 16, 5, 5),), flow
-        )  # Multi-channel
-
-    def test_upsample_nearest2d_same_size(self, flow: TestFlow) -> None:
-        # Test with output size same as input size (should be identity)
-        self._test_op(
-            ModelWithSize(size=(5, 5)),
-            (torch.randn(2, 3, 5, 5),),
-            flow,
-        )
-        self._test_op(
-            ModelWithScale(scale_factor=1.0),
-            (torch.randn(2, 3, 5, 5),),
-            flow,
-        )
+@parameterize_by_dtype
+def test_upsample_nearest2d_dtype(test_runner, dtype) -> None:
+    # Input shape: (batch_size, channels, height, width)
+    model = ModelWithSize(size=(10, 10)).to(dtype)
+    test_runner.lower_and_run_model(model, (torch.rand(2, 3, 5, 5).to(dtype),))
+
+
+def test_upsample_nearest2d_sizes(test_runner) -> None:
+    # Test with different input and output sizes
+
+    # Small input, larger output
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(8, 8)), (torch.randn(1, 2, 4, 4),)
+    )
+
+    # Larger input, even larger output
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(16, 16)), (torch.randn(1, 2, 8, 8),)
+    )
+
+    # Different height and width
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(16, 8)), (torch.randn(1, 2, 8, 4),)
+    )
+
+    # Asymmetric upsampling
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(20, 10)), (torch.randn(1, 2, 5, 5),)
+    )
+
+
+def test_upsample_nearest2d_scale_factors(test_runner) -> None:
+    # Test with different scale factors
+
+    # Scale by 2
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=2.0),
+        (torch.randn(1, 2, 5, 5),),
+        generate_random_test_inputs=False,
+    )
+
+    # Scale by 3
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=3.0),
+        (torch.randn(1, 2, 5, 5),),
+        generate_random_test_inputs=False,
+    )
+
+    # Scale by 1.5
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=1.5),
+        (torch.randn(1, 2, 6, 6),),
+        generate_random_test_inputs=False,
+    )
+
+    # Different scales for height and width
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=(2.0, 1.5)),
+        (torch.randn(1, 2, 5, 6),),
+        generate_random_test_inputs=False,
+    )
+
+
+def test_upsample_nearest2d_batch_sizes(test_runner) -> None:
+    # Test with different batch sizes
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10)), (torch.randn(1, 3, 5, 5),)
+    )
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10)), (torch.randn(4, 3, 5, 5),)
+    )
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10)), (torch.randn(8, 3, 5, 5),)
+    )
+
+
+def test_upsample_nearest2d_channels(test_runner) -> None:
+    # Test with different numbers of channels
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10)), (torch.randn(2, 1, 5, 5),)
+    )  # Grayscale
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10)), (torch.randn(2, 3, 5, 5),)
+    )  # RGB
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10)), (torch.randn(2, 4, 5, 5),)
+    )  # RGBA
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(10, 10)), (torch.randn(2, 16, 5, 5),)
+    )  # Multi-channel
+
+
+def test_upsample_nearest2d_same_size(test_runner) -> None:
+    # Test with output size same as input size (should be identity)
+    test_runner.lower_and_run_model(
+        ModelWithSize(size=(5, 5)),
+        (torch.randn(2, 3, 5, 5),),
+    )
+    test_runner.lower_and_run_model(
+        ModelWithScale(scale_factor=1.0),
+        (torch.randn(2, 3, 5, 5),),
+    )
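Unlike the bilinear tests, there are no `align_corners` variants here; `align_corners` is only accepted for the interpolating modes, and for integer scale factors nearest-neighbor upsampling simply replicates source pixels, which is easy to verify by hand:

    import torch
    import torch.nn.functional as F

    x = torch.tensor([[[[1.0, 2.0], [3.0, 4.0]]]])  # shape (1, 1, 2, 2)
    y = F.interpolate(x, scale_factor=2.0, mode="nearest")
    # y[0, 0] replicates each source pixel into a 2x2 block:
    # [[1., 1., 2., 2.],
    #  [1., 1., 2., 2.],
    #  [3., 3., 4., 4.],
    #  [3., 3., 4., 4.]]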
diff --git a/backends/test/suite/operators/test_view.py b/backends/test/suite/operators/test_view.py
index 4a20d1f9632..a9f61923de7 100644
--- a/backends/test/suite/operators/test_view.py
+++ b/backends/test/suite/operators/test_view.py
@@ -9,13 +9,8 @@
 from typing import List
 
 import torch
-from executorch.backends.test.suite.flow import TestFlow
-from executorch.backends.test.suite.operators import (
-    dtype_test,
-    operator_test,
-    OperatorTest,
-)
+from executorch.backends.test.suite.operators import parameterize_by_dtype
 
 
 class ViewModel(torch.nn.Module):
@@ -27,56 +22,48 @@ def forward(self, x):
         return x.view(self.shape)
 
 
-@operator_test
-class View(OperatorTest):
-    @dtype_test
-    def test_view_dtype(self, flow: TestFlow, dtype) -> None:
-        self._test_op(
-            ViewModel(shape=[3, 5]),
-            (torch.rand(15).to(dtype),),
-            flow,
-        )
-
-    def test_view_dimensions(self, flow: TestFlow) -> None:
-        self._test_op(
-            ViewModel(shape=[3, 5]),
-            (torch.randn(15),),
-            flow,
-        )
-
-        self._test_op(
-            ViewModel(shape=[20]),
-            (torch.randn(4, 5),),
-            flow,
-        )
-
-        self._test_op(
-            ViewModel(shape=[2, 2, 5]),
-            (torch.randn(4, 5),),
-            flow,
-        )
-
-        self._test_op(
-            ViewModel(shape=[6, 4]),
-            (torch.randn(3, 2, 4),),
-            flow,
-        )
-
-    def test_view_inferred_dimension(self, flow: TestFlow) -> None:
-        self._test_op(
-            ViewModel(shape=[3, -1]),
-            (torch.randn(15),),
-            flow,
-        )
-
-        self._test_op(
-            ViewModel(shape=[-1, 5]),
-            (torch.randn(15),),
-            flow,
-        )
-
-        self._test_op(
-            ViewModel(shape=[2, -1, 3]),
-            (torch.randn(24),),
-            flow,
-        )
+@parameterize_by_dtype
+def test_view_dtype(test_runner, dtype) -> None:
+    test_runner.lower_and_run_model(
+        ViewModel(shape=[3, 5]),
+        (torch.rand(15).to(dtype),),
+    )
+
+
+def test_view_dimensions(test_runner) -> None:
+    test_runner.lower_and_run_model(
+        ViewModel(shape=[3, 5]),
+        (torch.randn(15),),
+    )
+
+    test_runner.lower_and_run_model(
+        ViewModel(shape=[20]),
+        (torch.randn(4, 5),),
+    )
+
+    test_runner.lower_and_run_model(
+        ViewModel(shape=[2, 2, 5]),
+        (torch.randn(4, 5),),
+    )
+
+    test_runner.lower_and_run_model(
+        ViewModel(shape=[6, 4]),
+        (torch.randn(3, 2, 4),),
+    )
+
+
+def test_view_inferred_dimension(test_runner) -> None:
+    test_runner.lower_and_run_model(
+        ViewModel(shape=[3, -1]),
+        (torch.randn(15),),
+    )
+
+    test_runner.lower_and_run_model(
+        ViewModel(shape=[-1, 5]),
+        (torch.randn(15),),
+    )
+
+    test_runner.lower_and_run_model(
+        ViewModel(shape=[2, -1, 3]),
+        (torch.randn(24),),
+    )
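The inferred-dimension cases in test_view.py rely on `view`'s rule that at most one target dimension may be -1, which is then solved from the element count:

    import torch

    x = torch.randn(15)
    x.view(3, -1).shape  # torch.Size([3, 5]): 15 / 3 = 5
    x.view(-1, 5).shape  # torch.Size([3, 5]): 15 / 5 = 3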
From 5ce01febb8316558927a467cce20b8d12698b06e Mon Sep 17 00:00:00 2001
From: Gregory James Comer
Date: Tue, 23 Sep 2025 17:03:54 -0700
Subject: [PATCH 9/9] Update

[ghstack-poisoned]
---
 backends/test/suite/operators/replace.sed | 10 ----------
 1 file changed, 10 deletions(-)
 delete mode 100644 backends/test/suite/operators/replace.sed

diff --git a/backends/test/suite/operators/replace.sed b/backends/test/suite/operators/replace.sed
deleted file mode 100644
index 5ee6db9b7cb..00000000000
--- a/backends/test/suite/operators/replace.sed
+++ /dev/null
@@ -1,10 +0,0 @@
-s/self\, flow\: TestFlow/test_runner/g
-s/self\._test_op/test_runner.lower_and_run_model/g
-s/, flow//g
-/@operator_test/d
-/(OperatorTest):/d
-s/dtype_test/parameterize_by_dtype/g
-/flow,/d
-/import TestFlow/d
-/operator_test,/d
-/OperatorTest,/d
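The deleted replace.sed above documents the mechanical rewrite applied across the operator tests in this stack. As a hypothetical before/after on a minimal test (`FooModel` is a stand-in, not a real suite model), the rules turn an old unittest-style method into the new pytest-style function, modulo re-indentation, which sed does not perform:

    # Before: unittest-style (old API). FooModel is illustrative only.
    @operator_test
    class TestFoo(OperatorTest):
        @dtype_test
        def test_foo_dtype(self, flow: TestFlow, dtype) -> None:
            self._test_op(FooModel(), (torch.rand(4).to(dtype),), flow)

    # After: pytest-style, as produced by the sed rules plus re-indentation.
    @parameterize_by_dtype
    def test_foo_dtype(test_runner, dtype) -> None:
        test_runner.lower_and_run_model(FooModel(), (torch.rand(4).to(dtype),))

Note that the script is line-oriented: multi-line call sites, where `flow,` sits on its own line, are handled by the `/flow,/d` deletion rule rather than the `s/, flow//g` substitution.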