diff --git a/examples/minimal_modelgen_example.py b/examples/minimal_modelgen_example.py
new file mode 100644
index 0000000..9802685
--- /dev/null
+++ b/examples/minimal_modelgen_example.py
@@ -0,0 +1,23 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from facto.inputgen.utils.config import TensorConfig
+from facto.modelgen.gen import OpModelGenerator
+from facto.specdb.db import SpecDictDB
+from facto.utils.ops import get_op_overload
+
+
+def main():
+    op_name = "add.Tensor"
+    spec = SpecDictDB[op_name]
+    op = get_op_overload(op_name)
+    config = TensorConfig(device="cpu", half_precision=False)
+    for model, args, kwargs in OpModelGenerator(op, spec, config).gen(verbose=True):
+        model(*args, **kwargs)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/facto/inputgen/argtuple/gen.py b/facto/inputgen/argtuple/gen.py
index e9ef87a..cb6e6f4 100644
--- a/facto/inputgen/argtuple/gen.py
+++ b/facto/inputgen/argtuple/gen.py
@@ -48,6 +48,15 @@ def _apply_constraints_to_arg(self, arg, config: TensorConfig):
         # Create a copy of the argument with potentially modified constraints
         modified_arg = deepcopy(arg)
 
+        # Add rank constraints for tensor arguments when zerodim tensors are not allowed
+        if not config.is_allowed(Condition.ALLOW_ZERODIM):
+            if arg.type.is_tensor():
+                rank_constraint = cp.Rank.Ge(lambda deps: 1)
+                modified_arg.constraints = modified_arg.constraints + [rank_constraint]
+            elif arg.type.is_tensor_list():
+                rank_constraint = cp.Rank.Ge(lambda deps, length, ix: 1)
+                modified_arg.constraints = modified_arg.constraints + [rank_constraint]
+
         # Add size constraints for tensor arguments when empty tensors are not allowed
         if not config.is_allowed(Condition.ALLOW_EMPTY):
             if arg.type.is_tensor() or arg.type.is_tensor_list():
@@ -91,12 +100,14 @@ def gen_tuple(
         return posargs, inkwargs, outargs
 
     def gen(
-        self, *, valid: bool = True, out: bool = False
+        self, *, valid: bool = True, out: bool = False, verbose: bool = False
     ) -> Generator[
         Tuple[List[Any], OrderedDict[str, Any], OrderedDict[str, Any]], Any, Any
     ]:
         engine = MetaArgTupleEngine(self._modified_spec, out=out)
         for meta_tuple in engine.gen(valid=valid):
+            if verbose:
+                print(f"Generated meta_tuple: {[str(x) for x in meta_tuple]}")
             yield self.gen_tuple(meta_tuple, out=out)
 
     def gen_errors(
diff --git a/facto/inputgen/argument/gen.py b/facto/inputgen/argument/gen.py
index 02891a5..7860c54 100644
--- a/facto/inputgen/argument/gen.py
+++ b/facto/inputgen/argument/gen.py
@@ -246,7 +246,7 @@ def get_random_tensor(self, size, dtype, high=None, low=None) -> torch.Tensor:
         )
 
         t = torch.randint(
-            low=low, high=high, size=size, dtype=dtype, generator=torch_rng
+            low=low, high=high, size=size, dtype=torch.float, generator=torch_rng
         )
         if not self.space.contains(0):
             if high > 0:
@@ -254,19 +254,19 @@ def get_random_tensor(self, size, dtype, high=None, low=None) -> torch.Tensor:
                     low=max(1, low),
                     high=high,
                     size=size,
-                    dtype=dtype,
+                    dtype=torch.float,
                     generator=torch_rng,
                 )
             else:
                 pos = torch.randint(
-                    low=low, high=0, size=size, dtype=dtype, generator=torch_rng
+                    low=low, high=0, size=size, dtype=torch.float, generator=torch_rng
                 )
                 t = torch.where(t == 0, pos, t)
 
         if dtype in dt._int:
-            return t
+            return t.to(dtype)
         if dtype in dt._floating:
-            return t / FLOAT_RESOLUTION
+            return (t / FLOAT_RESOLUTION).to(dtype)
 
         raise ValueError(f"Unsupported Dtype: {dtype}")
 
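The get_random_tensor change above now samples in float32 and casts to the requested dtype at the end. A minimal sketch of that pattern, assuming (this is not stated in the patch) that the goal is to sample once in a dtype torch.randint reliably supports; the bounds, seed, and target dtypes below are illustrative:

    import torch

    g = torch.Generator().manual_seed(0)
    # Sample integers in float32, mirroring the patched code path.
    t = torch.randint(low=-8, high=8, size=(2, 3), dtype=torch.float, generator=g)
    print(t.to(torch.int32))             # integer targets: cast after sampling
    print((t / 8.0).to(torch.bfloat16))  # floating targets: scale, then cast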
diff --git a/facto/inputgen/utils/config.py b/facto/inputgen/utils/config.py
index 57a1852..eef2f72 100644
--- a/facto/inputgen/utils/config.py
+++ b/facto/inputgen/utils/config.py
@@ -10,6 +10,7 @@
 
 
 class Condition(str, Enum):
+    ALLOW_ZERODIM = "zerodim"
     ALLOW_EMPTY = "empty"
     ALLOW_TRANSPOSED = "transposed"
     ALLOW_PERMUTED = "permuted"
@@ -23,6 +24,7 @@ def __init__(self, device="cpu", disallow_dtypes=None, **conditions):
         self.device = device
         self.disallow_dtypes = disallow_dtypes or []
         self.conditions = {condition: False for condition in Condition}
+        self.conditions[Condition.ALLOW_ZERODIM] = True  # allow zerodim by default
         for condition, value in conditions.items():
            if condition in self.conditions:
                self.conditions[condition] = value
diff --git a/facto/modelgen/__init__.py b/facto/modelgen/__init__.py
new file mode 100644
index 0000000..e69de29
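A short sketch of how the new condition is consumed; both calls mirror usage that already appears elsewhere in this diff (zerodim=False in test_executorch_portable.py, is_allowed in argtuple/gen.py):

    from facto.inputgen.utils.config import Condition, TensorConfig

    config = TensorConfig(device="cpu", zerodim=False)  # opt out of 0-d tensors
    print(config.is_allowed(Condition.ALLOW_ZERODIM))   # expected: False
    print(TensorConfig().is_allowed(Condition.ALLOW_ZERODIM))  # default: True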
diff --git a/facto/modelgen/gen.py b/facto/modelgen/gen.py
new file mode 100644
index 0000000..2b6bc87
--- /dev/null
+++ b/facto/modelgen/gen.py
@@ -0,0 +1,234 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Any, Dict, Generator, List, Optional, Tuple
+
+import torch.nn as nn
+
+from facto.inputgen.argtuple.gen import ArgumentTupleGenerator
+from facto.inputgen.specs.model import Spec
+from facto.inputgen.utils.config import TensorConfig
+
+
+def separate_forward_and_model_inputs(
+    spec: Spec, args: List[Any], kwargs: Dict[str, Any]
+) -> Tuple[List[Any], Dict[str, Any], List[Any], Dict[str, Any]]:
+    """
+    Separate forward inputs from model parameters using FACTO's ArgType system.
+
+    Args:
+        spec: The operation specification containing argument type information
+        args: All positional arguments
+        kwargs: All keyword arguments
+
+    Returns:
+        Tuple of (forward_args, forward_kwargs, model_args, model_kwargs)
+    """
+    forward_args = []
+    model_args = []
+
+    forward_kwargs = {}
+    model_kwargs = {}
+
+    for i, inarg in enumerate(spec.inspec):
+        if inarg.kw:
+            if inarg.type.is_tensor() or inarg.type.is_tensor_list():
+                forward_kwargs[inarg.name] = kwargs[inarg.name]
+            else:
+                model_kwargs[inarg.name] = kwargs[inarg.name]
+        else:
+            if inarg.type.is_tensor() or inarg.type.is_tensor_list():
+                forward_args.append(args[i])
+            else:
+                model_args.append(args[i])
+
+    return forward_args, forward_kwargs, model_args, model_kwargs
+
+
+def combine_forward_and_model_inputs(
+    spec: Spec,
+    forward_args: Tuple[Any, ...],
+    forward_kwargs: Dict[str, Any],
+    model_args: Tuple[Any, ...],
+    model_kwargs: Dict[str, Any],
+) -> Tuple[List[Any], Dict[str, Any]]:
+    """
+    Combine forward inputs with model parameters using FACTO's ArgType system.
+
+    Args:
+        spec: The operation specification containing argument type information
+        forward_args: Positional tensor inputs destined for forward()
+        forward_kwargs: Keyword tensor inputs destined for forward()
+        model_args: All model parameters
+        model_kwargs: All model keyword parameters
+
+    Returns:
+        Tuple of (args, kwargs)
+    """
+    combined_args = []
+    combined_kwargs = {}
+
+    forward_args_ix = 0
+    model_args_ix = 0
+
+    # Iterate over the input specification
+    for inarg in spec.inspec:
+        if inarg.kw:
+            # If the argument is a keyword argument, check if it's a tensor or tensor list
+            if inarg.type.is_tensor() or inarg.type.is_tensor_list():
+                combined_kwargs[inarg.name] = forward_kwargs[inarg.name]
+            else:
+                combined_kwargs[inarg.name] = model_kwargs[inarg.name]
+        else:
+            # If the argument is a positional argument, check if it's a tensor or tensor list
+            if inarg.type.is_tensor() or inarg.type.is_tensor_list():
+                combined_args.append(forward_args[forward_args_ix])
+                forward_args_ix += 1
+            else:
+                combined_args.append(model_args[model_args_ix])
+                model_args_ix += 1
+
+    return combined_args, combined_kwargs
+
+
+class OpModel(nn.Module):
+    """
+    A PyTorch model that wraps a torch aten operation.
+
+    This class creates a simple model that applies a given torch operation
+    to its inputs in the forward pass.
+    """
+
+    def __init__(
+        self, op: Any, spec: Spec, op_name: str = "", *model_args, **model_kwargs
+    ):
+        """
+        Initialize the OpModel.
+
+        Args:
+            op: The torch aten operation to wrap
+            spec: The operation specification, used to recombine inputs in forward()
+            op_name: Optional name for the operation (for debugging/logging)
+            *model_args: Positional model parameters
+            **model_kwargs: Keyword model parameters
+        """
+        super().__init__()
+        self.op = op
+        self.op_name = op_name or str(op)
+        self.spec = spec
+        self.model_args = model_args
+        self.model_kwargs = model_kwargs
+
+    def forward(self, *args, **kwargs) -> Any:
+        """
+        Forward pass that applies the wrapped operation to the inputs.
+
+        Args:
+            *args: Positional arguments to pass to the operation
+            **kwargs: Keyword arguments to pass to the operation
+
+        Returns:
+            The result of applying the operation to the inputs
+        """
+        op_args, op_kwargs = combine_forward_and_model_inputs(
+            self.spec, args, kwargs, self.model_args, self.model_kwargs
+        )
+        return self.op(*op_args, **op_kwargs)
+
+    def __repr__(self) -> str:
+        return f"OpModel(op={self.op_name})"
+
+
+class OpModelGenerator:
+    """
+    Generator that creates OpModel instances with appropriate inputs for testing.
+
+    This class takes a torch operation and its specification, then uses
+    ArgumentTupleGenerator to create OpModel instances along with valid
+    inputs for the forward function. It automatically separates tensor inputs
+    from non-tensor parameters for ExecuTorch compatibility using FACTO's ArgType system.
+    """
+
+    def __init__(self, op: Any, spec: Spec, config: Optional[TensorConfig] = None):
+        """
+        Initialize the OpModelGenerator.
+
+        Args:
+            op: The torch aten operation to wrap in models
+            spec: The specification for the operation's arguments
+            config: Optional tensor configuration for input generation
+        """
+        self.op = op
+        self.spec = spec
+        self.config = config
+        self.arg_generator = ArgumentTupleGenerator(spec, config)
+
+    def gen(
+        self,
+        *,
+        valid: bool = True,
+        verbose: bool = False,
+        max_count: Optional[int] = None,
+    ) -> Generator[Tuple[OpModel, List[Any], Dict[str, Any]], None, None]:
+        """
+        Generate OpModel instances with corresponding inputs.
+
+        Args:
+            valid: Whether to generate valid inputs (default: True)
+            verbose: Whether to print each generated meta tuple (default: False)
+            max_count: Maximum number of models to generate (default: None for unlimited)
+
+        Yields:
+            Tuple containing:
+            - OpModel instance wrapping the operation
+            - List of positional arguments for forward()
+            - Dict of keyword arguments for forward()
+        """
+        count = 0
+        for args, kwargs, _ in self.arg_generator.gen(
+            valid=valid, out=False, verbose=verbose
+        ):
+            if max_count is not None and count >= max_count:
+                break
+
+            # Separate tensor inputs from non-tensor parameters
+            forward_args, forward_kwargs, model_args, model_kwargs = (
+                separate_forward_and_model_inputs(self.spec, args, kwargs)
+            )
+
+            # Create model instance
+            model = OpModel(
+                self.op, self.spec, self.spec.op, *model_args, **model_kwargs
+            )
+
+            yield model, forward_args, forward_kwargs
+            count += 1
+
+    def test_model_with_inputs(
+        self, model: OpModel, args: List[Any], kwargs: Dict[str, Any]
+    ) -> Tuple[bool, Optional[Any], Optional[Exception]]:
+        """
+        Test a model with given inputs and return the result.
+
+        Args:
+            model: The OpModel to test
+            args: Positional arguments for the model
+            kwargs: Keyword arguments for the model
+
+        Returns:
+            Tuple containing:
+            - Boolean indicating success/failure
+            - The output if successful, None if failed
+            - The exception if failed, None if successful
+        """
+        try:
+            output = model(*args, **kwargs)
+            return True, output, None
+        except Exception as e:
+            return False, None, e
+
+    def __repr__(self) -> str:
+        return f"OpModelGenerator(op={self.spec.op})"
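A minimal usage sketch of the new module, condensed from examples/minimal_modelgen_example.py above; the op choice and max_count are arbitrary:

    from facto.inputgen.utils.config import TensorConfig
    from facto.modelgen.gen import OpModelGenerator
    from facto.specdb.db import SpecDictDB
    from facto.utils.ops import get_op_overload

    op_name = "add.Tensor"  # any SpecDB entry works
    generator = OpModelGenerator(
        get_op_overload(op_name), SpecDictDB[op_name], TensorConfig(device="cpu")
    )
    for model, args, kwargs in generator.gen(valid=True, max_count=3):
        ok, output, error = generator.test_model_with_inputs(model, args, kwargs)
        print(model, "ok" if ok else error)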
diff --git a/test/specdb/base_model_test.py b/test/specdb/base_model_test.py
new file mode 100644
index 0000000..51cac6d
--- /dev/null
+++ b/test/specdb/base_model_test.py
@@ -0,0 +1,78 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+from typing import Optional
+
+from facto.inputgen.utils.config import TensorConfig
+from facto.modelgen.gen import OpModelGenerator
+from facto.specdb.db import SpecDictDB
+from facto.utils.ops import get_op_overload
+
+
+class BaseModelTest(unittest.TestCase):
+    """Base test class for validating all models generated by OpModelGenerator."""
+
+    def _run_model(
+        self, op_name: str, *, config: Optional[TensorConfig] = None, max_count: int = 5
+    ):
+        """
+        Run a single model in SpecDB with a given TensorConfig.
+
+        This helper calls OpModelGenerator.gen with valid=True for a single operation.
+        The operation is tested as a subtest.
+        """
+        print("Testing model: ", op_name)
+        with self.subTest(op=op_name):
+            try:
+                # Get the spec and operation
+                spec = SpecDictDB[op_name]
+                op = get_op_overload(op_name)
+                generator = OpModelGenerator(op, spec, config)
+            except Exception as e:
+                # If we can't resolve the operation or there's another issue,
+                # fail this subtest with a descriptive message
+                self.fail(
+                    f"Failed to create model generator for operation {op_name}: {e}"
+                )
+
+            try:
+                # Generate models and test them
+                model_count = 0
+                for model, args, kwargs in generator.gen(
+                    valid=True, max_count=max_count
+                ):
+                    model_count += 1
+                    success, output, error = generator.test_model_with_inputs(
+                        model, args, kwargs
+                    )
+                    if not success:
+                        self.fail(
+                            f"Model failed for {op_name} (model {model_count}): {error}"
+                        )
+
+                if model_count == 0:
+                    self.fail(f"No models generated for {op_name}")
+
+            except Exception as e:
+                self.fail(f"Failed while testing models for operation {op_name}: {e}")
+
+    def _run_all_models(
+        self, *, config: Optional[TensorConfig] = None, skip_ops=(), max_count: int = 5
+    ):
+        """
+        Run all models in SpecDB with a given TensorConfig.
+
+        This helper iterates through all operations in SpecDB and creates
+        OpModelGenerator instances for each operation. Each operation is tested as a subtest.
+        """
+        # Get all operation names from SpecDB
+        op_names = list(SpecDictDB.keys())
+
+        for op_name in op_names:
+            if op_name in skip_ops:
+                continue
+            self._run_model(op_name, config=config, max_count=max_count)
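A sketch of how a concrete suite builds on this base class; the class name and single-op test below are hypothetical (the real suites appear after the ExecuTorch tests):

    import unittest

    from base_model_test import BaseModelTest
    from facto.inputgen.utils.config import TensorConfig


    class TestSingleOpCPU(BaseModelTest):  # hypothetical example suite
        def test_add_tensor(self):
            # Exercises one SpecDB entry as a subtest with a plain CPU config.
            self._run_model("add.Tensor", config=TensorConfig(device="cpu"))


    if __name__ == "__main__":
        unittest.main()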
+ """ + print("Testing model: ", op_name) + with self.subTest(op=op_name): + try: + # Get the spec and operation + spec = SpecDictDB[op_name] + op = get_op_overload(op_name) + generator = OpModelGenerator(op, spec, config) + except Exception as e: + # If we can't resolve the operation or there's another issue, + # fail this subtest with a descriptive message + self.fail( + f"Failed to create model generator for operation {op_name}: {e}" + ) + + try: + # Generate models and test them + model_count = 0 + for model, args, kwargs in generator.gen( + valid=True, max_count=max_count + ): + model_count += 1 + success, output, error = generator.test_model_with_inputs( + model, args, kwargs + ) + if not success: + self.fail( + f"Model failed for {op_name} (model {model_count}): {error}" + ) + + if model_count == 0: + self.fail(f"No models generated for {op_name}") + + except Exception as e: + self.fail(f"Failed while testing models for operation {op_name}: {e}") + + def _run_all_models( + self, *, config: Optional[TensorConfig] = None, skip_ops=[], max_count: int = 5 + ): + """ + Run all models in SpecDB with a given TensorConfig + + This test iterates through all operations in SpecDB and creates + OpModelGenerator instances for each operation. Each operation is tested as a subtest. + """ + # Get all operation names from SpecDB + op_names = list(SpecDictDB.keys()) + + for op_name in op_names: + if op_name in skip_ops: + continue + self._run_model(op_name, config=config, max_count=max_count) diff --git a/test/specdb/test_executorch_portable.py b/test/specdb/test_executorch_portable.py new file mode 100644 index 0000000..82a7742 --- /dev/null +++ b/test/specdb/test_executorch_portable.py @@ -0,0 +1,364 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import unittest +from typing import Optional + +import torch + +from facto.inputgen.utils.config import TensorConfig +from facto.modelgen.gen import OpModelGenerator +from facto.specdb.db import SpecDictDB +from facto.utils.ops import get_op_overload + +try: + # ExecuTorch imports + from executorch.exir import EdgeCompileConfig, to_edge_transform_and_lower + from executorch.extension.pybindings.portable_lib import ( + _load_for_executorch_from_buffer, + ) + + EXECUTORCH_AVAILABLE = True +except ImportError: + EXECUTORCH_AVAILABLE = False + + +class BaseExecuTorchTest(unittest.TestCase): + """Base test class for validating ExecuTorch portable kernels end-to-end.""" + + def _export_to_executorch( + self, model: torch.nn.Module, example_inputs: tuple + ) -> bytes: + """ + Export a PyTorch model to ExecuTorch format using the official workflow. 
+
+    def _run_executorch_model(
+        self,
+        op_name: str,
+        *,
+        config: Optional[TensorConfig] = None,
+        max_count: Optional[int] = None,
+        check_correctness: bool = False,
+        rtol: float = 1e-5,
+        atol: float = 1e-8,
+    ):
+        """
+        Run a single model through the ExecuTorch export and execution pipeline.
+
+        Args:
+            op_name: Operation name to test
+            config: TensorConfig for input generation
+            max_count: Maximum number of models to test
+            check_correctness: Whether to compare outputs with PyTorch
+            rtol: Relative tolerance for correctness check
+            atol: Absolute tolerance for correctness check
+        """
+        print("Testing ExecuTorch model: ", op_name)
+        with self.subTest(op=op_name):
+            try:
+                # Get the spec and operation
+                spec = SpecDictDB[op_name]
+                op = get_op_overload(op_name)
+                generator = OpModelGenerator(op, spec, config)
+            except Exception as e:
+                self.fail(
+                    f"Failed to create model generator for operation {op_name}: {e}"
+                )
+
+            try:
+                # Generate models and test them through ExecuTorch
+                model_count = 0
+                for model, args, kwargs in generator.gen(
+                    valid=True, verbose=True, max_count=max_count
+                ):
+                    model_count += 1
+
+                    # First test the model works with PyTorch
+                    success, torch_output, error = generator.test_model_with_inputs(
+                        model, args, kwargs
+                    )
+                    if not success:
+                        self.fail(
+                            f"PyTorch model failed for {op_name} (model {model_count}): {error}"
+                        )
+
+                    # Prepare inputs for ExecuTorch export
+                    example_inputs = tuple(args)
+
+                    try:
+                        # Export to ExecuTorch
+                        executorch_buffer = self._export_to_executorch(
+                            model, example_inputs
+                        )
+                    except Exception as e:
+                        self.fail(
+                            f"ExecuTorch export failed for {op_name} (model {model_count}): {e}"
+                        )
+
+                    try:
+                        # Load ExecuTorch model
+                        executorch_module = _load_for_executorch_from_buffer(
+                            executorch_buffer
+                        )
+                    except Exception as e:
+                        self.fail(
+                            f"ExecuTorch loading failed for {op_name} (model {model_count}): {e}"
+                        )
+
+                    try:
+                        # Run with ExecuTorch
+                        executorch_outputs = executorch_module.forward(list(args))
+                    except Exception as e:
+                        self.fail(
+                            f"ExecuTorch execution failed for {op_name} (model {model_count}): {e}"
+                        )
+
+                    # Handle single output vs multiple outputs
+                    if (
+                        isinstance(executorch_outputs, (list, tuple))
+                        and len(executorch_outputs) == 1
+                    ):
+                        executorch_result = executorch_outputs[0]
+                    else:
+                        executorch_result = executorch_outputs
+
+                    # Check correctness if requested
+                    if check_correctness:
+                        # Handle multiple outputs
+                        if isinstance(torch_output, (list, tuple)) and isinstance(
+                            executorch_result, (list, tuple)
+                        ):
+                            if len(torch_output) != len(executorch_result):
+                                self.fail(
+                                    f"Output length mismatch for {op_name} (model {model_count}): "
+                                    f"PyTorch={len(torch_output)}, ExecuTorch={len(executorch_result)}"
+                                )
+
+                            for i, (torch_out, et_out) in enumerate(
+                                zip(torch_output, executorch_result)
+                            ):
+                                if isinstance(torch_out, torch.Tensor) and isinstance(
+                                    et_out, torch.Tensor
+                                ):
+                                    passed, error_msg = self._compare_results(
+                                        torch_out, et_out, rtol, atol
+                                    )
+                                    if not passed:
+                                        self.fail(
+                                            f"Correctness check failed for {op_name} (model {model_count}, output {i}): {error_msg}"
+                                        )
+                        else:
+                            # Single output case
+                            if isinstance(torch_output, torch.Tensor) and isinstance(
+                                executorch_result, torch.Tensor
+                            ):
+                                passed, error_msg = self._compare_results(
+                                    torch_output, executorch_result, rtol, atol
+                                )
+                                if not passed:
+                                    self.fail(
+                                        f"Correctness check failed for {op_name} (model {model_count}): {error_msg}"
+                                    )
+
+                if model_count == 0:
+                    self.fail(f"No models generated for {op_name}")
+
+            except Exception as e:
+                self.fail(
+                    f"Failed while testing ExecuTorch models for operation {op_name}: {e}"
+                )
+
+    def _run_all_executorch_models(
+        self,
+        *,
+        config: Optional[TensorConfig] = None,
+        skip_ops=(),
+        max_count: Optional[int] = None,
+        check_correctness: bool = False,
+        rtol: float = 1e-5,
+        atol: float = 1e-8,
+    ):
+        """
+        Run all models in SpecDB through the ExecuTorch export and execution pipeline.
+
+        Args:
+            config: TensorConfig for input generation
+            skip_ops: Operations to skip
+            max_count: Maximum number of models to test per operation
+            check_correctness: Whether to compare outputs with PyTorch
+            rtol: Relative tolerance for correctness check
+            atol: Absolute tolerance for correctness check
+        """
+        # Get all operation names from SpecDB
+        op_names = list(SpecDictDB.keys())
+
+        for op_name in op_names:
+            if op_name in skip_ops:
+                continue
+            self._run_executorch_model(
+                op_name,
+                config=config,
+                max_count=max_count,
+                check_correctness=check_correctness,
+                rtol=rtol,
+                atol=atol,
+            )
+
+
+class TestExecuTorchPortable(BaseExecuTorchTest):
+    """Test class for validating ExecuTorch portable kernels end-to-end."""
+
+    SKIP_OPS = [
+        # Calibrate specs
+        "_native_batch_norm_legit_no_training.default",
+        "addmm.default",
+        "arange.default",
+        "arange.start_step",
+        "constant_pad_nd.default",
+        "split_with_sizes_copy.default",
+        # Review errors
+        "_cdist_forward.default",
+        "_to_copy.default",
+        "add.Tensor",
+        "add.Scalar",
+        "any.dims",
+        "as_strided_copy.default",
+        "clamp.default",
+        "clamp.Tensor",
+        "convolution.default",
+        "copy.default",
+        "expand_copy.default",
+        "fill.Tensor",
+        "hardtanh.default",
+        "max_pool3d_with_indices.default",
+        "native_group_norm.default",
+        "native_layer_norm.default",
+        "nonzero.default",
+        "rsub.Scalar",
+        "sigmoid.default",
+        "sub.Tensor",
+        "sub.Scalar",
+        "var.correction",
+        "where.self",
+        # Generated meta_tuple: ['ArgType.Tensor torch.float64 (8, 5, 7)', 'ArgType.Dim 2', 'ArgType.Bool False']
+        # [program.cpp:135] InternalConsistency verification requested but not available
+        # [op_log_softmax.cpp:157] In function opt_log_softmax_out(), assert failed (false): Unhandled out dtype 7
+        # zsh: abort
+        "_log_softmax.default",
+        # Generated meta_tuple: ['ArgType.Tensor torch.bool (1, 7, 4)', 'ArgType.Dim 2']
+        # [program.cpp:135] InternalConsistency verification requested but not available
+        # [op_unbind_copy.cpp:79] In function operator()(), assert failed (false): Unhandled dtype Bool for unbind_copy.int_out
+        # zsh: abort
+        "unbind_copy.int",
+    ]
+
+    @unittest.skipUnless(EXECUTORCH_AVAILABLE, "ExecuTorch not available")
+    def test_executorch_cpu(self):
+        """Test ExecuTorch export and execution on CPU without correctness checking."""
+        config = TensorConfig(device="cpu", zerodim=False, half_precision=False)
+        self._run_all_executorch_models(config=config, skip_ops=self.SKIP_OPS)
+
+    @unittest.skipUnless(EXECUTORCH_AVAILABLE, "ExecuTorch not available")
+    def test_executorch_export_cpu_half(self):
+        """Test ExecuTorch export and execution on CPU with half precision."""
+        skip_ops = self.SKIP_OPS.copy()
+        # Some ATen operations do not support half precision
+        skip_ops += ["_cdist_forward.default", "_pdist_forward.default"]
+
+        skip_ops += [
+            # Generated meta_tuple: ['ArgType.Tensor torch.bfloat16 (4, 8, 6)', 'ArgType.Tensor torch.bfloat16 (4, 6, 4)']
+            # [program.cpp:135] InternalConsistency verification requested but not available
+            # [op_bmm.cpp:163] In function operator()(), assert failed (false): Unhandled dtype BFloat16 for bmm.out
+            # zsh: abort
+            "bmm.default",
+            # Generated meta_tuple: ['ArgType.Tensor torch.bfloat16 (7, 1, 2, 8, 4, 4)', 'ArgType.Scalar -7']
+            # [program.cpp:135] InternalConsistency verification requested but not available
+            # [op_div.cpp:209] In function operator()(), assert failed (false): Unhandled dtype BFloat16 for div.Scalar_out
+            # zsh: abort
+            "div.Scalar",
+            # Generated meta_tuple: ['ArgType.Tensor torch.int16 (2, 7, 8, 1, 4, 8)', 'ArgType.Tensor torch.float16 (1,)']
+            # [program.cpp:135] InternalConsistency verification requested but not available
+            # [op_div.cpp:169] In function operator()(), assert failed (false): Unhandled dtype Half for div.out
+            # zsh: abort
+            "div.Tensor",
+            # Generated meta_tuple: ['ArgType.Tensor torch.float16 (5, 6, 8)', 'ArgType.Scalar -3']
+            # [program.cpp:135] InternalConsistency verification requested but not available
+            # [op_le.cpp:137] In function operator()(), assert failed (false): Unhandled dtype Half for le.Scalar_out
+            # zsh: abort
+            "le.Scalar",
+            # Generated meta_tuple: ['ArgType.Tensor torch.float16 (8, 8, 8, 1, 8, 7)', 'ArgType.Dim 0', 'ArgType.Bool False']
+            # [program.cpp:135] InternalConsistency verification requested but not available
+            # [op_max.cpp:106] In function operator()(), assert failed (false): Unhandled dtype Half for max.dim_max
+            # zsh: abort
+            "max.dim",
+            # Generated meta_tuple: ['ArgType.Tensor torch.float16 (4, 6, 8, 8, 1, 7)', 'ArgType.Dim 3', 'ArgType.Bool True']
+            # [program.cpp:135] InternalConsistency verification requested but not available
+            # [op_min.cpp:106] In function operator()(), assert failed (false): Unhandled dtype Half for min.dim_min
+            # zsh: abort
+            "min.dim",
+            # Generated meta_tuple: ['ArgType.Tensor torch.float16 (3,)', 'ArgType.Dim -1', 'ArgType.Tensor torch.int64 (7,)', 'ArgType.Tensor torch.float16 (8,)']
+            # [program.cpp:135] InternalConsistency verification requested but not available
+            # [op_scatter_add.cpp:107] In function operator()(), assert failed (false): Unhandled dtype Half for scatter_add.out
+            # zsh: abort
+            "scatter_add.default",
+        ]
+
+        config = TensorConfig(device="cpu", zerodim=False, half_precision=True)
+        self._run_all_executorch_models(config=config, skip_ops=skip_ops)
+
+
+if __name__ == "__main__":
+    unittest.main()
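For context, a standalone sketch of the export/load/execute roundtrip these tests automate; it assumes ExecuTorch is installed, and the wrapped module and shapes are illustrative rather than taken from SpecDB:

    import torch
    from executorch.exir import to_edge_transform_and_lower
    from executorch.extension.pybindings.portable_lib import (
        _load_for_executorch_from_buffer,
    )


    class AddOne(torch.nn.Module):  # illustrative stand-in for an OpModel
        def forward(self, x):
            return x + 1


    example = (torch.randn(2, 2),)
    exported = torch.export.export(AddOne(), example)
    buffer = to_edge_transform_and_lower(exported).to_executorch().buffer
    module = _load_for_executorch_from_buffer(buffer)
    print(module.forward([example[0]]))  # executes on the portable kernels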
diff --git a/test/specdb/test_model_cpu.py b/test/specdb/test_model_cpu.py
new file mode 100644
index 0000000..df1aca3
--- /dev/null
+++ b/test/specdb/test_model_cpu.py
@@ -0,0 +1,75 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+
+from base_model_test import BaseModelTest
+from facto.inputgen.utils.config import TensorConfig
+
+
+class TestSpecDBModelsCPU(BaseModelTest):
+    """Test class for validating all models generated by OpModelGenerator on CPU."""
+
+    SKIP_OPS = [
+        "_native_batch_norm_legit_no_training.default",
+        "addmm.default",
+        "arange.default",
+        "arange.start_step",
+        "constant_pad_nd.default",
+        "split_with_sizes_copy.default",
+    ]
+
+    def test_all_models_cpu(self):
+        config = TensorConfig(device="cpu")
+        self._run_all_models(config=config, skip_ops=self.SKIP_OPS)
+
+    def test_all_models_cpu_empty(self):
+        config = TensorConfig(device="cpu", empty=True)
+        self._run_all_models(config=config, skip_ops=self.SKIP_OPS)
+
+    def test_all_models_cpu_half(self):
+        skip_ops = self.SKIP_OPS.copy()
+        # "cdist" not implemented for 'Half' on CPU
+        # "pdist" not implemented for 'Half' on CPU
+        skip_ops += ["_cdist_forward.default", "_pdist_forward.default"]
+
+        config = TensorConfig(device="cpu", half_precision=True)
+        self._run_all_models(config=config, skip_ops=skip_ops)
+
+    def test_all_models_cpu_transposed(self):
+        skip_ops = self.SKIP_OPS.copy()
+        # Expected X.is_contiguous(memory_format) to be true, but got false.
+        skip_ops += ["native_group_norm.default"]
+        # _pdist_forward requires contiguous input
+        skip_ops += ["_pdist_forward.default"]
+        config = TensorConfig(device="cpu", transposed=True)
+        self._run_all_models(config=config, skip_ops=skip_ops)
+
+    def test_all_models_cpu_permuted(self):
+        skip_ops = self.SKIP_OPS.copy()
+        # Expected X.is_contiguous(memory_format) to be true, but got false.
+        skip_ops += ["native_group_norm.default"]
+        # _pdist_forward requires contiguous input
+        skip_ops += ["_pdist_forward.default"]
+        # Unsupported memory format. Supports only ChannelsLast3d, Contiguous
+        skip_ops += ["max_pool3d_with_indices.default"]
+        # Unsupported memory format. Supports only ChannelsLast, Contiguous
+        skip_ops += ["pixel_shuffle.default"]
+        config = TensorConfig(device="cpu", permuted=True)
+        self._run_all_models(config=config, skip_ops=skip_ops)
+
+    def test_all_models_cpu_strided(self):
+        skip_ops = self.SKIP_OPS.copy()
+        # Expected X.is_contiguous(memory_format) to be true, but got false.
+        skip_ops += ["native_group_norm.default"]
+        # _pdist_forward requires contiguous input
+        skip_ops += ["_pdist_forward.default"]
+        config = TensorConfig(device="cpu", strided=True)
+        self._run_all_models(config=config, skip_ops=skip_ops)
+
+
+if __name__ == "__main__":
+    unittest.main()
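One way to run the new suites programmatically from the repository root; the discovery arguments are illustrative and assume test/specdb is importable the way unittest discovery arranges it:

    import unittest

    # Swap the pattern for "test_executorch_portable.py" to exercise the
    # ExecuTorch path instead of the plain CPU model suite.
    suite = unittest.defaultTestLoader.discover("test/specdb", pattern="test_model_cpu.py")
    unittest.TextTestRunner(verbosity=2).run(suite)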