diff --git a/facto/inputgen/utils/config.py b/facto/inputgen/utils/config.py
index 2e80cc6..57a1852 100644
--- a/facto/inputgen/utils/config.py
+++ b/facto/inputgen/utils/config.py
@@ -6,6 +6,8 @@
 
 from enum import Enum
 
+import torch
+
 
 class Condition(str, Enum):
     ALLOW_EMPTY = "empty"
@@ -13,6 +15,7 @@ class Condition(str, Enum):
     ALLOW_PERMUTED = "permuted"
     ALLOW_STRIDED = "strided"
     DISALLOW_DTYPES = "disallow_dtypes"
+    HALF_PRECISION = "half_precision"
 
 
 class TensorConfig:
@@ -23,6 +26,8 @@ def __init__(self, device="cpu", disallow_dtypes=None, **conditions):
         for condition, value in conditions.items():
             if condition in self.conditions:
                 self.conditions[condition] = value
+        if self.conditions[Condition.HALF_PRECISION] is False:
+            self.disallow_dtypes += [torch.float16, torch.bfloat16]
         self.probability = 0.5
 
     def is_allowed(self, condition: Condition) -> bool:
diff --git a/test/specdb/test_specdb_cpu.py b/test/specdb/test_specdb_cpu.py
index b4edec4..bfeb7eb 100644
--- a/test/specdb/test_specdb_cpu.py
+++ b/test/specdb/test_specdb_cpu.py
@@ -7,26 +7,33 @@
 import unittest
 
 from base_test import BaseSpecDBTest
+from facto.inputgen.utils.config import TensorConfig
 
 
 class TestSpecDBOperationsCPU(BaseSpecDBTest):
     """Test class for validating all specs in SpecDB using gen_errors on CPU."""
 
+    SKIP_OPS = [
+        "_native_batch_norm_legit_no_training.default",
+        "addmm.default",
+        "arange.default",
+        "arange.start_step",
+        "constant_pad_nd.default",
+        "split_with_sizes_copy.default",
+    ]
+
     def test_all_ops_cpu(self):
-        skip_ops = [
-            "_native_batch_norm_legit_no_training.default",
-            "addmm.default",
-            "arange.default",
-            "arange.start_step",
-            "constant_pad_nd.default",
-            "split_with_sizes_copy.default",
-        ]
+        config = TensorConfig(device="cpu", half_precision=False)
+        self._run_all_ops(config=config, skip_ops=self.SKIP_OPS)
 
+    def test_all_ops_cpu_half(self):
+        skip_ops = self.SKIP_OPS.copy()
         # "cdist" not implemented for 'Half' on CPU
         # "pdist" not implemented for 'Half' on CPU
         skip_ops += ["_cdist_forward.default", "_pdist_forward.default"]
-        self._run_all_ops(skip_ops=skip_ops)
+        config = TensorConfig(device="cpu", half_precision=True)
+        self._run_all_ops(config=config, skip_ops=skip_ops)
 
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/test/specdb/test_specdb_mps.py b/test/specdb/test_specdb_mps.py
index 8e4642f..a367f64 100644
--- a/test/specdb/test_specdb_mps.py
+++ b/test/specdb/test_specdb_mps.py
@@ -15,46 +15,55 @@ class TestSpecDBOperationsMPS(BaseSpecDBTest):
     """Test class for validating all specs in SpecDB using gen_errors on MPS."""
 
+    SKIP_OPS = [
+        # Calibrate specs (CPU not passing either):
+        "addmm.default",
+        "arange.default",
+        "arange.start_step",
+        "constant_pad_nd.default",
+        "split_with_sizes_copy.default",
+        # https://github.com/pytorch/pytorch/issues/160208
+        "add.Tensor",
+        "add.Scalar",
+        "rsub.Scalar",
+        "sub.Tensor",
+        "sub.Scalar",
+        # crash: https://github.com/pytorch/pytorch/issues/154887
+        "_native_batch_norm_legit_no_training.default",
+        # not implemented
+        "_pdist_forward.default",
+        # impl: clamp tensor number of dims must not be greater than that of input tensor
+        "clamp.Tensor",
+        # crash: https://github.com/pytorch/pytorch/issues/154881
+        "cumsum.default",
+        # sparse_grad not supported in MPS yet
+        "gather.default",
+        # Dimension specified as -1 but tensor has no dimensions
+        "index_select.default",
+        # crash: https://github.com/pytorch/pytorch/issues/154882
+        "max_pool2d_with_indices.default",
+        # Ongoing issue on MPSGraph topk when ndims() - axis > 4, see
+        # https://github.com/pytorch/pytorch/issues/154890
+        "topk.default",
+        # var_mps: reduction dim must be in the range of input shape
+        "var.correction",
+        "var.dim",
+    ]
+
     def test_all_ops_mps(self):
-        skip_ops = [
-            # Calibrate specs (cpu not passing either):
-            "addmm.default",
-            "arange.default",
-            "arange.start_step",
-            "constant_pad_nd.default",
-            "split_with_sizes_copy.default",
-            # https://github.com/pytorch/pytorch/issues/160208
-            "add.Tensor",
-            "add.Scalar",
-            "rsub.Scalar",
-            "sub.Tensor",
-            "sub.Scalar",
-            # crash: https://github.com/pytorch/pytorch/issues/154887
-            "_native_batch_norm_legit_no_training.default",
-            # not implemented
-            "_pdist_forward.default",
-            # impl: clamp tensor number of dims must not be greater than that of input tensor
-            "clamp.Tensor",
-            # crash: https://github.com/pytorch/pytorch/issues/154881
-            "cumsum.default",
-            # sparse_grad not supported in MPS yet
-            "gather.default",
-            # Dimension specified as -1 but tensor has no dimensions
-            "index_select.default",
-            # crash: https://github.com/pytorch/pytorch/issues/154882
-            "max_pool2d_with_indices.default",
-            # On-going issue on MPSGraph topk when ndims() - axis > 4, see issue #154890
-            # https://github.com/pytorch/pytorch/issues/154890
-            "topk.default",
-            # var_mps: reduction dim must be in the range of input shape
-            "var.correction",
-            "var.dim",
-        ]
+        config = TensorConfig(
+            device="mps", disallow_dtypes=[torch.float64], half_precision=False
+        )
+        self._run_all_ops(config=config, skip_ops=self.SKIP_OPS)
 
+    def test_all_ops_mps_half(self):
+        skip_ops = self.SKIP_OPS.copy()
         # ConvTranspose 3D with BF16 or FP16 types is not supported on MPS
         skip_ops += ["convolution.default"]
-        config = TensorConfig(device="mps", disallow_dtypes=[torch.float64])
+        config = TensorConfig(
+            device="mps", disallow_dtypes=[torch.float64], half_precision=True
+        )
         self._run_all_ops(config=config, skip_ops=skip_ops)
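
A minimal sketch of what the new flag does end to end, per the config.py hunk above. This is a non-authoritative example: it assumes facto and torch are importable, and that TensorConfig normalizes disallow_dtypes=None to a mutable list before the new check runs.

    import torch

    from facto.inputgen.utils.config import TensorConfig

    # half_precision=False folds the half dtypes into disallow_dtypes,
    # so generated inputs skip float16/bfloat16 entirely.
    config = TensorConfig(device="cpu", half_precision=False)
    assert torch.float16 in config.disallow_dtypes
    assert torch.bfloat16 in config.disallow_dtypes

    # half_precision=True leaves disallow_dtypes untouched, so the half
    # dtypes stay eligible, as exercised by the new *_half tests.
    config_half = TensorConfig(device="cpu", half_precision=True)
    assert torch.float16 not in config_half.disallow_dtypes

Centralizing the cutoff in TensorConfig lets each backend test toggle fp16/bf16 coverage with a single keyword, while the shared SKIP_OPS class attribute keeps the base skip list identical between the full-precision and half-precision test methods.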