Skip to content

Add half_precision condition to TensorConfig and refactor specdb tests #38

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: gh/manuelcandales/20/base
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions facto/inputgen/utils/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,13 +6,16 @@

from enum import Enum

import torch


class Condition(str, Enum):
    """Keyword switches recognized by ``TensorConfig``.

    Mixing in ``str`` makes each member compare and hash equal to its
    string value, so members match the plain keyword names callers pass
    (e.g. ``half_precision=False``) when used as dict keys.
    """
    ALLOW_EMPTY = "empty"
    ALLOW_TRANSPOSED = "transposed"
    ALLOW_PERMUTED = "permuted"
    ALLOW_STRIDED = "strided"
    DISALLOW_DTYPES = "disallow_dtypes"
    # When False, float16/bfloat16 are added to the disallowed dtypes.
    HALF_PRECISION = "half_precision"


class TensorConfig:
Expand All @@ -23,6 +26,8 @@ def __init__(self, device="cpu", disallow_dtypes=None, **conditions):
for condition, value in conditions.items():
if condition in self.conditions:
self.conditions[condition] = value
if self.conditions[Condition.HALF_PRECISION] is False:
self.disallow_dtypes += [torch.float16, torch.bfloat16]
self.probability = 0.5

def is_allowed(self, condition: Condition) -> bool:
Expand Down
25 changes: 16 additions & 9 deletions test/specdb/test_specdb_cpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,26 +7,33 @@
import unittest

from base_test import BaseSpecDBTest
from facto.inputgen.utils.config import TensorConfig


class TestSpecDBOperationsCPU(BaseSpecDBTest):
    """Test class for validating all specs in SpecDB using gen_errors on CPU."""

    # Ops skipped in every CPU run (specs not passing yet); shared by both
    # the full-precision and half-precision tests.
    SKIP_OPS = [
        "_native_batch_norm_legit_no_training.default",
        "addmm.default",
        "arange.default",
        "arange.start_step",
        "constant_pad_nd.default",
        "split_with_sizes_copy.default",
    ]

    def test_all_ops_cpu(self):
        """Validate all specs on CPU with half-precision dtypes disallowed."""
        config = TensorConfig(device="cpu", half_precision=False)
        self._run_all_ops(config=config, skip_ops=self.SKIP_OPS)

    def test_all_ops_cpu_half(self):
        """Validate all specs on CPU with half-precision dtypes allowed."""
        # Copy so the shared class-level list is not mutated.
        skip_ops = self.SKIP_OPS.copy()
        # "cdist" not implemented for 'Half' on CPU
        # "pdist" not implemented for 'Half' on CPU
        skip_ops += ["_cdist_forward.default", "_pdist_forward.default"]
        config = TensorConfig(device="cpu", half_precision=True)
        self._run_all_ops(config=config, skip_ops=skip_ops)


if __name__ == "__main__":
Expand Down
79 changes: 44 additions & 35 deletions test/specdb/test_specdb_mps.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,46 +15,55 @@
class TestSpecDBOperationsMPS(BaseSpecDBTest):
    """Test class for validating all specs in SpecDB using gen_errors on MPS."""

    # Ops skipped in every MPS run; shared by both the full-precision and
    # half-precision tests. Each entry carries the reason it is skipped.
    SKIP_OPS = [
        # Calibrate specs (cpu not passing either):
        "addmm.default",
        "arange.default",
        "arange.start_step",
        "constant_pad_nd.default",
        "split_with_sizes_copy.default",
        # https://github.com/pytorch/pytorch/issues/160208
        "add.Tensor",
        "add.Scalar",
        "rsub.Scalar",
        "sub.Tensor",
        "sub.Scalar",
        # crash: https://github.com/pytorch/pytorch/issues/154887
        "_native_batch_norm_legit_no_training.default",
        # not implemented
        "_pdist_forward.default",
        # impl: clamp tensor number of dims must not be greater than that of input tensor
        "clamp.Tensor",
        # crash: https://github.com/pytorch/pytorch/issues/154881
        "cumsum.default",
        # sparse_grad not supported in MPS yet
        "gather.default",
        # Dimension specified as -1 but tensor has no dimensions
        "index_select.default",
        # crash: https://github.com/pytorch/pytorch/issues/154882
        "max_pool2d_with_indices.default",
        # On-going issue on MPSGraph topk when ndims() - axis > 4, see issue #154890
        # https://github.com/pytorch/pytorch/issues/154890
        "topk.default",
        # var_mps: reduction dim must be in the range of input shape
        "var.correction",
        "var.dim",
    ]

    def test_all_ops_mps(self):
        """Validate all specs on MPS with half-precision dtypes disallowed."""
        # float64 is not supported by the MPS backend.
        config = TensorConfig(
            device="mps", disallow_dtypes=[torch.float64], half_precision=False
        )
        self._run_all_ops(config=config, skip_ops=self.SKIP_OPS)

    def test_all_ops_mps_half(self):
        """Validate all specs on MPS with half-precision dtypes allowed."""
        # Copy so the shared class-level list is not mutated.
        skip_ops = self.SKIP_OPS.copy()
        # ConvTranspose 3D with BF16 or FP16 types is not supported on MPS
        skip_ops += ["convolution.default"]

        # float64 is not supported by the MPS backend.
        config = TensorConfig(
            device="mps", disallow_dtypes=[torch.float64], half_precision=True
        )
        self._run_all_ops(config=config, skip_ops=skip_ops)


Expand Down