Commit 751d7f6

fixing torchao rocm ci test (#2789)

1 parent 24f11f8

1 file changed: +4 −0 lines

test/quantization/quantize_/workflows/int4/test_int4_marlin_sparse_tensor.py

@@ -21,6 +21,7 @@
 )
 from torchao.quantization.utils import compute_error
 from torchao.sparsity.sparse_api import apply_fake_sparsity
+from torchao.testing.utils import skip_if_rocm
 from torchao.utils import (
     TORCH_VERSION_AT_LEAST_2_8,
 )
@@ -38,6 +39,7 @@ class TestInt4MarlinSparseTensor(TestCase):
     def setUp(self):
         self.GPU_DEVICES = ["cuda"] if torch.cuda.is_available() else []
 
+    @skip_if_rocm("ROCm enablement in progress")
     @parametrize("config", [BF16_ACT_CONFIG])
     @parametrize(
         "sizes",
@@ -65,6 +67,7 @@ def test_linear(self, config, sizes):
         quantized_and_compiled = compiled_linear(input)
         self.assertTrue(compute_error(original, quantized_and_compiled) > 20)
 
+    @skip_if_rocm("ROCm enablement in progress")
     @unittest.skip("Fix later")
     @parametrize("config", [BF16_ACT_CONFIG])
     def test_to_device(self, config):
@@ -81,6 +84,7 @@ def test_to_device(self, config):
         quantize_(linear, config)
         linear.to(device)
 
+    @skip_if_rocm("ROCm enablement in progress")
     @parametrize("config", [BF16_ACT_CONFIG])
     def test_module_path(self, config):
         linear = torch.nn.Linear(128, 256, dtype=torch.bfloat16)
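
For context, the fix simply decorates each CUDA-only test with skip_if_rocm, imported from torchao.testing.utils, so the test is reported as skipped rather than failed on ROCm CI runners while Marlin sparse int4 support is still being enabled there. As a rough illustration of what such a decorator does, here is a minimal sketch; it assumes ROCm builds are detected via torch.version.hip (a version string on ROCm builds of PyTorch, None otherwise) and is not the actual torchao implementation, which may differ in detail.

# Illustrative sketch only: not the real torchao.testing.utils.skip_if_rocm.
# Assumes ROCm builds of PyTorch expose a version string in torch.version.hip.
import functools
import unittest

import torch


def skip_if_rocm(reason="skipped on ROCm"):
    """Return a decorator that skips the wrapped test on ROCm (HIP) builds."""

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # torch.version.hip is None on CUDA/CPU builds, a string on ROCm.
            if torch.version.hip is not None:
                raise unittest.SkipTest(reason)
            return func(*args, **kwargs)

        return wrapper

    return decorator

Raising unittest.SkipTest inside the test body is what makes the runner count the test as skipped instead of failed, which is all the CI fix needs until the underlying ROCm kernels land.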
