Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 4 additions & 3 deletions tests/llmcompressor/pytorch/utils/test_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
tensors_to_device,
tensors_to_precision,
)
from tests.testing_utils import requires_gpu


@pytest.mark.unit
Expand Down Expand Up @@ -55,6 +56,7 @@ def test_tensors_to_device_cpu(tensors):
os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
reason="Skipping pytorch tests",
)
@requires_gpu
@pytest.mark.parametrize(
"tensors",
[
Expand All @@ -69,7 +71,6 @@ def test_tensors_to_device_cpu(tensors):
[[torch.randn(1, 8)], torch.randn(8, 8)],
],
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires cuda availability")
def test_tensors_to_device_cuda(tensors):
out = tensors_to_device(tensors, "cuda")

Expand Down Expand Up @@ -364,6 +365,7 @@ def test_tensors_module_forward(module, tensors, check_feat_lab_inp):
os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
reason="Skipping pytorch tests",
)
@requires_gpu
@pytest.mark.parametrize(
"module,tensors,check_feat_lab_inp",
[
Expand Down Expand Up @@ -417,7 +419,6 @@ def test_tensors_module_forward(module, tensors, check_feat_lab_inp):
),
],
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires cuda availability")
def test_tensors_module_forward_cuda(module, tensors, check_feat_lab_inp):
module = module.to("cuda")
tensors = tensors_to_device(tensors, "cuda")
Expand Down Expand Up @@ -471,6 +472,7 @@ def test_tensor_sparsity(tensor, dim, expected_sparsity):
os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
reason="Skipping pytorch tests",
)
@requires_gpu
@pytest.mark.parametrize(
"tensor,dim,expected_sparsity",
[
Expand All @@ -490,7 +492,6 @@ def test_tensor_sparsity(tensor, dim, expected_sparsity):
),
],
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires cuda availability")
def test_tensor_sparsity_cuda(tensor, dim, expected_sparsity):
tensor = tensor.to("cuda")
sparsity = tensor_sparsity(tensor, dim)
Expand Down
13 changes: 13 additions & 0 deletions tests/llmcompressor/transformers/compression/test_has_gpu.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
import os

import pytest
import torch


@pytest.mark.skipif(os.environ.get("GITHUB_ACTIONS") != "true", reason="Only run for GHA")
def test_has_gpu():
    """
    Sanity check for CI: fail loudly when a GitHub Actions runner
    is executing the transformers test suite without CUDA available.
    """
    gpu_available = torch.cuda.is_available()
    assert gpu_available
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
modify_save_pretrained,
patch_tied_tensors_bug,
)
from tests.testing_utils import requires_gpu


@pytest.mark.parametrize(
Expand Down Expand Up @@ -275,7 +276,7 @@ def test_model_reload(offload, torch_dtype, tie_word_embeddings, device_map, tmp
assert torch.equal(model_dict[key].cpu(), reloaded_dict[key].cpu())


@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires gpu")
@requires_gpu
@pytest.mark.parametrize(
"offload,torch_dtype,tie_word_embeddings,device_map",
[
Expand Down Expand Up @@ -340,7 +341,7 @@ def test_model_shared_tensors(
assert not torch.equal(lm_head, embed_tokens)


@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires gpu")
@requires_gpu
@pytest.mark.parametrize(
"offload,torch_dtype,tie_word_embeddings,device_map",
[
Expand All @@ -356,6 +357,7 @@ def test_model_shared_tensors_gpu(
)


@requires_gpu
@pytest.mark.parametrize(
"model_stub, recipe, sparse_format, quant_format",
[
Expand Down