Skip to content

Commit 9010372

Browse files
authored
skip accelerate tests (#208)
1 parent ff121cc commit 9010372

File tree

2 files changed

+24
-1
lines changed

2 files changed

+24
-1
lines changed

tests/test_quantization/lifecycle/test_apply.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,7 @@
2828
apply_quantization_status,
2929
)
3030
from compressed_tensors.quantization.utils import iter_named_leaf_modules
31+
from tests.testing_utils import requires_accelerate
3132
from transformers import AutoModelForCausalLM
3233

3334

@@ -224,6 +225,7 @@ def get_sample_tinyllama_quant_config(status: str = "frozen"):
224225
return QuantizationConfig.parse_obj(config_dict)
225226

226227

228+
@requires_accelerate()
227229
@pytest.mark.parametrize(
228230
"ignore,should_raise_warning",
229231
[

tests/testing_utils.py

Lines changed: 22 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,29 @@ def compressed_tensors_config_available():
2626
return False
2727

2828

29+
def accelerate_availabe():
    """Return True when the optional ``accelerate`` package can be imported.

    NOTE(review): the name carries a typo ("availabe"); it is deliberately
    left unchanged because the module-level cache below calls it by this
    exact name.
    """
    try:
        import accelerate  # noqa: F401
    except ImportError:
        return False
    else:
        return True
37+
38+
39+
# Probe the optional dependencies once at import time; the requires_*
# skip-mark helpers below reuse these cached booleans instead of
# re-attempting the imports for every decorated test.
_is_compressed_tensors_config_available = compressed_tensors_config_available()
_is_accelerate_available = accelerate_availabe()
41+
42+
2943
def requires_hf_quantizer():
    """Build a pytest skip-mark that skips the test unless the installed
    transformers version provides ``CompressedTensorsHfQuantizer``."""
    unavailable = not _is_compressed_tensors_config_available
    return pytest.mark.skipif(
        unavailable,
        reason="requires transformers>=4.45 to support CompressedTensorsHfQuantizer",
    )
48+
49+
50+
def requires_accelerate():
    """Build a pytest skip-mark that skips the test unless the optional
    ``accelerate`` package is installed."""
    unavailable = not _is_accelerate_available
    return pytest.mark.skipif(
        unavailable,
        reason="requires accelerate",
    )

0 commit comments

Comments
 (0)