
Commit e3e3a96
Commit message: tests
Parent: 8e1b6f5

2 files changed: +2 lines, -2 lines


tests/quantization/bnb/test_4bit.py
Lines changed: 1 addition & 1 deletion

@@ -485,7 +485,7 @@ def test_moving_to_cpu_throws_warning(self):
 
         assert "Pipelines loaded with `dtype=torch.float16`" in cap_logger.out
 
-    @require_accelerate_version_greater("1.1.0")
+    @require_accelerate_version_greater("1.0.0")
     def test_pipeline_cuda_placement_works_with_nf4(self):
         transformer_nf4_config = BitsAndBytesConfig(
             load_in_4bit=True,
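The change loosens the accelerate gate from 1.1.0 to 1.0.0, so the CUDA-placement test for an NF4-quantized component also runs with accelerate versions newer than 1.0.0 rather than only those newer than 1.1.0. Below is a minimal sketch of the gated test pattern seen in this hunk; the checkpoint ID, class name, and pipeline wiring are illustrative assumptions, not code from this commit.

# Minimal sketch of the version-gated NF4 placement test pattern.
# Assumptions (not from this commit): MODEL_ID, the test class name, and the
# pipeline wiring. The decorator and config classes are the ones used in the diff.
import unittest

import torch

from diffusers import BitsAndBytesConfig, SD3Transformer2DModel, StableDiffusion3Pipeline
from diffusers.utils.testing_utils import require_accelerate_version_greater

MODEL_ID = "stabilityai/stable-diffusion-3-medium-diffusers"  # assumed checkpoint


class ExampleNF4PlacementTest(unittest.TestCase):
    @require_accelerate_version_greater("1.0.0")  # skipped unless accelerate is newer than 1.0.0
    def test_pipeline_cuda_placement_works_with_nf4(self):
        nf4_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.float16,
        )
        transformer = SD3Transformer2DModel.from_pretrained(
            MODEL_ID, subfolder="transformer", quantization_config=nf4_config, torch_dtype=torch.float16
        )
        pipeline = StableDiffusion3Pipeline.from_pretrained(
            MODEL_ID, transformer=transformer, torch_dtype=torch.float16
        )
        # The point of the test: placing a pipeline that holds an NF4-quantized
        # component on CUDA should work with a new-enough accelerate.
        pipeline.to("cuda")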

tests/quantization/bnb/test_mixed_int8.py
Lines changed: 1 addition & 1 deletion

@@ -434,7 +434,7 @@ def test_generate_quality_dequantize(self):
             output_type="np",
         ).images
 
-    @require_accelerate_version_greater("1.1.0")
+    @require_accelerate_version_greater("1.0.0")
     def test_pipeline_cuda_placement_works_with_mixed_int8(self):
        transformer_8bit_config = BitsAndBytesConfig(load_in_8bit=True)
        transformer_8bit = SD3Transformer2DModel.from_pretrained(
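The int8 hunk applies the same gate change; only the quantization config differs. A short sketch of that variant, with the same caveat that the checkpoint ID and the trailing assertion are assumptions:

# Sketch of the int8 counterpart: same version gating, 8-bit config instead of NF4.
import unittest

import torch

from diffusers import BitsAndBytesConfig, SD3Transformer2DModel
from diffusers.utils.testing_utils import require_accelerate_version_greater

MODEL_ID = "stabilityai/stable-diffusion-3-medium-diffusers"  # assumed checkpoint


class ExampleInt8PlacementTest(unittest.TestCase):
    @require_accelerate_version_greater("1.0.0")  # lowered bound, as in this commit
    def test_pipeline_cuda_placement_works_with_mixed_int8(self):
        transformer_8bit_config = BitsAndBytesConfig(load_in_8bit=True)
        transformer_8bit = SD3Transformer2DModel.from_pretrained(
            MODEL_ID,
            subfolder="transformer",
            quantization_config=transformer_8bit_config,
            torch_dtype=torch.float16,
        )
        # The real test presumably continues by building a pipeline around this
        # component and placing it on CUDA; here we only check that loading works.
        self.assertIsNotNone(transformer_8bit)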
