Commit 34a0a7b

update decorator
1 parent da9c5c9 commit 34a0a7b

File tree

1 file changed (+16, -24 lines)

tests/pipelines/test_pipelines_common.py

Lines changed: 16 additions & 24 deletions
@@ -38,9 +38,11 @@
 from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
 from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.utils import logging
-from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version, is_xformers_available
+from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
     CaptureLogger,
+    require_accelerate_version_greater,
+    require_non_cpu,
     require_torch,
     skip_mps,
     torch_device,
@@ -770,10 +772,8 @@ def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3):
                     type(proc) == AttnProcessor for proc in component.attn_processors.values()
                 ), "`from_pipe` changed the attention processor in original pipeline."

-    @unittest.skipIf(
-        torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
-        reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher",
-    )
+    @require_non_cpu
+    @require_accelerate_version_greater("0.14.0")
     def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1e-3):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -1201,7 +1201,7 @@ def test_components_function(self):
         self.assertTrue(hasattr(pipe, "components"))
         self.assertTrue(set(pipe.components.keys()) == set(init_components.keys()))

-    @unittest.skipIf(torch_device == "cpu", reason="float16 requires a hardware accelerator")
+    @require_non_cpu
     def test_float16_inference(self, expected_max_diff=5e-2):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -1238,7 +1238,7 @@ def test_float16_inference(self, expected_max_diff=5e-2):
         max_diff = np.abs(to_np(output) - to_np(output_fp16)).max()
         self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.")

-    @unittest.skipIf(torch_device == "cpu", reason="float16 requires a hardware accelerator")
+    @require_non_cpu
     def test_save_load_float16(self, expected_max_diff=1e-2):
         components = self.get_dummy_components()
         for name, module in components.items():
@@ -1319,7 +1319,7 @@ def test_save_load_optional_components(self, expected_max_difference=1e-4):
         max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
         self.assertLess(max_diff, expected_max_difference)

-    @unittest.skipIf(torch_device == "cpu", reason="Hardware accelerator and CPU are required to switch devices")
+    @require_non_cpu
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -1393,10 +1393,8 @@ def _test_attention_slicing_forward_pass(
            assert_mean_pixel_difference(to_np(output_with_slicing1[0]), to_np(output_without_slicing[0]))
            assert_mean_pixel_difference(to_np(output_with_slicing2[0]), to_np(output_without_slicing[0]))

-    @unittest.skipIf(
-        torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
-        reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher",
-    )
+    @require_non_cpu
+    @require_accelerate_version_greater("0.14.0")
     def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
         import accelerate

@@ -1456,10 +1454,8 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
            f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}",
        )

-    @unittest.skipIf(
-        torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
-        reason="CPU offload is only available with hardware accelerator and `accelerate v0.17.0` or higher",
-    )
+    @require_non_cpu
+    @require_accelerate_version_greater("0.17.0")
     def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
         import accelerate

@@ -1513,10 +1509,8 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
            f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}",
        )

-    @unittest.skipIf(
-        torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
-        reason="CPU offload is only available with hardware accelerator and `accelerate v0.17.0` or higher",
-    )
+    @require_non_cpu
+    @require_accelerate_version_greater("0.17.0")
     def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4):
         import accelerate

@@ -1570,10 +1564,8 @@ def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4):
            f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}",
        )

-    @unittest.skipIf(
-        torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
-        reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher",
-    )
+    @require_non_cpu
+    @require_accelerate_version_greater("0.14.0")
     def test_sequential_offload_forward_pass_twice(self, expected_max_diff=2e-4):
         import accelerate

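The replacement decorators are imported from diffusers.utils.testing_utils in the first hunk. As a rough illustration of the pattern this commit switches to, the sketch below shows how skip decorators of this shape can be built on top of unittest.skipUnless. It only relies on is_accelerate_available and torch_device, which appear in the file's own imports; the bodies are an approximation of the idea, not the actual upstream implementations.

# Sketch only: approximates the imported helpers; the real code in
# diffusers.utils.testing_utils may differ.
import importlib.metadata
import unittest

from packaging import version

from diffusers.utils.import_utils import is_accelerate_available
from diffusers.utils.testing_utils import torch_device


def require_non_cpu(test_case):
    # Skip the test when the configured torch device is plain "cpu".
    return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)


def require_accelerate_version_greater(accelerate_version):
    # Skip the test unless accelerate is installed and newer than the given version.
    def decorator(test_case):
        new_enough = is_accelerate_available() and version.parse(
            importlib.metadata.version("accelerate")
        ) > version.parse(accelerate_version)
        return unittest.skipUnless(
            new_enough, f"test requires accelerate > {accelerate_version}"
        )(test_case)

    return decorator

Stacking the two decorators on a test method reproduces the old three-part skipIf condition (non-CPU device, accelerate installed, minimum accelerate version) without repeating it above every offload test.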