
Commit 1117427

update test marker
1 parent 48b67d6 commit 1117427

4 files changed: +24 -9 lines changed


src/diffusers/utils/testing_utils.py

Lines changed: 8 additions & 0 deletions

@@ -337,6 +337,14 @@ def require_note_seq(test_case):
     return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case)


+def require_non_cpu(test_case):
+    """
+    Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when no
+    hardware accelerator is available.
+    """
+    return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)
+
+
 def require_torchsde(test_case):
     """
     Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed.
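For context, a minimal sketch of how the new marker is meant to be used, assuming a diffusers build that includes this change; the test class and method names below are hypothetical and not part of this commit:

import unittest

from diffusers.utils.testing_utils import require_non_cpu, torch_device


class ExampleDeviceTests(unittest.TestCase):
    @require_non_cpu
    def test_runs_only_on_accelerator(self):
        # unittest reports this test as skipped when torch_device resolves to "cpu";
        # on "cuda", "mps", or another accelerator backend it runs normally.
        self.assertNotEqual(torch_device, "cpu")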

tests/pipelines/animatediff/test_animatediff.py

Lines changed: 8 additions & 2 deletions

@@ -19,7 +19,13 @@
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import (
+    numpy_cosine_similarity_distance,
+    require_non_cpu,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)

 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import (

@@ -272,7 +278,7 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device = "cpu", reason="Hardware Accelerator and CPU are required to switch devices")
+    @require_non_cpu
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)

tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py

Lines changed: 2 additions & 1 deletion

@@ -26,6 +26,7 @@
     floats_tensor,
     load_image,
     nightly,
+    require_non_cpu,
     require_torch,
     torch_device,
 )

@@ -93,7 +94,7 @@ def test_inference_superresolution(self):

         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

-    @unittest.skipIf(torch_device = "cpu", "This test requires a hardware accelerator")
+    @require_non_cpu
     def test_inference_superresolution_fp16(self):
         unet = self.dummy_uncond_unet
         scheduler = DDIMScheduler()

tests/pipelines/test_pipelines_common.py

Lines changed: 6 additions & 6 deletions

@@ -771,7 +771,7 @@ def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3):
         ), "`from_pipe` changed the attention processor in original pipeline."

     @unittest.skipIf(
-        torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
+        torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
         reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher",
     )
     def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1e-3):

@@ -1317,7 +1317,7 @@ def test_save_load_optional_components(self, expected_max_difference=1e-4):
         max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
         self.assertLess(max_diff, expected_max_difference)

-    @unittest.skipIf(torch_device = "cpu", reason="Hardware accelerator and CPU are required to switch devices")
+    @unittest.skipIf(torch_device == "cpu", reason="Hardware accelerator and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)

@@ -1392,7 +1392,7 @@ def _test_attention_slicing_forward_pass(
         assert_mean_pixel_difference(to_np(output_with_slicing2[0]), to_np(output_without_slicing[0]))

     @unittest.skipIf(
-        torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
+        torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
         reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher",
     )
     def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):

@@ -1455,7 +1455,7 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
         )

     @unittest.skipIf(
-        torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
+        torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
         reason="CPU offload is only available with hardware accelerator and `accelerate v0.17.0` or higher",
     )
     def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):

@@ -1512,7 +1512,7 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
         )

     @unittest.skipIf(
-        torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
+        torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
         reason="CPU offload is only available with hardware accelerator and `accelerate v0.17.0` or higher",
     )
     def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4):

@@ -1569,7 +1569,7 @@ def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4):
         )

     @unittest.skipIf(
-        torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
+        torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
         reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher",
     )
     def test_sequential_offload_forward_pass_twice(self, expected_max_diff=2e-4):
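A side note on the operator change in these hunks: inside the `unittest.skipIf(...)` call, `torch_device = "cpu"` parses as a keyword argument, which `skipIf` does not accept, so the test class fails at definition time with a TypeError; `torch_device == "cpu"` is the boolean condition `skipIf` expects. A self-contained sketch of the corrected pattern follows; the module-level `torch_device` stand-in and the test class are illustrative only, not taken from the diffusers test suite:

import unittest

torch_device = "cpu"  # stand-in for the device string that diffusers' testing_utils detects


class DeviceSwitchSketch(unittest.TestCase):
    # The comparison evaluates to a bool, so unittest simply skips the test on CPU-only runs.
    @unittest.skipIf(torch_device == "cpu", reason="Hardware accelerator and CPU are required to switch devices")
    def test_to_device(self):
        self.assertNotEqual(torch_device, "cpu")

    # Writing `@unittest.skipIf(torch_device = "cpu", reason=...)` instead would raise a
    # TypeError (unexpected keyword argument 'torch_device') as soon as the class body runs.


if __name__ == "__main__":
    unittest.main()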
