Skip to content

Commit f157e98

Browse files
committed
update
1 parent bad2a3e commit f157e98

File tree

3 files changed: +9 additions, -12 deletions

tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -290,13 +290,13 @@ def test_to_device(self):
290290
output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
291291
self.assertTrue(np.isnan(output_cpu).sum() == 0)
292292

293-
pipe.to("cuda")
293+
pipe.to(torch_device)
294294
model_devices = [
295295
component.device.type for component in pipe.components.values() if hasattr(component, "device")
296296
]
297-
self.assertTrue(all(device == "cuda" for device in model_devices))
297+
self.assertTrue(all(device == torch_device for device in model_devices))
298298

299-
output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
299+
output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
300300
self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
301301

302302
def test_to_dtype(self):

tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
enable_full_determinism,
2626
floats_tensor,
2727
numpy_cosine_similarity_distance,
28+
require_accelerate_version_greater,
2829
require_non_cpu,
2930
require_torch_gpu,
3031
slow,
@@ -428,10 +429,8 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
428429
max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
429430
self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results")
430431

431-
@unittest.skipIf(
432-
not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
433-
reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher",
434-
)
432+
@require_non_cpu
433+
@require_accelerate_version_greater("0.17.0")
435434
def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
436435
generator_device = "cpu"
437436
components = self.get_dummy_components()

tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,10 @@
2323
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
2424

2525
from diffusers import AutoencoderKL, DDIMScheduler, TextToVideoZeroSDXLPipeline, UNet2DConditionModel
26-
from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version
2726
from diffusers.utils.testing_utils import (
2827
enable_full_determinism,
2928
nightly,
29+
require_accelerate_version_greater,
3030
require_non_cpu,
3131
require_torch_gpu,
3232
torch_device,
@@ -261,10 +261,8 @@ def test_inference_batch_consistent(self):
261261
def test_inference_batch_single_identical(self):
262262
pass
263263

264-
@unittest.skipIf(
265-
not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
266-
reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher",
267-
)
264+
@require_non_cpu
265+
@require_accelerate_version_greater("0.17.0")
268266
def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
269267
components = self.get_dummy_components()
270268
pipe = self.pipeline_class(**components)

0 commit comments

Comments (0)