Skip to content

Commit b671e25

Browse files
committed
update float16
1 parent 451790f commit b671e25

23 files changed

+74
-61
lines changed

tests/pipelines/animatediff/test_animatediff.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
from diffusers.utils import is_xformers_available, logging
2222
from diffusers.utils.testing_utils import (
2323
numpy_cosine_similarity_distance,
24-
require_non_cpu,
24+
require_accelerator,
2525
require_torch_gpu,
2626
slow,
2727
torch_device,

tests/pipelines/animatediff/test_animatediff_sdxl.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
UNetMotionModel,
1515
)
1616
from diffusers.utils import is_xformers_available, logging
17-
from diffusers.utils.testing_utils import require_non_cpu, torch_device
17+
from diffusers.utils.testing_utils import require_accelerator, torch_device
1818

1919
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_PARAMS
2020
from ..test_pipelines_common import (
@@ -212,7 +212,7 @@ def test_inference_batch_single_identical(
212212
max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
213213
assert max_diff < expected_max_diff
214214

215-
@require_non_cpu
215+
@require_accelerator
216216
def test_to_device(self):
217217
components = self.get_dummy_components()
218218
pipe = self.pipeline_class(**components)

tests/pipelines/animatediff/test_animatediff_sparsectrl.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
)
2121
from diffusers.utils import logging
2222
from diffusers.utils.import_utils import is_xformers_available
23-
from diffusers.utils.testing_utils import require_non_cpu, torch_device
23+
from diffusers.utils.testing_utils import require_accelerator, torch_device
2424

2525
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
2626
from ..test_pipelines_common import (
@@ -345,7 +345,7 @@ def test_inference_batch_single_identical_use_simplified_condition_embedding_tru
345345
max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
346346
assert max_diff < expected_max_diff
347347

348-
@require_non_cpu
348+
@require_accelerator
349349
def test_to_device(self):
350350
components = self.get_dummy_components()
351351
pipe = self.pipeline_class(**components)

tests/pipelines/animatediff/test_animatediff_video2video.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
)
2020
from diffusers.models.attention import FreeNoiseTransformerBlock
2121
from diffusers.utils import is_xformers_available, logging
22-
from diffusers.utils.testing_utils import require_non_cpu, torch_device
22+
from diffusers.utils.testing_utils import require_accelerator, torch_device
2323

2424
from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS
2525
from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin
@@ -258,7 +258,7 @@ def test_inference_batch_single_identical(
258258
max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
259259
assert max_diff < expected_max_diff
260260

261-
@require_non_cpu
261+
@require_accelerator
262262
def test_to_device(self):
263263
components = self.get_dummy_components()
264264
pipe = self.pipeline_class(**components)

tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
)
2121
from diffusers.models.attention import FreeNoiseTransformerBlock
2222
from diffusers.utils import is_xformers_available, logging
23-
from diffusers.utils.testing_utils import require_non_cpu, torch_device
23+
from diffusers.utils.testing_utils import require_accelerator, torch_device
2424

2525
from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS
2626
from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin
@@ -274,7 +274,7 @@ def test_inference_batch_single_identical(
274274
max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
275275
assert max_diff < expected_max_diff
276276

277-
@require_non_cpu
277+
@require_accelerator
278278
def test_to_device(self):
279279
components = self.get_dummy_components()
280280
pipe = self.pipeline_class(**components)

tests/pipelines/controlnet_xs/test_controlnetxs.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@
3838
is_torch_compile,
3939
load_image,
4040
load_numpy,
41-
require_non_cpu,
41+
require_accelerator,
4242
require_torch_2,
4343
require_torch_gpu,
4444
run_test_in_subprocess,
@@ -307,7 +307,7 @@ def test_multi_vae(self):
307307

308308
assert out_vae_np.shape == out_np.shape
309309

310-
@require_non_cpu
310+
@require_accelerator
311311
def test_to_device(self):
312312
components = self.get_dummy_components()
313313
pipe = self.pipeline_class(**components)

tests/pipelines/deepfloyd_if/test_if.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
)
2424
from diffusers.models.attention_processor import AttnAddedKVProcessor
2525
from diffusers.utils.import_utils import is_xformers_available
26-
from diffusers.utils.testing_utils import load_numpy, require_non_cpu, require_torch_gpu, skip_mps, slow, torch_device
26+
from diffusers.utils.testing_utils import load_numpy, require_accelerator, require_torch_gpu, skip_mps, slow, torch_device
2727

2828
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
2929
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference

tests/pipelines/deepfloyd_if/test_if_img2img.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
from diffusers.utils.testing_utils import (
2626
floats_tensor,
2727
load_numpy,
28-
require_non_cpu,
28+
require_accelerator,
2929
require_torch_gpu,
3030
skip_mps,
3131
slow,
@@ -78,12 +78,14 @@ def test_save_load_optional_components(self):
7878
def test_xformers_attention_forwardGenerator_pass(self):
7979
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
8080

81-
@require_non_cpu
81+
@unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
82+
@require_accelerator
8283
def test_save_load_float16(self):
8384
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
8485
super().test_save_load_float16(expected_max_diff=1e-1)
8586

86-
@require_non_cpu
87+
@unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
88+
@require_accelerator
8789
def test_float16_inference(self):
8890
super().test_float16_inference(expected_max_diff=1e-1)
8991

tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
from diffusers.utils.testing_utils import (
2626
floats_tensor,
2727
load_numpy,
28-
require_non_cpu,
28+
require_accelerator,
2929
require_torch_gpu,
3030
skip_mps,
3131
slow,
@@ -80,7 +80,8 @@ def test_xformers_attention_forwardGenerator_pass(self):
8080
def test_save_load_optional_components(self):
8181
self._test_save_load_optional_components()
8282

83-
@require_non_cpu
83+
@unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
84+
@require_accelerator
8485
def test_save_load_float16(self):
8586
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
8687
super().test_save_load_float16(expected_max_diff=1e-1)

tests/pipelines/deepfloyd_if/test_if_inpainting.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@
2525
from diffusers.utils.testing_utils import (
2626
floats_tensor,
2727
load_numpy,
28-
require_non_cpu,
28+
require_accelerator,
2929
require_torch_gpu,
3030
skip_mps,
3131
slow,
@@ -80,7 +80,8 @@ def test_xformers_attention_forwardGenerator_pass(self):
8080
def test_save_load_optional_components(self):
8181
self._test_save_load_optional_components()
8282

83-
@require_non_cpu
83+
@unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
84+
@require_accelerator
8485
def test_save_load_float16(self):
8586
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
8687
super().test_save_load_float16(expected_max_diff=1e-1)

0 commit comments

Comments (0)