Skip to content

Commit bf5e0b2

Browse files
committed
Merge branch 'xpu_more3' into enable_xpu
2 parents 496e0be + 769d713 commit bf5e0b2

25 files changed

+62
-89
lines changed

tests/models/test_modeling_common.py

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,7 @@
4848
require_torch_2,
4949
require_torch_accelerator_with_training,
5050
require_torch_gpu,
51+
require_torch_accelerator,
5152
require_torch_multi_gpu,
5253
run_test_in_subprocess,
5354
torch_device,
@@ -405,7 +406,7 @@ def test_set_xformers_attn_processor_for_determinism(self):
405406
assert torch.allclose(output, output_3, atol=self.base_precision)
406407
assert torch.allclose(output_2, output_3, atol=self.base_precision)
407408

408-
@require_torch_gpu
409+
@require_torch_accelerator
409410
def test_set_attn_processor_for_determinism(self):
410411
if self.uses_custom_attn_processor:
411412
return
@@ -752,7 +753,7 @@ def test_deprecated_kwargs(self):
752753
" from `_deprecated_kwargs = [<deprecated_argument>]`"
753754
)
754755

755-
@require_torch_gpu
756+
@require_torch_accelerator
756757
def test_cpu_offload(self):
757758
config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
758759
model = self.model_class(**config).eval()
@@ -782,7 +783,7 @@ def test_cpu_offload(self):
782783

783784
self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
784785

785-
@require_torch_gpu
786+
@require_torch_accelerator
786787
def test_disk_offload_without_safetensors(self):
787788
config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
788789
model = self.model_class(**config).eval()
@@ -816,7 +817,7 @@ def test_disk_offload_without_safetensors(self):
816817

817818
self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
818819

819-
@require_torch_gpu
820+
@require_torch_accelerator
820821
def test_disk_offload_with_safetensors(self):
821822
config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
822823
model = self.model_class(**config).eval()
@@ -875,7 +876,7 @@ def test_model_parallelism(self):
875876

876877
self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
877878

878-
@require_torch_gpu
879+
@require_torch_accelerator
879880
def test_sharded_checkpoints(self):
880881
torch.manual_seed(0)
881882
config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
@@ -907,7 +908,7 @@ def test_sharded_checkpoints(self):
907908

908909
self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
909910

910-
@require_torch_gpu
911+
@require_torch_accelerator
911912
def test_sharded_checkpoints_with_variant(self):
912913
torch.manual_seed(0)
913914
config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
@@ -944,7 +945,7 @@ def test_sharded_checkpoints_with_variant(self):
944945

945946
self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
946947

947-
@require_torch_gpu
948+
@require_torch_accelerator
948949
def test_sharded_checkpoints_device_map(self):
949950
config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
950951
model = self.model_class(**config).eval()

tests/pipelines/amused/test_amused.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,11 +22,10 @@
2222
from diffusers import AmusedPipeline, AmusedScheduler, UVit2DModel, VQModel
2323
from diffusers.utils.testing_utils import (
2424
enable_full_determinism,
25-
require_torch_gpu,
25+
require_torch_accelerator,
2626
slow,
2727
torch_device,
2828
)
29-
3029
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
3130
from ..test_pipelines_common import PipelineTesterMixin
3231

@@ -129,7 +128,7 @@ def test_inference_batch_single_identical(self):
129128

130129

131130
@slow
132-
@require_torch_gpu
131+
@require_torch_accelerator
133132
class AmusedPipelineSlowTests(unittest.TestCase):
134133
def test_amused_256(self):
135134
pipe = AmusedPipeline.from_pretrained("amused/amused-256")

tests/pipelines/amused/test_amused_img2img.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,11 +23,10 @@
2323
from diffusers.utils import load_image
2424
from diffusers.utils.testing_utils import (
2525
enable_full_determinism,
26-
require_torch_gpu,
26+
require_torch_accelerator,
2727
slow,
2828
torch_device,
2929
)
30-
3130
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
3231
from ..test_pipelines_common import PipelineTesterMixin
3332

@@ -131,7 +130,7 @@ def test_inference_batch_single_identical(self):
131130

132131

133132
@slow
134-
@require_torch_gpu
133+
@require_torch_accelerator
135134
class AmusedImg2ImgPipelineSlowTests(unittest.TestCase):
136135
def test_amused_256(self):
137136
pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256")

tests/pipelines/amused/test_amused_inpaint.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,11 +23,10 @@
2323
from diffusers.utils import load_image
2424
from diffusers.utils.testing_utils import (
2525
enable_full_determinism,
26-
require_torch_gpu,
26+
require_torch_accelerator,
2727
slow,
2828
torch_device,
2929
)
30-
3130
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
3231
from ..test_pipelines_common import PipelineTesterMixin
3332

@@ -135,7 +134,7 @@ def test_inference_batch_single_identical(self):
135134

136135

137136
@slow
138-
@require_torch_gpu
137+
@require_torch_accelerator
139138
class AmusedInpaintPipelineSlowTests(unittest.TestCase):
140139
def test_amused_256(self):
141140
pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-256")

tests/pipelines/animatediff/test_animatediff_controlnet.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -281,7 +281,6 @@ def test_inference_batch_single_identical(
281281
max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
282282
assert max_diff < expected_max_diff
283283

284-
@unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
285284
def test_to_device(self):
286285
components = self.get_dummy_components()
287286
pipe = self.pipeline_class(**components)
@@ -297,14 +296,14 @@ def test_to_device(self):
297296
output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
298297
self.assertTrue(np.isnan(output_cpu).sum() == 0)
299298

300-
pipe.to("cuda")
299+
pipe.to(torch_device)
301300
model_devices = [
302301
component.device.type for component in pipe.components.values() if hasattr(component, "device")
303302
]
304-
self.assertTrue(all(device == "cuda" for device in model_devices))
303+
self.assertTrue(all(device == torch_device for device in model_devices))
305304

306-
output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
307-
self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
305+
output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
306+
self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
308307

309308
def test_to_dtype(self):
310309
components = self.get_dummy_components()

tests/pipelines/animatediff/test_animatediff_sdxl.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -212,7 +212,6 @@ def test_inference_batch_single_identical(
212212
max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
213213
assert max_diff < expected_max_diff
214214

215-
@unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
216215
def test_to_device(self):
217216
components = self.get_dummy_components()
218217
pipe = self.pipeline_class(**components)
@@ -228,14 +227,14 @@ def test_to_device(self):
228227
output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
229228
self.assertTrue(np.isnan(output_cpu).sum() == 0)
230229

231-
pipe.to("cuda")
230+
pipe.to(torch_device)
232231
model_devices = [
233232
component.device.type for component in pipe.components.values() if hasattr(component, "device")
234233
]
235-
self.assertTrue(all(device == "cuda" for device in model_devices))
234+
self.assertTrue(all(device == torch_device for device in model_devices))
236235

237-
output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
238-
self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
236+
output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
237+
self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
239238

240239
def test_to_dtype(self):
241240
components = self.get_dummy_components()

tests/pipelines/animatediff/test_animatediff_sparsectrl.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -345,7 +345,6 @@ def test_inference_batch_single_identical_use_simplified_condition_embedding_tru
345345
max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
346346
assert max_diff < expected_max_diff
347347

348-
@unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
349348
def test_to_device(self):
350349
components = self.get_dummy_components()
351350
pipe = self.pipeline_class(**components)
@@ -361,13 +360,13 @@ def test_to_device(self):
361360
output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
362361
self.assertTrue(np.isnan(output_cpu).sum() == 0)
363362

364-
pipe.to("cuda")
363+
pipe.to(torch_device)
365364
model_devices = [
366365
component.device.type for component in pipe.components.values() if hasattr(component, "device")
367366
]
368-
self.assertTrue(all(device == "cuda" for device in model_devices))
367+
self.assertTrue(all(device == torch_device for device in model_devices))
369368

370-
output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
369+
output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
371370
self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
372371

373372
def test_to_dtype(self):

tests/pipelines/animatediff/test_animatediff_video2video.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -258,7 +258,6 @@ def test_inference_batch_single_identical(
258258
max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
259259
assert max_diff < expected_max_diff
260260

261-
@unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
262261
def test_to_device(self):
263262
components = self.get_dummy_components()
264263
pipe = self.pipeline_class(**components)
@@ -274,14 +273,14 @@ def test_to_device(self):
274273
output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
275274
self.assertTrue(np.isnan(output_cpu).sum() == 0)
276275

277-
pipe.to("cuda")
276+
pipe.to(torch_device)
278277
model_devices = [
279278
component.device.type for component in pipe.components.values() if hasattr(component, "device")
280279
]
281-
self.assertTrue(all(device == "cuda" for device in model_devices))
280+
self.assertTrue(all(device == torch_device for device in model_devices))
282281

283-
output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
284-
self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
282+
output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
283+
self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
285284

286285
def test_to_dtype(self):
287286
components = self.get_dummy_components()

tests/pipelines/controlnet_xs/test_controlnetxs.py

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -306,7 +306,6 @@ def test_multi_vae(self):
306306

307307
assert out_vae_np.shape == out_np.shape
308308

309-
@unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
310309
def test_to_device(self):
311310
components = self.get_dummy_components()
312311
pipe = self.pipeline_class(**components)
@@ -322,14 +321,14 @@ def test_to_device(self):
322321
output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
323322
self.assertTrue(np.isnan(output_cpu).sum() == 0)
324323

325-
pipe.to("cuda")
324+
pipe.to(torch_device)
326325
model_devices = [
327326
component.device.type for component in pipe.components.values() if hasattr(component, "device")
328327
]
329-
self.assertTrue(all(device == "cuda" for device in model_devices))
328+
self.assertTrue(all(device == torch_device for device in model_devices))
330329

331-
output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
332-
self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
330+
output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
331+
self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
333332

334333

335334
@slow

tests/pipelines/deepfloyd_if/test_if.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,6 @@ def get_dummy_inputs(self, device, seed=0):
5858
def test_save_load_optional_components(self):
5959
self._test_save_load_optional_components()
6060

61-
@unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
6261
def test_save_load_float16(self):
6362
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
6463
super().test_save_load_float16(expected_max_diff=1e-1)

0 commit comments

Comments (0)