Commit 98624dd

initial commit
1 parent 1b202c5 commit 98624dd

22 files changed: +201 additions, -120 deletions

tests/models/test_modeling_common.py

Lines changed: 8 additions & 7 deletions
@@ -59,6 +59,7 @@
     require_torch_2,
     require_torch_accelerator_with_training,
     require_torch_gpu,
+    require_torch_accelerator,
     require_torch_multi_gpu,
     run_test_in_subprocess,
     torch_all_close,
@@ -543,7 +544,7 @@ def test_set_xformers_attn_processor_for_determinism(self):
         assert torch.allclose(output, output_3, atol=self.base_precision)
         assert torch.allclose(output_2, output_3, atol=self.base_precision)

-    @require_torch_gpu
+    @require_torch_accelerator
     def test_set_attn_processor_for_determinism(self):
         if self.uses_custom_attn_processor:
             return
@@ -1068,7 +1069,7 @@ def test_wrong_adapter_name_raises_error(self):

         self.assertTrue(f"Adapter name {wrong_name} not found in the model." in str(err_context.exception))

-    @require_torch_gpu
+    @require_torch_accelerator
     def test_cpu_offload(self):
         config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
         model = self.model_class(**config).eval()
@@ -1098,7 +1099,7 @@ def test_cpu_offload(self):

         self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

-    @require_torch_gpu
+    @require_torch_accelerator
     def test_disk_offload_without_safetensors(self):
         config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
         model = self.model_class(**config).eval()
@@ -1132,7 +1133,7 @@ def test_disk_offload_without_safetensors(self):

         self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

-    @require_torch_gpu
+    @require_torch_accelerator
     def test_disk_offload_with_safetensors(self):
         config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
         model = self.model_class(**config).eval()
@@ -1191,7 +1192,7 @@ def test_model_parallelism(self):

         self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

-    @require_torch_gpu
+    @require_torch_accelerator
     def test_sharded_checkpoints(self):
         torch.manual_seed(0)
         config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
@@ -1223,7 +1224,7 @@ def test_sharded_checkpoints(self):

         self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

-    @require_torch_gpu
+    @require_torch_accelerator
     def test_sharded_checkpoints_with_variant(self):
         torch.manual_seed(0)
         config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
@@ -1261,7 +1262,7 @@ def test_sharded_checkpoints_with_variant(self):

         self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

-    @require_torch_gpu
+    @require_torch_accelerator
     def test_sharded_checkpoints_device_map(self):
         config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
         model = self.model_class(**config).eval()
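In this file the CUDA-only @require_torch_gpu gate on the offload, sharding, and determinism tests is replaced by @require_torch_accelerator, so they run on any supported backend. As a rough illustration only, not the library's actual implementation, such a decorator can be built on unittest.skipUnless, reusing the backend string that diffusers detects at import time:

import unittest

# Backend string detected by diffusers at import time ("cuda", "xpu", "mps", or "cpu").
from diffusers.utils.testing_utils import torch_device


def require_torch_accelerator(test_case):
    # Hypothetical sketch: skip unless a non-CPU backend was detected. The real
    # decorator shipped in diffusers.utils.testing_utils may check more than this.
    return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)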

tests/pipelines/allegro/test_allegro.py

Lines changed: 3 additions & 3 deletions
@@ -24,7 +24,7 @@
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
     numpy_cosine_similarity_distance,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -299,7 +299,7 @@ def test_vae_tiling(self, expected_diff_max: float = 0.2):


 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AllegroPipelineIntegrationTests(unittest.TestCase):
     prompt = "A painting of a squirrel eating a burger."

@@ -317,7 +317,7 @@ def test_allegro(self):
         generator = torch.Generator("cpu").manual_seed(0)

         pipe = AllegroPipeline.from_pretrained("rhymes-ai/Allegro", torch_dtype=torch.float16)
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         prompt = self.prompt

         videos = pipe(
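Besides the decorator swap on the integration class, this file passes the detected device to model CPU offload instead of relying on the default, so the offload hooks move submodules to XPU (or another accelerator) as well as CUDA. A minimal sketch of that usage, assuming a torch build with a matching accelerator and enough memory for the Allegro checkpoint:

import torch
from diffusers import AllegroPipeline
from diffusers.utils.testing_utils import torch_device

pipe = AllegroPipeline.from_pretrained("rhymes-ai/Allegro", torch_dtype=torch.float16)
# Explicit device: submodules are placed on torch_device on demand and returned
# to CPU afterwards, rather than assuming the default CUDA device.
pipe.enable_model_cpu_offload(device=torch_device)
videos = pipe(prompt="A painting of a squirrel eating a burger.", num_inference_steps=2)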

tests/pipelines/animatediff/test_animatediff.py

Lines changed: 11 additions & 4 deletions
@@ -23,6 +23,7 @@
     numpy_cosine_similarity_distance,
     require_accelerator,
     require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -547,19 +548,25 @@ def test_vae_slicing(self):


 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AnimateDiffPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        if torch_device == "cuda":
+            torch.cuda.empty_cache()
+        elif torch_device == "xpu":
+            torch.xpu.empty_cache()

     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        if torch_device == "cuda":
+            torch.cuda.empty_cache()
+        elif torch_device == "xpu":
+            torch.xpu.empty_cache()

     def test_animatediff(self):
         adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")
@@ -573,7 +580,7 @@ def test_animatediff(self):
            clip_sample=False,
        )
        pipe.enable_vae_slicing()
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "night, b&w photo of old house, post apocalypse, forest, storm weather, wind, rocks, 8k uhd, dslr, soft lighting, high quality, film grain"

tests/pipelines/cogvideo/test_cogvideox.py

Lines changed: 3 additions & 3 deletions
@@ -24,7 +24,7 @@
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
     numpy_cosine_similarity_distance,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -321,7 +321,7 @@ def test_fused_qkv_projections(self):


 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class CogVideoXPipelineIntegrationTests(unittest.TestCase):
     prompt = "A painting of a squirrel eating a burger."

@@ -339,7 +339,7 @@ def test_cogvideox(self):
         generator = torch.Generator("cpu").manual_seed(0)

         pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16)
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         prompt = self.prompt

         videos = pipe(
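One unchanged detail in these integration tests is worth noting for the accelerator-agnostic switch: the generator is created on CPU, so the initial noise is identical regardless of which backend runs the pipeline. A tiny illustration of that property:

import torch

generator = torch.Generator("cpu").manual_seed(0)
# The values are drawn by the CPU generator, so they do not depend on the
# accelerator that later consumes them.
noise = torch.randn(1, 4, 16, 16, generator=generator)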

tests/pipelines/cogvideo/test_cogvideox_image2video.py

Lines changed: 11 additions & 4 deletions
@@ -27,6 +27,7 @@
     enable_full_determinism,
     numpy_cosine_similarity_distance,
     require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -344,25 +345,31 @@ def test_fused_qkv_projections(self):


 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class CogVideoXImageToVideoPipelineIntegrationTests(unittest.TestCase):
     prompt = "A painting of a squirrel eating a burger."

     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        if torch_device == "cuda":
+            torch.cuda.empty_cache()
+        elif torch_device == "xpu":
+            torch.xpu.empty_cache()

     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        if torch_device == "cuda":
+            torch.cuda.empty_cache()
+        elif torch_device == "xpu":
+            torch.xpu.empty_cache()

     def test_cogvideox(self):
         generator = torch.Generator("cpu").manual_seed(0)

         pipe = CogVideoXImageToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16)
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)

         prompt = self.prompt
         image = load_image(

tests/pipelines/cogview3/test_cogview3plus.py

Lines changed: 3 additions & 3 deletions
@@ -24,7 +24,7 @@
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
     numpy_cosine_similarity_distance,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -232,7 +232,7 @@ def test_attention_slicing_forward_pass(


 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class CogView3PlusPipelineIntegrationTests(unittest.TestCase):
     prompt = "A painting of a squirrel eating a burger."

@@ -250,7 +250,7 @@ def test_cogview3plus(self):
         generator = torch.Generator("cpu").manual_seed(0)

         pipe = CogView3PlusPipeline.from_pretrained("THUDM/CogView3Plus-3b", torch_dtype=torch.float16)
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         prompt = self.prompt

         images = pipe(
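All of the integration classes touched in this commit remain marked @slow, so they are still skipped in a default test run; under the usual Hugging Face test conventions they are enabled by setting the RUN_SLOW environment variable, for example RUN_SLOW=1 python -m pytest tests/pipelines/cogview3/test_cogview3plus.py. With these changes they additionally require any available torch accelerator rather than CUDA specifically.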
