Skip to content

Commit b9b6d34

Browse files
committed
enable unidiffuser cases on XPU
Signed-off-by: Yao Matrix <[email protected]>
1 parent 4a9ab65 commit b9b6d34

File tree

2 files changed

+12
-11
lines changed

2 files changed

+12
-11
lines changed

tests/pipelines/test_pipelines_common.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1485,8 +1485,8 @@ def test_to_device(self):
14851485
model_devices = [component.device.type for component in components.values() if hasattr(component, "device")]
14861486
self.assertTrue(all(device == torch_device for device in model_devices))
14871487

1488-
output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
1489-
self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
1488+
output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
1489+
self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
14901490

14911491
def test_to_dtype(self):
14921492
components = self.get_dummy_components()
@@ -1677,11 +1677,11 @@ def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4):
16771677

16781678
pipe.set_progress_bar_config(disable=None)
16791679

1680-
pipe.enable_model_cpu_offload(device=torch_device)
1680+
pipe.enable_model_cpu_offload()
16811681
inputs = self.get_dummy_inputs(generator_device)
16821682
output_with_offload = pipe(**inputs)[0]
16831683

1684-
pipe.enable_model_cpu_offload(device=torch_device)
1684+
pipe.enable_model_cpu_offload()
16851685
inputs = self.get_dummy_inputs(generator_device)
16861686
output_with_offload_twice = pipe(**inputs)[0]
16871687

@@ -2226,7 +2226,7 @@ def create_pipe():
22262226

22272227
def enable_group_offload_on_component(pipe, group_offloading_kwargs):
22282228
# We intentionally don't test VAE's here. This is because some tests enable tiling on the VAE. If
2229-
# tiling is enabled and a forward pass is run, when cuda streams are used, the execution order of
2229+
# tiling is enabled and a forward pass is run, when accelerator streams are used, the execution order of
22302230
# the layers is not traced correctly. This causes errors. For apply group offloading to VAE, a
22312231
# warmup forward pass (even with dummy small inputs) is recommended.
22322232
for component_name in [

tests/pipelines/unidiffuser/test_unidiffuser.py

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
UniDiffuserTextDecoder,
2323
)
2424
from diffusers.utils.testing_utils import (
25+
backend_empty_cache,
2526
enable_full_determinism,
2627
floats_tensor,
2728
load_image,
@@ -584,17 +585,17 @@ def test_encode_prompt_works_in_isolation():
584585

585586

586587
@nightly
587-
@require_torch_gpu
588+
@require_torch_accelerator
588589
class UniDiffuserPipelineSlowTests(unittest.TestCase):
589590
def setUp(self):
590591
super().setUp()
591592
gc.collect()
592-
torch.cuda.empty_cache()
593+
backend_empty_cache(torch_device)
593594

594595
def tearDown(self):
595596
super().tearDown()
596597
gc.collect()
597-
torch.cuda.empty_cache()
598+
backend_empty_cache(torch_device)
598599

599600
def get_inputs(self, device, seed=0, generate_latents=False):
600601
generator = torch.manual_seed(seed)
@@ -705,17 +706,17 @@ def test_unidiffuser_compile(self, seed=0):
705706

706707

707708
@nightly
708-
@require_torch_gpu
709+
@require_torch_accelerator
709710
class UniDiffuserPipelineNightlyTests(unittest.TestCase):
710711
def setUp(self):
711712
super().setUp()
712713
gc.collect()
713-
torch.cuda.empty_cache()
714+
backend_empty_cache(torch_device)
714715

715716
def tearDown(self):
716717
super().tearDown()
717718
gc.collect()
718-
torch.cuda.empty_cache()
719+
backend_empty_cache(torch_device)
719720

720721
def get_inputs(self, device, seed=0, generate_latents=False):
721722
generator = torch.manual_seed(seed)

0 commit comments

Comments (0)