Skip to content

Commit bfa287d

Browse files
committed
enable latent_diffusion, dance_diffusion, musicldm, shap_e integration UTs on xpu
Signed-off-by: Yao Matrix <[email protected]>
1 parent e66e7d7 commit bfa287d

File tree

5 files changed

+25
-21
lines changed

5 files changed

+25
-21
lines changed

tests/pipelines/dance_diffusion/test_dance_diffusion.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
import torch
2121

2222
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
23-
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device
23+
from diffusers.utils.testing_utils import backend_empty_cache, enable_full_determinism, nightly, require_torch_accelerator, require_torch_gpu, skip_mps, torch_device
2424

2525
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
2626
from ..test_pipelines_common import PipelineTesterMixin
@@ -116,19 +116,19 @@ def test_inference_batch_single_identical(self):
116116

117117

118118
@nightly
119-
@require_torch_gpu
119+
@require_torch_accelerator
120120
class PipelineIntegrationTests(unittest.TestCase):
121121
def setUp(self):
122122
# clean up the VRAM before each test
123123
super().setUp()
124124
gc.collect()
125-
torch.cuda.empty_cache()
125+
backend_empty_cache(torch_device)
126126

127127
def tearDown(self):
128128
# clean up the VRAM after each test
129129
super().tearDown()
130130
gc.collect()
131-
torch.cuda.empty_cache()
131+
backend_empty_cache(torch_device)
132132

133133
def test_dance_diffusion(self):
134134
device = torch_device

tests/pipelines/latent_diffusion/test_latent_diffusion.py

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -22,9 +22,11 @@
2222

2323
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
2424
from diffusers.utils.testing_utils import (
25+
backend_empty_cache,
2526
enable_full_determinism,
2627
load_numpy,
2728
nightly,
29+
require_torch_accelerator,
2830
require_torch_gpu,
2931
torch_device,
3032
)
@@ -136,17 +138,17 @@ def test_inference_text2img(self):
136138

137139

138140
@nightly
139-
@require_torch_gpu
141+
@require_torch_accelerator
140142
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
141143
def setUp(self):
142144
super().setUp()
143145
gc.collect()
144-
torch.cuda.empty_cache()
146+
backend_empty_cache(torch_device)
145147

146148
def tearDown(self):
147149
super().tearDown()
148150
gc.collect()
149-
torch.cuda.empty_cache()
151+
backend_empty_cache(torch_device)
150152

151153
def get_inputs(self, device, dtype=torch.float32, seed=0):
152154
generator = torch.manual_seed(seed)
@@ -177,17 +179,17 @@ def test_ldm_default_ddim(self):
177179

178180

179181
@nightly
180-
@require_torch_gpu
182+
@require_torch_accelerator
181183
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
182184
def setUp(self):
183185
super().setUp()
184186
gc.collect()
185-
torch.cuda.empty_cache()
187+
backend_empty_cache(torch_device)
186188

187189
def tearDown(self):
188190
super().tearDown()
189191
gc.collect()
190-
torch.cuda.empty_cache()
192+
backend_empty_cache(torch_device)
191193

192194
def get_inputs(self, device, dtype=torch.float32, seed=0):
193195
generator = torch.manual_seed(seed)

tests/pipelines/musicldm/test_musicldm.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@
3939
UNet2DConditionModel,
4040
)
4141
from diffusers.utils import is_xformers_available
42-
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
42+
from diffusers.utils.testing_utils import backend_empty_cache, enable_full_determinism, nightly, require_torch_accelerator, require_torch_gpu, torch_device
4343

4444
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
4545
from ..test_pipelines_common import PipelineTesterMixin
@@ -408,17 +408,17 @@ def test_to_dtype(self):
408408

409409

410410
@nightly
411-
@require_torch_gpu
411+
@require_torch_accelerator
412412
class MusicLDMPipelineNightlyTests(unittest.TestCase):
413413
def setUp(self):
414414
super().setUp()
415415
gc.collect()
416-
torch.cuda.empty_cache()
416+
backend_empty_cache(torch_device)
417417

418418
def tearDown(self):
419419
super().tearDown()
420420
gc.collect()
421-
torch.cuda.empty_cache()
421+
backend_empty_cache(torch_device)
422422

423423
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
424424
generator = torch.Generator(device=generator_device).manual_seed(seed)

tests/pipelines/shap_e/test_shap_e.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121

2222
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
2323
from diffusers.pipelines.shap_e import ShapERenderer
24-
from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, torch_device
24+
from diffusers.utils.testing_utils import backend_empty_cache, load_numpy, nightly, require_torch_accelerator, require_torch_gpu, torch_device
2525

2626
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
2727

@@ -222,19 +222,19 @@ def test_sequential_cpu_offload_forward_pass(self):
222222

223223

224224
@nightly
225-
@require_torch_gpu
225+
@require_torch_accelerator
226226
class ShapEPipelineIntegrationTests(unittest.TestCase):
227227
def setUp(self):
228228
# clean up the VRAM before each test
229229
super().setUp()
230230
gc.collect()
231-
torch.cuda.empty_cache()
231+
backend_empty_cache(torch_device)
232232

233233
def tearDown(self):
234234
# clean up the VRAM after each test
235235
super().tearDown()
236236
gc.collect()
237-
torch.cuda.empty_cache()
237+
backend_empty_cache(torch_device)
238238

239239
def test_shap_e(self):
240240
expected_image = load_numpy(

tests/pipelines/shap_e/test_shap_e_img2img.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,12 @@
2323
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
2424
from diffusers.pipelines.shap_e import ShapERenderer
2525
from diffusers.utils.testing_utils import (
26+
backend_empty_cache,
2627
floats_tensor,
2728
load_image,
2829
load_numpy,
2930
nightly,
31+
require_torch_accelerator,
3032
require_torch_gpu,
3133
torch_device,
3234
)
@@ -250,19 +252,19 @@ def test_sequential_cpu_offload_forward_pass(self):
250252

251253

252254
@nightly
253-
@require_torch_gpu
255+
@require_torch_accelerator
254256
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
255257
def setUp(self):
256258
# clean up the VRAM before each test
257259
super().setUp()
258260
gc.collect()
259-
torch.cuda.empty_cache()
261+
backend_empty_cache(torch_device)
260262

261263
def tearDown(self):
262264
# clean up the VRAM after each test
263265
super().tearDown()
264266
gc.collect()
265-
torch.cuda.empty_cache()
267+
backend_empty_cache(torch_device)
266268

267269
def test_shap_e_img2img(self):
268270
input_image = load_image(

0 commit comments

Comments (0)