@@ -37,7 +37,7 @@
     UNet2DConditionModel,
 )
 from diffusers.utils import is_xformers_available
-from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device
+from diffusers.utils.testing_utils import backend_empty_cache, enable_full_determinism, nightly, torch_device
 
 from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -378,12 +378,12 @@ class AudioLDMPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
         generator = torch.Generator(device=generator_device).manual_seed(seed)
@@ -423,12 +423,12 @@ class AudioLDMPipelineNightlyTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
         generator = torch.Generator(device=generator_device).manual_seed(seed)
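The change swaps the CUDA-only torch.cuda.empty_cache() for the device-agnostic backend_empty_cache(torch_device) helper imported from diffusers.utils.testing_utils, so the slow and nightly test fixtures release accelerator memory regardless of which backend the suite runs on. As a rough illustration of the idea only (this is a hedged sketch, not the actual diffusers implementation; the branches below are assumptions), such a dispatcher can look like this:

import torch


def backend_empty_cache(device: str) -> None:
    # Free the caching allocator of whichever accelerator backend the test
    # suite selected as torch_device (e.g. "cuda", "xpu", "mps", or "cpu").
    if device == "cuda":
        torch.cuda.empty_cache()
    elif device == "xpu" and hasattr(torch, "xpu"):
        torch.xpu.empty_cache()
    elif device == "mps" and hasattr(torch, "mps"):
        torch.mps.empty_cache()
    # "cpu" has no caching allocator, so there is nothing to flush.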