
Commit 2f3ad32

update

1 parent: a393860

11 files changed: +36 -87 lines changed

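Across the test files below, the per-test `gc.collect()` / `backend_empty_cache(torch_device)` boilerplate and the device-specific CUDA/XPU memory-stat resets are consolidated into a single `flush_memory()` helper imported from `diffusers.utils.testing_utils`. The helper's definition is not part of the hunks shown on this page; the following is a minimal sketch of what it plausibly does, inferred from the code it replaces (the signature and body are assumptions, not the committed implementation):

import gc

import torch


def flush_memory(device, gc_collect=False, reset_mem_stats=False):
    # Hypothetical sketch: the real helper lives in diffusers.utils.testing_utils and is
    # not shown in this diff; behavior is inferred from the boilerplate removed below.
    if gc_collect:
        gc.collect()

    if device == "cuda":
        torch.cuda.empty_cache()
        if reset_mem_stats:
            torch.cuda.reset_max_memory_allocated()
            torch.cuda.reset_peak_memory_stats()
    elif device == "xpu":
        torch.xpu.empty_cache()
        if reset_mem_stats:
            torch.xpu.reset_max_memory_allocated()
            torch.xpu.reset_peak_memory_stats()

The committed version may instead delegate to `backend_empty_cache` and related per-backend utilities; in the tests it is called as `flush_memory(torch_device, gc_collect=True)` in `setUp`/`tearDown`, and with `reset_mem_stats=True` before peak-memory measurements.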

tests/pipelines/animatediff/test_animatediff.py

Lines changed: 3 additions & 6 deletions
@@ -1,4 +1,3 @@
-import gc
 import unittest
 
 import numpy as np
@@ -20,7 +19,7 @@
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
 from diffusers.utils.testing_utils import (
-    backend_empty_cache,
+    flush_memory,
     numpy_cosine_similarity_distance,
     require_accelerator,
     require_torch_accelerator,
@@ -553,14 +552,12 @@ class AnimateDiffPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def test_animatediff(self):
         adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2")

tests/pipelines/cogvideo/test_cogvideox_image2video.py

Lines changed: 3 additions & 6 deletions
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import gc
 import inspect
 import unittest
 
@@ -24,8 +23,8 @@
 from diffusers import AutoencoderKLCogVideoX, CogVideoXImageToVideoPipeline, CogVideoXTransformer3DModel, DDIMScheduler
 from diffusers.utils import load_image
 from diffusers.utils.testing_utils import (
-    backend_empty_cache,
     enable_full_determinism,
+    flush_memory,
     numpy_cosine_similarity_distance,
     require_torch_accelerator,
     slow,
@@ -351,13 +350,11 @@ class CogVideoXImageToVideoPipelineIntegrationTests(unittest.TestCase):
 
     def setUp(self):
         super().setUp()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def tearDown(self):
         super().tearDown()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def test_cogvideox(self):
         generator = torch.Generator("cpu").manual_seed(0)

tests/pipelines/controlnet/test_controlnet.py

Lines changed: 6 additions & 18 deletions
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import gc
 import tempfile
 import traceback
 import unittest
@@ -34,8 +33,8 @@
 from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
-    backend_empty_cache,
     enable_full_determinism,
+    flush_memory,
     get_python_version,
     is_torch_compile,
     load_image,
@@ -704,13 +703,11 @@ def test_save_pretrained_raise_not_implemented_exception(self):
 class ControlNetPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def tearDown(self):
         super().tearDown()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def test_canny(self):
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
@@ -929,14 +926,7 @@ def test_seg(self):
         assert np.abs(expected_image - image).max() < 8e-2
 
     def test_sequential_cpu_offloading(self):
-        if torch_device == "cuda":
-            torch.cuda.empty_cache()
-            torch.cuda.reset_max_memory_allocated()
-            torch.cuda.reset_peak_memory_stats()
-        elif torch_device == "xpu":
-            torch.xpu.empty_cache()
-            torch.xpu.reset_max_memory_allocated()
-            torch.xpu.reset_peak_memory_stats()
+        flush_memory(torch_device, gc_collect=True, reset_mem_stats=True)
 
         controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg")
 
@@ -1077,13 +1067,11 @@ def test_v11_shuffle_global_pool_conditions(self):
 class StableDiffusionMultiControlNetPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def tearDown(self):
         super().tearDown()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def test_pose_and_canny(self):
         controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
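A side note on the test_sequential_cpu_offloading hunk above: `reset_mem_stats=True` stands in for the removed per-device reset calls, and since the old code here never called `gc.collect()`, passing `gc_collect=True` is a small behavioral addition. The reset matters because the test presumably reads the peak-memory counters afterwards, along the lines of this hypothetical fragment (the pipeline call and the budget are illustrative, not taken from this diff):

    flush_memory(torch_device, gc_collect=True, reset_mem_stats=True)
    _ = pipe(prompt, image=image, num_inference_steps=2, output_type="np")  # offloaded pipeline run (illustrative)
    mem_bytes = torch.cuda.max_memory_allocated()  # peak usage since the reset above; XPU runs would use torch.xpu
    assert mem_bytes < 4 * 10**9  # illustrative memory budget for sequential CPU offloading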

tests/pipelines/controlnet/test_controlnet_sdxl.py

Lines changed: 3 additions & 6 deletions
@@ -14,7 +14,6 @@
 # limitations under the License.
 
 import copy
-import gc
 import unittest
 
 import numpy as np
@@ -35,8 +34,8 @@
 from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
-    backend_empty_cache,
     enable_full_determinism,
+    flush_memory,
     load_image,
     require_torch_accelerator,
     slow,
@@ -894,13 +893,11 @@ def test_negative_conditions(self):
 class ControlNetSDXLPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def tearDown(self):
         super().tearDown()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def test_canny(self):
         controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0")

tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py

Lines changed: 3 additions & 6 deletions
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import gc
 import unittest
 
 import numpy as np
@@ -29,8 +28,8 @@
 from diffusers.models import HunyuanDiT2DControlNetModel, HunyuanDiT2DMultiControlNetModel
 from diffusers.utils import load_image
 from diffusers.utils.testing_utils import (
-    backend_empty_cache,
     enable_full_determinism,
+    flush_memory,
     require_torch_accelerator,
     slow,
     torch_device,
@@ -185,13 +184,11 @@ class HunyuanDiTControlNetPipelineSlowTests(unittest.TestCase):
 
     def setUp(self):
         super().setUp()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def tearDown(self):
         super().tearDown()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def test_canny(self):
         controlnet = HunyuanDiT2DControlNetModel.from_pretrained(

tests/pipelines/controlnet_xs/test_controlnetxs.py

Lines changed: 2 additions & 4 deletions
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import gc
 import traceback
 import unittest
 
@@ -34,8 +33,8 @@
 )
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
-    backend_empty_cache,
     enable_full_determinism,
+    flush_memory,
     is_torch_compile,
     load_image,
     load_numpy,
@@ -339,8 +338,7 @@ def test_to_device(self):
 class ControlNetXSPipelineSlowTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def test_canny(self):
         controlnet = ControlNetXSAdapter.from_pretrained(

tests/pipelines/controlnet_xs/test_controlnetxs_sdxl.py

Lines changed: 2 additions & 4 deletions
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import gc
 import unittest
 
 import numpy as np
@@ -32,8 +31,8 @@
 )
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
-    backend_empty_cache,
     enable_full_determinism,
+    flush_memory,
     load_image,
     require_torch_accelerator,
     slow,
@@ -380,8 +379,7 @@ def test_multi_vae(self):
 class StableDiffusionXLControlNetXSPipelineSlowTests(unittest.TestCase):
     def tearDown(self):
         super().tearDown()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def test_canny(self):
         controlnet = ControlNetXSAdapter.from_pretrained(

tests/pipelines/deepfloyd_if/test_if_img2img.py

Lines changed: 2 additions & 8 deletions
@@ -24,6 +24,7 @@
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
     floats_tensor,
+    flush_memory,
     load_numpy,
     require_accelerator,
     require_torch_accelerator,
@@ -125,14 +126,7 @@ def test_if_img2img(self):
         pipe.unet.set_attn_processor(AttnAddedKVProcessor())
         pipe.enable_model_cpu_offload(device=torch_device)
 
-        if torch_device == "cuda":
-            torch.cuda.reset_max_memory_allocated()
-            torch.cuda.empty_cache()
-            torch.cuda.reset_peak_memory_stats()
-        elif torch_device == "xpu":
-            torch.xpu.reset_max_memory_allocated()
-            torch.xpu.empty_cache()
-            torch.xpu.reset_peak_memory_stats()
+        flush_memory(torch_device, reset_mem_stats=True)
 
         image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
         generator = torch.Generator(device="cpu").manual_seed(0)

tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py

Lines changed: 4 additions & 14 deletions
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import gc
 import random
 import unittest
 
@@ -23,8 +22,8 @@
 from diffusers.models.attention_processor import AttnAddedKVProcessor
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.testing_utils import (
-    backend_empty_cache,
     floats_tensor,
+    flush_memory,
     load_numpy,
     require_accelerator,
     require_torch_accelerator,
@@ -105,14 +104,12 @@ class IFImg2ImgSuperResolutionPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
-        gc.collect()
-        torch.cuda.empty_cache()
+        flush_memory(torch_device, gc_collect=True)
 
     def test_if_img2img_superresolution(self):
         pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained(
@@ -123,14 +120,7 @@ def test_if_img2img_superresolution(self):
         pipe.unet.set_attn_processor(AttnAddedKVProcessor())
         pipe.enable_model_cpu_offload(device=torch_device)
 
-        if torch_device == "cuda":
-            torch.cuda.reset_max_memory_allocated()
-            torch.cuda.empty_cache()
-            torch.cuda.reset_peak_memory_stats()
-        elif torch_device == "xpu":
-            torch.xpu.reset_max_memory_allocated()
-            torch.xpu.empty_cache()
-            torch.xpu.reset_peak_memory_stats()
+        flush_memory(torch_device, reset_mem_stats=True)
 
         generator = torch.Generator(device="cpu").manual_seed(0)
 

tests/pipelines/i2vgen_xl/test_i2vgenxl.py

Lines changed: 3 additions & 6 deletions
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import gc
 import random
 import unittest
 
@@ -36,9 +35,9 @@
 from diffusers.models.unets import I2VGenXLUNet
 from diffusers.utils import is_xformers_available, load_image
 from diffusers.utils.testing_utils import (
-    backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
+    flush_memory,
     numpy_cosine_similarity_distance,
     require_torch_accelerator,
     skip_mps,
@@ -232,14 +231,12 @@ class I2VGenXLPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
    def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
-        gc.collect()
-        backend_empty_cache(torch_device)
+        flush_memory(torch_device, gc_collect=True)
 
     def test_i2vgen_xl(self):
         pipe = I2VGenXLPipeline.from_pretrained("ali-vilab/i2vgen-xl", torch_dtype=torch.float16, variant="fp16")
