     UNet2DConditionModel,
 )
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     floats_tensor,
     is_flaky,
     load_image,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
+    torch_device,
 )
 
 from ..test_pipelines_common import PipelineTesterMixin
@@ -288,17 +290,17 @@ def test_marigold_depth_dummy_no_processing_resolution(self):
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class MarigoldDepthPipelineIntegrationTests(unittest.TestCase):
     def setUp(self):
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def _test_marigold_depth(
         self,
@@ -317,8 +319,7 @@ def _test_marigold_depth(
             from_pretrained_kwargs["torch_dtype"] = torch.float16
 
         pipe = MarigoldDepthPipeline.from_pretrained(model_id, **from_pretrained_kwargs)
-        if device == "cuda":
-            pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         pipe.set_progress_bar_config(disable=None)
 
         generator = torch.Generator(device=device).manual_seed(generator_seed)
@@ -358,7 +359,7 @@ def test_marigold_depth_einstein_f32_cpu_G0_S1_P32_E1_B1_M1(self):
     def test_marigold_depth_einstein_f32_cuda_G0_S1_P768_E1_B1_M1(self):
         self._test_marigold_depth(
             is_fp16=False,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.1244, 0.1265, 0.1292, 0.1240, 0.1252, 0.1266, 0.1246, 0.1226, 0.1180]),
             num_inference_steps=1,
@@ -371,7 +372,7 @@ def test_marigold_depth_einstein_f32_cuda_G0_S1_P768_E1_B1_M1(self):
     def test_marigold_depth_einstein_f16_cuda_G0_S1_P768_E1_B1_M1(self):
         self._test_marigold_depth(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.1241, 0.1262, 0.1290, 0.1238, 0.1250, 0.1265, 0.1244, 0.1225, 0.1179]),
             num_inference_steps=1,
@@ -384,7 +385,7 @@ def test_marigold_depth_einstein_f16_cuda_G0_S1_P768_E1_B1_M1(self):
     def test_marigold_depth_einstein_f16_cuda_G2024_S1_P768_E1_B1_M1(self):
         self._test_marigold_depth(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=2024,
             expected_slice=np.array([0.1710, 0.1725, 0.1738, 0.1700, 0.1700, 0.1696, 0.1698, 0.1663, 0.1592]),
             num_inference_steps=1,
@@ -397,7 +398,7 @@ def test_marigold_depth_einstein_f16_cuda_G2024_S1_P768_E1_B1_M1(self):
     def test_marigold_depth_einstein_f16_cuda_G0_S2_P768_E1_B1_M1(self):
         self._test_marigold_depth(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.1085, 0.1098, 0.1110, 0.1081, 0.1085, 0.1082, 0.1085, 0.1057, 0.0996]),
             num_inference_steps=2,
@@ -410,7 +411,7 @@ def test_marigold_depth_einstein_f16_cuda_G0_S2_P768_E1_B1_M1(self):
     def test_marigold_depth_einstein_f16_cuda_G0_S1_P512_E1_B1_M1(self):
         self._test_marigold_depth(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.2683, 0.2693, 0.2698, 0.2666, 0.2632, 0.2615, 0.2656, 0.2603, 0.2573]),
             num_inference_steps=1,
@@ -423,7 +424,7 @@ def test_marigold_depth_einstein_f16_cuda_G0_S1_P512_E1_B1_M1(self):
     def test_marigold_depth_einstein_f16_cuda_G0_S1_P768_E3_B1_M1(self):
         self._test_marigold_depth(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.1200, 0.1215, 0.1237, 0.1193, 0.1197, 0.1202, 0.1196, 0.1166, 0.1109]),
             num_inference_steps=1,
@@ -437,7 +438,7 @@ def test_marigold_depth_einstein_f16_cuda_G0_S1_P768_E3_B1_M1(self):
     def test_marigold_depth_einstein_f16_cuda_G0_S1_P768_E4_B2_M1(self):
         self._test_marigold_depth(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.1121, 0.1135, 0.1155, 0.1111, 0.1115, 0.1118, 0.1111, 0.1079, 0.1019]),
             num_inference_steps=1,
@@ -451,7 +452,7 @@ def test_marigold_depth_einstein_f16_cuda_G0_S1_P768_E4_B2_M1(self):
     def test_marigold_depth_einstein_f16_cuda_G0_S1_P512_E1_B1_M0(self):
         self._test_marigold_depth(
             is_fp16=True,
-            device="cuda",
+            device=torch_device,
             generator_seed=0,
             expected_slice=np.array([0.2671, 0.2690, 0.2720, 0.2659, 0.2676, 0.2739, 0.2664, 0.2686, 0.2573]),
             num_inference_steps=1,
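Every hunk in this diff applies the same device-agnostic pattern: CUDA-specific decorators and calls (`require_torch_gpu`, `torch.cuda.empty_cache()`, hard-coded `"cuda"`) are replaced by the accelerator helpers exposed in `diffusers.utils.testing_utils`. Below is a minimal sketch of that pattern in a standalone test class; the class and test names are hypothetical placeholders, and only the helpers that appear in the diff above (`require_torch_accelerator`, `backend_empty_cache`, `torch_device`, `slow`) are used.

```python
import gc
import unittest

import torch

from diffusers.utils.testing_utils import (
    backend_empty_cache,
    require_torch_accelerator,
    slow,
    torch_device,
)


@slow
@require_torch_accelerator  # skips the test when no supported accelerator is available
class ExampleAcceleratorAgnosticTests(unittest.TestCase):
    def setUp(self):
        super().setUp()
        gc.collect()
        # Frees cached memory on whatever backend `torch_device` resolves to
        # (e.g. "cuda" or "xpu") instead of calling torch.cuda.empty_cache() directly.
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def test_tensor_lands_on_accelerator(self):
        # Seeding a generator on `torch_device` keeps results reproducible per backend,
        # mirroring the generator handling in the pipeline test above.
        generator = torch.Generator(device=torch_device).manual_seed(0)
        sample = torch.rand((2, 2), generator=generator, device=torch_device)
        self.assertEqual(sample.device.type, torch.device(torch_device).type)
```

With this structure the same test file can run unchanged on CUDA or other accelerators supported by the test utilities, which is what the `device=torch_device` substitutions in the hunks above accomplish.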