From 275b6af5556bcd8334f12427543c8e4c7623ff8b Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 14:47:17 -0700
Subject: [PATCH 01/51] enable on xpu

---
 tests/pipelines/test_pipelines_common.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index 49da08e2ca45..4c54cbd4aee7 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -1394,7 +1394,7 @@ def _test_attention_slicing_forward_pass(
         assert_mean_pixel_difference(to_np(output_with_slicing2[0]), to_np(output_without_slicing[0]))
 
     @unittest.skipIf(
-        torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
+        not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
         reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher",
     )
     def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
@@ -1412,8 +1412,8 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
         inputs = self.get_dummy_inputs(generator_device)
         output_without_offload = pipe(**inputs)[0]
 
-        pipe.enable_sequential_cpu_offload()
-        assert pipe._execution_device.type == "cuda"
+        pipe.enable_sequential_cpu_offload(device=torch_device)
+        assert pipe._execution_device.type == torch_device
 
         inputs = self.get_dummy_inputs(generator_device)
         output_with_offload = pipe(**inputs)[0]

From f422d55f23cd4ceaef1fd48aec03ebb0b27fe67b Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 15:09:25 -0700
Subject: [PATCH 02/51] add 1 more

---
 tests/pipelines/test_pipelines_common.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index 4c54cbd4aee7..95262616ee36 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -1457,7 +1457,7 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
     )
 
     @unittest.skipIf(
-        torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
+        not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
         reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher",
     )
     def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
@@ -1477,8 +1477,8 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
         inputs = self.get_dummy_inputs(generator_device)
         output_without_offload = pipe(**inputs)[0]
 
-        pipe.enable_model_cpu_offload()
-        assert pipe._execution_device.type == "cuda"
+        pipe.enable_model_cpu_offload(device=torch_device)
+        assert pipe._execution_device.type == torch_device
 
         inputs = self.get_dummy_inputs(generator_device)
         output_with_offload = pipe(**inputs)[0]

From 84540d95259d7bcc371000eb395722b6f8e1aba8 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 15:12:29 -0700
Subject: [PATCH 03/51] add one more

---
 tests/pipelines/test_pipelines_common.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index 95262616ee36..9e7b9c03a996 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -1514,7 +1514,7 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
     )
 
     @unittest.skipIf(
-        torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
+        not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
         reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher",
     )
     def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4):
@@ -1530,11 +1530,11 @@ def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4):
 
         pipe.set_progress_bar_config(disable=None)
 
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         inputs = self.get_dummy_inputs(generator_device)
         output_with_offload = pipe(**inputs)[0]
 
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         inputs = self.get_dummy_inputs(generator_device)
         output_with_offload_twice = pipe(**inputs)[0]
 
@@ -1571,7 +1571,7 @@ def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4):
     )
 
     @unittest.skipIf(
-        torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
+        not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
         reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher",
     )
     def test_sequential_offload_forward_pass_twice(self, expected_max_diff=2e-4):
@@ -1587,11 +1587,11 @@ def test_sequential_offload_forward_pass_twice(self, expected_max_diff=2e-4):
 
         pipe.set_progress_bar_config(disable=None)
 
-        pipe.enable_sequential_cpu_offload()
+        pipe.enable_sequential_cpu_offload(device=torch_device)
         inputs = self.get_dummy_inputs(generator_device)
         output_with_offload = pipe(**inputs)[0]
 
-        pipe.enable_sequential_cpu_offload()
+        pipe.enable_sequential_cpu_offload(device=torch_device)
         inputs = self.get_dummy_inputs(generator_device)
         output_with_offload_twice = pipe(**inputs)[0]
 

From b94f7513d61fa75a19fe7d7d1903555f3fc7c96a Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 15:20:04 -0700
Subject: [PATCH 04/51] enable more

---
 tests/pipelines/test_pipelines_common.py | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index 9e7b9c03a996..0c803c01318d 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -771,7 +771,7 @@ def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3):
         ), "`from_pipe` changed the attention processor in original pipeline."
 
     @unittest.skipIf(
-        torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
+        not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
         reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher",
     )
     def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1e-3):
@@ -780,7 +780,7 @@ def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1
         for component in pipe.components.values():
             if hasattr(component, "set_default_attn_processor"):
                 component.set_default_attn_processor()
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
         pipe.set_progress_bar_config(disable=None)
         inputs = self.get_dummy_inputs_pipe(torch_device)
         output = pipe(**inputs)[0]
@@ -815,7 +815,7 @@ def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1
             if hasattr(component, "set_default_attn_processor"):
                 component.set_default_attn_processor()
 
-        pipe_from_original.enable_model_cpu_offload()
+        pipe_from_original.enable_model_cpu_offload(device=torch_device)
         pipe_from_original.set_progress_bar_config(disable=None)
         inputs = self.get_dummy_inputs_pipe(torch_device)
         output_from_original = pipe_from_original(**inputs)[0]
@@ -1201,7 +1201,6 @@ def test_components_function(self):
         self.assertTrue(hasattr(pipe, "components"))
         self.assertTrue(set(pipe.components.keys()) == set(init_components.keys()))
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_float16_inference(self, expected_max_diff=5e-2):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -1238,7 +1237,6 @@ def test_float16_inference(self, expected_max_diff=5e-2):
         max_diff = np.abs(to_np(output) - to_np(output_fp16)).max()
         self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.")
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_save_load_float16(self, expected_max_diff=1e-2):
         components = self.get_dummy_components()
         for name, module in components.items():
@@ -1319,7 +1317,6 @@ def test_save_load_optional_components(self, expected_max_difference=1e-4):
         max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
         self.assertLess(max_diff, expected_max_difference)
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -1332,11 +1329,11 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [component.device.type for component in components.values() if hasattr(component, "device")]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
+        output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
         self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
 
     def test_to_dtype(self):

From fa508a4f384b61dde95e40b8cb54713c7aa230ec Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 15:22:02 -0700
Subject: [PATCH 05/51] add 1 more

---
 tests/pipelines/test_pipelines_common.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py
index 0c803c01318d..778a45277885 100644
--- a/tests/pipelines/test_pipelines_common.py
+++ b/tests/pipelines/test_pipelines_common.py
@@ -772,7 +772,7 @@ def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3):
 
     @unittest.skipIf(
         not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
-        reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher",
+        reason="CPU offload is only available with `accelerate v0.14.0` or higher",
     )
     def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1e-3):
         components = self.get_dummy_components()
@@ -1392,7 +1392,7 @@ def _test_attention_slicing_forward_pass(
 
     @unittest.skipIf(
         not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
-        reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher",
+        reason="CPU offload is only available with `accelerate v0.14.0` or higher",
     )
     def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
         import accelerate
@@ -1455,7 +1455,7 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
 
     @unittest.skipIf(
         not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
-        reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher",
+        reason="CPU offload is only available with `accelerate v0.17.0` or higher",
     )
     def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
         import accelerate
@@ -1512,7 +1512,7 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
 
     @unittest.skipIf(
         not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
-        reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher",
+        reason="CPU offload is only available with `accelerate v0.17.0` or higher",
     )
     def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4):
         import accelerate
@@ -1569,7 +1569,7 @@ def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4):
 
     @unittest.skipIf(
         not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
-        reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher",
+        reason="CPU offload is only available with `accelerate v0.14.0` or higher",
     )
     def test_sequential_offload_forward_pass_twice(self, expected_max_diff=2e-4):
         import accelerate

From a04fedfa5153eda2a12498ec0b95cc1f436fb0e9 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 15:54:24 -0700
Subject: [PATCH 06/51] add more

---
 tests/pipelines/animatediff/test_animatediff.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/tests/pipelines/animatediff/test_animatediff.py b/tests/pipelines/animatediff/test_animatediff.py
index 54c83d6a1b68..c387af4cb63d 100644
--- a/tests/pipelines/animatediff/test_animatediff.py
+++ b/tests/pipelines/animatediff/test_animatediff.py
@@ -272,7 +272,6 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -288,13 +287,13 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
+        output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
         self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
 
     def test_to_dtype(self):

From 7442dc13365553697bd7ce2f74fa8b3b1ead25d9 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 16:20:33 -0700
Subject: [PATCH 07/51] enable 1

---
 .../latent_diffusion/test_latent_diffusion_superresolution.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py
index 9b9a8ef65572..304ef1e495f8 100644
--- a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py
+++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py
@@ -93,7 +93,6 @@ def test_inference_superresolution(self):
 
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 
-    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
     def test_inference_superresolution_fp16(self):
         unet = self.dummy_uncond_unet
         scheduler = DDIMScheduler()

From 658789a1bfcb20d28ba80d7f144f555bf2836d95 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 16:24:16 -0700
Subject: [PATCH 08/51] enable more cases

---
 .../semantic_stable_diffusion/test_semantic_diffusion.py | 1 -
 .../stable_diffusion_2/test_stable_diffusion_upscale.py | 1 -
 .../pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py | 1 -
 tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py | 1 -
 4 files changed, 4 deletions(-)

diff --git a/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py b/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
index 1cc3111c2631..a68cce505a62 100644
--- a/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
+++ b/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
@@ -237,7 +237,6 @@ def test_semantic_diffusion_no_safety_checker(self):
         image = pipe("example prompt", num_inference_steps=2).images[0]
         assert image is not None
 
-    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
     def test_semantic_diffusion_fp16(self):
         """Test that stable diffusion works with fp16"""
         unet = self.dummy_cond_unet
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
index c21da7af6d2c..66d1573d57ff 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
@@ -289,7 +289,6 @@ def test_stable_diffusion_upscale_prompt_embeds(self):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
         assert np.abs(image_from_prompt_embeds_slice.flatten() - expected_slice).max() < 1e-2
 
-    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
     def test_stable_diffusion_upscale_fp16(self):
         """Test that stable diffusion upscale works with fp16"""
         unet = self.dummy_cond_unet_upscale
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
index 703c3b7a39d8..e8fd0eb4342b 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
@@ -213,7 +213,6 @@ def test_stable_diffusion_v_pred_k_euler(self):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
         assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
 
-    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
     def test_stable_diffusion_v_pred_fp16(self):
         """Test that stable diffusion v-prediction works with fp16"""
         unet = self.dummy_cond_unet
diff --git a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
index 14100ea03dc1..203fef6a3a5a 100644
--- a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
+++ b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py
@@ -228,7 +228,6 @@ def test_stable_diffusion_no_safety_checker(self):
         image = pipe("example prompt", num_inference_steps=2).images[0]
         assert image is not None
 
-    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
     def test_stable_diffusion_fp16(self):
         """Test that stable diffusion works with fp16"""
         unet = self.dummy_cond_unet

From 06c77c70ff056415f6f76342c257b03634612826 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 16:34:39 -0700
Subject: [PATCH 09/51] enable

---
 .../stable_diffusion_2/test_stable_diffusion_depth.py | 4 ++--
 .../text_to_video_synthesis/test_text_to_video_zero_sdxl.py | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
index 9a3a93acd6ce..650a9c17ad19 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
@@ -250,7 +250,7 @@ def test_float16_inference(self):
         self.assertLess(max_diff, 1.3e-2, "The outputs of the fp16 and fp32 pipelines are too different.")
 
     @unittest.skipIf(
-        torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
+        not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
         reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher",
     )
     def test_cpu_offload_forward_pass(self):
@@ -262,7 +262,7 @@ def test_cpu_offload_forward_pass(self):
         inputs = self.get_dummy_inputs(torch_device)
         output_without_offload = pipe(**inputs)[0]
 
-        pipe.enable_sequential_cpu_offload()
+        pipe.enable_sequential_cpu_offload(device=torch_device)
 
         inputs = self.get_dummy_inputs(torch_device)
         output_with_offload = pipe(**inputs)[0]
diff --git a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
index 8bef0cede154..9b1bc12f5c66 100644
--- a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
+++ b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
@@ -256,7 +256,7 @@ def test_inference_batch_single_identical(self):
         pass
 
     @unittest.skipIf(
-        torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
+        not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
         reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher",
     )
     def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
@@ -268,7 +268,7 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
         inputs = self.get_dummy_inputs(self.generator_device)
         output_without_offload = pipe(**inputs)[0]
 
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
 
         inputs = self.get_dummy_inputs(self.generator_device)
         output_with_offload = pipe(**inputs)[0]

From 1bf9b0c0d5285be08b7127253a539708428f8486 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 18:02:39 -0700
Subject: [PATCH 10/51] enable

---
 .../stable_video_diffusion/test_stable_video_diffusion.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
index 60fc21e2027b..666bd3ea67a8 100644
--- a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
+++ b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
@@ -403,8 +403,8 @@ def test_to_dtype(self):
         self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes))
 
     @unittest.skipIf(
-        torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
-        reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher",
+        not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
+        reason="CPU offload is only available with `accelerate v0.14.0` or higher",
     )
     def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
         components = self.get_dummy_components()
@@ -419,7 +419,7 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
         inputs = self.get_dummy_inputs(generator_device)
         output_without_offload = pipe(**inputs).frames[0]
 
-        pipe.enable_sequential_cpu_offload()
+        pipe.enable_sequential_cpu_offload(device=torch_device)
 
         inputs = self.get_dummy_inputs(generator_device)
         output_with_offload = pipe(**inputs).frames[0]

From 6296e8821b93360f12338405d7626bf488144751 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 18:04:21 -0700
Subject: [PATCH 11/51] update comment

---
 .../pipelines/stable_diffusion_2/test_stable_diffusion_depth.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
index 650a9c17ad19..5f3460d61723 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
@@ -251,7 +251,7 @@ def test_float16_inference(self):
 
     @unittest.skipIf(
         not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
-        reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher",
+        reason="CPU offload is only available with `accelerate v0.14.0` or higher",
     )
     def test_cpu_offload_forward_pass(self):
         components = self.get_dummy_components()

From 6d0b37871ca56bb1c3dfcb434f9a2e5257ae3bed Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 18:08:37 -0700
Subject: [PATCH 12/51] one more

---
 .../stable_video_diffusion/test_stable_video_diffusion.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
index 666bd3ea67a8..d988d1ce721c 100644
--- a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
+++ b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
@@ -428,7 +428,7 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):
         self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results")
 
     @unittest.skipIf(
-        torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
+        not is_accelerate_available() or is_accelerate_version("<", "0.17.0"),
         reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher",
     )
     def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
@@ -446,7 +446,7 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
         inputs = self.get_dummy_inputs(generator_device)
         output_without_offload = pipe(**inputs).frames[0]
 
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
 
         inputs = self.get_dummy_inputs(generator_device)
         output_with_offload = pipe(**inputs).frames[0]

From 0d9f975491a0408b0a9f1d7954842ab9a95272c2 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 18:10:51 -0700
Subject: [PATCH 13/51] enable 1

---
 tests/pipelines/deepfloyd_if/test_if.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/pipelines/deepfloyd_if/test_if.py b/tests/pipelines/deepfloyd_if/test_if.py
index 0818665ea113..139561918a66 100644
--- a/tests/pipelines/deepfloyd_if/test_if.py
+++ b/tests/pipelines/deepfloyd_if/test_if.py
@@ -58,7 +58,6 @@ def get_dummy_inputs(self, device, seed=0):
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)

From 732ff3ac354e1f79fa392de1320e42c1fd16686d Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 18:23:25 -0700
Subject: [PATCH 14/51] add more cases

---
 tests/pipelines/deepfloyd_if/test_if_img2img.py | 2 --
 .../pipelines/stable_diffusion_2/test_stable_diffusion_depth.py | 2 --
 .../text_to_video_synthesis/test_text_to_video_zero_sdxl.py | 2 --
 3 files changed, 6 deletions(-)

diff --git a/tests/pipelines/deepfloyd_if/test_if_img2img.py b/tests/pipelines/deepfloyd_if/test_if_img2img.py
index b71cb05e50ae..ce1167dc4158 100644
--- a/tests/pipelines/deepfloyd_if/test_if_img2img.py
+++ b/tests/pipelines/deepfloyd_if/test_if_img2img.py
@@ -70,12 +70,10 @@ def test_save_load_optional_components(self):
     def test_xformers_attention_forwardGenerator_pass(self):
         self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_float16_inference(self):
         super().test_float16_inference(expected_max_diff=1e-1)
 
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
index 5f3460d61723..9a10116cae5e 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
@@ -197,7 +197,6 @@ def test_save_load_local(self):
         max_diff = np.abs(output - output_loaded).max()
         self.assertLess(max_diff, 1e-4)
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_save_load_float16(self):
         components = self.get_dummy_components()
         for name, module in components.items():
@@ -229,7 +228,6 @@ def test_save_load_float16(self):
         max_diff = np.abs(output - output_loaded).max()
         self.assertLess(max_diff, 2e-2, "The output of the fp16 pipeline changed after saving and loading.")
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_float16_inference(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
diff --git a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
index 9b1bc12f5c66..4e8693145077 100644
--- a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
+++ b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
@@ -213,7 +213,6 @@ def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):
         max_diff = np.abs(to_np(output) - to_np(output_tuple)).max()
         self.assertLess(max_diff, expected_max_difference)
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_float16_inference(self, expected_max_diff=5e-2):
         components = self.get_dummy_components()
         for name, module in components.items():
@@ -279,7 +278,6 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
     def test_pipeline_call_signature(self):
         pass
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_save_load_float16(self, expected_max_diff=1e-2):
         components = self.get_dummy_components()
         for name, module in components.items():

From 3803d93b5cd354ec3095146858a785c233e251ab Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Mon, 9 Sep 2024 22:15:17 -0700
Subject: [PATCH 15/51] enable xpu

---
 .../stable_video_diffusion/test_stable_video_diffusion.py | 1 -
 tests/single_file/single_file_testing_utils.py | 4 ++--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
index d988d1ce721c..9b29d05ad1e1 100644
--- a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
+++ b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
@@ -250,7 +250,6 @@ def test_float16_inference(self, expected_max_diff=5e-2):
         max_diff = np.abs(to_np(output) - to_np(output_fp16)).max()
         self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.")
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_save_load_float16(self, expected_max_diff=1e-2):
         components = self.get_dummy_components()
         for name, module in components.items():
diff --git a/tests/single_file/single_file_testing_utils.py b/tests/single_file/single_file_testing_utils.py
index 9b89578c5a8c..d4f6ec994231 100644
--- a/tests/single_file/single_file_testing_utils.py
+++ b/tests/single_file/single_file_testing_utils.py
@@ -156,14 +156,14 @@ def test_single_file_components_with_original_config_local_files_only(
     def test_single_file_format_inference_is_same_as_pretrained(self, expected_max_diff=1e-4):
         sf_pipe = self.pipeline_class.from_single_file(self.ckpt_path, safety_checker=None)
         sf_pipe.unet.set_attn_processor(AttnProcessor())
-        sf_pipe.enable_model_cpu_offload()
+        sf_pipe.enable_model_cpu_offload(device=torch_device)
 
         inputs = self.get_inputs(torch_device)
         image_single_file = sf_pipe(**inputs).images[0]
 
         pipe = self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None)
         pipe.unet.set_attn_processor(AttnProcessor())
-        pipe.enable_model_cpu_offload()
+        pipe.enable_model_cpu_offload(device=torch_device)
 
         inputs = self.get_inputs(torch_device)
         image = pipe(**inputs).images[0]

From e00bcca71d555081fea9c2f9e1647ec1a709bbfd Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Thu, 12 Sep 2024 22:38:07 -0700
Subject: [PATCH 16/51] add one more case

---
 .../pipelines/animatediff/test_animatediff_controlnet.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/pipelines/animatediff/test_animatediff_controlnet.py b/tests/pipelines/animatediff/test_animatediff_controlnet.py
index 519d848c6dc2..eaf3d0b9805f 100644
--- a/tests/pipelines/animatediff/test_animatediff_controlnet.py
+++ b/tests/pipelines/animatediff/test_animatediff_controlnet.py
@@ -281,7 +281,6 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -297,14 +296,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
     def test_to_dtype(self):
         components = self.get_dummy_components()

From 6513201db6877ff9aa01b5da8d7edfcc80b92c22 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Thu, 12 Sep 2024 22:46:24 -0700
Subject: [PATCH 17/51] add more cases

---
 tests/pipelines/animatediff/test_animatediff_sdxl.py | 9 ++++-----
 .../pipelines/animatediff/test_animatediff_sparsectrl.py | 7 +++----
 .../animatediff/test_animatediff_video2video.py | 9 ++++-----
 tests/pipelines/controlnet_xs/test_controlnetxs.py | 9 ++++-----
 tests/pipelines/pag/test_pag_animatediff.py | 9 ++++-----
 tests/pipelines/pia/test_pia.py | 9 ++++-----
 .../test_stable_video_diffusion.py | 9 ++++-----
 .../test_text_to_video_zero_sdxl.py | 9 ++++-----
 8 files changed, 31 insertions(+), 39 deletions(-)

diff --git a/tests/pipelines/animatediff/test_animatediff_sdxl.py b/tests/pipelines/animatediff/test_animatediff_sdxl.py
index 2db0139154e9..f8d5487b2b2f 100644
--- a/tests/pipelines/animatediff/test_animatediff_sdxl.py
+++ b/tests/pipelines/animatediff/test_animatediff_sdxl.py
@@ -212,7 +212,6 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -228,14 +227,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
     def test_to_dtype(self):
         components = self.get_dummy_components()
diff --git a/tests/pipelines/animatediff/test_animatediff_sparsectrl.py b/tests/pipelines/animatediff/test_animatediff_sparsectrl.py
index 189d6765de4f..d0241de17276 100644
--- a/tests/pipelines/animatediff/test_animatediff_sparsectrl.py
+++ b/tests/pipelines/animatediff/test_animatediff_sparsectrl.py
@@ -345,7 +345,6 @@ def test_inference_batch_single_identical_use_simplified_condition_embedding_tru
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -361,13 +360,13 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
+        output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0]
         self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
 
     def test_to_dtype(self):
diff --git a/tests/pipelines/animatediff/test_animatediff_video2video.py b/tests/pipelines/animatediff/test_animatediff_video2video.py
index c3fd4c73736a..7b1adcf1ff0b 100644
--- a/tests/pipelines/animatediff/test_animatediff_video2video.py
+++ b/tests/pipelines/animatediff/test_animatediff_video2video.py
@@ -258,7 +258,6 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -274,14 +273,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
     def test_to_dtype(self):
         components = self.get_dummy_components()
diff --git a/tests/pipelines/controlnet_xs/test_controlnetxs.py b/tests/pipelines/controlnet_xs/test_controlnetxs.py
index bb0306741fdb..b779650bc86e 100644
--- a/tests/pipelines/controlnet_xs/test_controlnetxs.py
+++ b/tests/pipelines/controlnet_xs/test_controlnetxs.py
@@ -306,7 +306,6 @@ def test_multi_vae(self):
 
                 assert out_vae_np.shape == out_np.shape
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -322,14 +321,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
 
 @slow
diff --git a/tests/pipelines/pag/test_pag_animatediff.py b/tests/pipelines/pag/test_pag_animatediff.py
index 7efe8002d17c..ccee2ed65134 100644
--- a/tests/pipelines/pag/test_pag_animatediff.py
+++ b/tests/pipelines/pag/test_pag_animatediff.py
@@ -218,7 +218,6 @@ def test_dict_tuple_outputs_equivalent(self):
         expected_slice = np.array([0.5295, 0.3947, 0.5300, 0.4864, 0.4518, 0.5315, 0.5440, 0.4775, 0.5538])
         return super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice)
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
    def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -234,14 +233,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
     def test_to_dtype(self):
         components = self.get_dummy_components()
diff --git a/tests/pipelines/pia/test_pia.py b/tests/pipelines/pia/test_pia.py
index ca558fbb83e5..1f8565664743 100644
--- a/tests/pipelines/pia/test_pia.py
+++ b/tests/pipelines/pia/test_pia.py
@@ -278,7 +278,6 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -294,14 +293,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device))[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
     def test_to_dtype(self):
         components = self.get_dummy_components()
diff --git a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
index 9b29d05ad1e1..d7156ad1a635 100644
--- a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
+++ b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
@@ -365,7 +365,6 @@ def test_save_load_local(self, expected_max_difference=9e-4):
         max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
         self.assertLess(max_diff, expected_max_difference)
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -380,14 +379,14 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu")).frames[0]
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [
             component.device.type for component in pipe.components.values() if hasattr(component, "device")
         ]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cuda")).frames[0]
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs(torch_device)).frames[0]
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
     def test_to_dtype(self):
         components = self.get_dummy_components()
diff --git a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
index 4e8693145077..739dfa1ec52f 100644
--- a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
+++ b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
@@ -329,7 +329,6 @@ def test_save_load_optional_components(self):
     def test_sequential_cpu_offload_forward_pass(self):
         pass
 
-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -342,12 +341,12 @@ def test_to_device(self):
         output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0]  # generator set to cpu
         self.assertTrue(np.isnan(output_cpu).sum() == 0)
 
-        pipe.to("cuda")
+        pipe.to(torch_device)
         model_devices = [component.device.type for component in components.values() if hasattr(component, "device")]
-        self.assertTrue(all(device == "cuda" for device in model_devices))
+        self.assertTrue(all(device == torch_device for device in model_devices))
 
-        output_cuda = pipe(**self.get_dummy_inputs("cpu"))[0]  # generator set to cpu
-        self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)
+        output_device = pipe(**self.get_dummy_inputs("cpu"))[0]  # generator set to cpu
+        self.assertTrue(np.isnan(to_np(output_device)).sum() == 0)
 
     @unittest.skip(
         reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor."
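A note on the pattern so far: patches 01 through 17 make the same device-agnostic change everywhere, replacing hardcoded `torch_device != "cuda"` guards and `pipe.to("cuda")` calls with the `torch_device` string that diffusers.utils.testing_utils resolves for the test host, and passing that device to the offload helpers. A minimal sketch of the resulting test shape is below; `check_offload_is_lossless` and `get_dummy_inputs` are illustrative names rather than helpers taken from the diffs, and the tolerance is arbitrary.

import numpy as np

from diffusers.utils.testing_utils import torch_device  # resolves to "cuda", "xpu", or "cpu"


def check_offload_is_lossless(pipe, get_dummy_inputs, expected_max_diff=1e-4):
    # Baseline pass on whatever accelerator the host provides.
    pipe.to(torch_device)
    output = pipe(**get_dummy_inputs(torch_device))[0]

    # Offload against the resolved device instead of assuming CUDA; the
    # pipeline's execution device should then match the same backend.
    pipe.enable_model_cpu_offload(device=torch_device)
    assert pipe._execution_device.type == torch_device

    output_offload = pipe(**get_dummy_inputs(torch_device))[0]

    # Dummy-pipeline outputs are numpy arrays, so a plain max-abs diff works.
    assert np.abs(output - output_offload).max() < expected_max_diff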
From 3cb0f960266ef9532a42a1d728fa21662def9bf4 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Thu, 12 Sep 2024 23:01:51 -0700
Subject: [PATCH 18/51] add 1

---
 tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py b/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py
index dc0cf9826b62..0e567e813c15 100644
--- a/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py
+++ b/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py
@@ -72,7 +72,6 @@ def test_xformers_attention_forwardGenerator_pass(self):
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)

From 314e76df641c9c09a5194dbfa87fb892483afb16 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Thu, 12 Sep 2024 23:06:28 -0700
Subject: [PATCH 19/51] add more

---
 tests/pipelines/deepfloyd_if/test_if_inpainting.py | 1 -
 .../pipelines/deepfloyd_if/test_if_inpainting_superresolution.py | 1 -
 tests/pipelines/deepfloyd_if/test_if_superresolution.py | 1 -
 3 files changed, 3 deletions(-)

diff --git a/tests/pipelines/deepfloyd_if/test_if_inpainting.py b/tests/pipelines/deepfloyd_if/test_if_inpainting.py
index df0cecd8c307..8e8b33dbd433 100644
--- a/tests/pipelines/deepfloyd_if/test_if_inpainting.py
+++ b/tests/pipelines/deepfloyd_if/test_if_inpainting.py
@@ -72,7 +72,6 @@ def test_xformers_attention_forwardGenerator_pass(self):
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)
diff --git a/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py b/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py
index 2e9f64773289..9cde7d7af868 100644
--- a/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py
+++ b/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py
@@ -74,7 +74,6 @@ def test_xformers_attention_forwardGenerator_pass(self):
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)
diff --git a/tests/pipelines/deepfloyd_if/test_if_superresolution.py b/tests/pipelines/deepfloyd_if/test_if_superresolution.py
index 2e3c8c6e0e15..ad28faf7e8ff 100644
--- a/tests/pipelines/deepfloyd_if/test_if_superresolution.py
+++ b/tests/pipelines/deepfloyd_if/test_if_superresolution.py
@@ -67,7 +67,6 @@ def test_xformers_attention_forwardGenerator_pass(self):
     def test_save_load_optional_components(self):
         self._test_save_load_optional_components()
 
-    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
     def test_save_load_float16(self):
         # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
         super().test_save_load_float16(expected_max_diff=1e-1)

From 65c95e99ee33555a619011650e99efeddd399329 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Thu, 12 Sep 2024 23:39:48 -0700
Subject: [PATCH 20/51] add more cases

---
 tests/pipelines/amused/test_amused.py | 4 ++--
 tests/pipelines/amused/test_amused_img2img.py | 4 ++--
 tests/pipelines/amused/test_amused_inpaint.py | 4 ++--
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/tests/pipelines/amused/test_amused.py b/tests/pipelines/amused/test_amused.py
index 9a9e2551d642..ca6cdaad4a31 100644
--- a/tests/pipelines/amused/test_amused.py
+++ b/tests/pipelines/amused/test_amused.py
@@ -21,7 +21,7 @@
 from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
 
 from diffusers import AmusedPipeline, AmusedScheduler, UVit2DModel, VQModel
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device
 
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -130,7 +130,7 @@ def test_inference_batch_single_identical(self):
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AmusedPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedPipeline.from_pretrained("amused/amused-256")
diff --git a/tests/pipelines/amused/test_amused_img2img.py b/tests/pipelines/amused/test_amused_img2img.py
index 24bc34d330e9..82274608b2e1 100644
--- a/tests/pipelines/amused/test_amused_img2img.py
+++ b/tests/pipelines/amused/test_amused_img2img.py
@@ -22,7 +22,7 @@
 
 from diffusers import AmusedImg2ImgPipeline, AmusedScheduler, UVit2DModel, VQModel
 from diffusers.utils import load_image
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -134,7 +134,7 @@ def test_inference_batch_single_identical(self):
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AmusedImg2ImgPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256")
diff --git a/tests/pipelines/amused/test_amused_inpaint.py b/tests/pipelines/amused/test_amused_inpaint.py
index d0c1ed09c706..245288bbc904 100644
--- a/tests/pipelines/amused/test_amused_inpaint.py
+++ b/tests/pipelines/amused/test_amused_inpaint.py
@@ -22,7 +22,7 @@
 
 from diffusers import AmusedInpaintPipeline, AmusedScheduler, UVit2DModel, VQModel
 from diffusers.utils import load_image
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device
 
 from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
 from ..test_pipelines_common import PipelineTesterMixin
@@ -138,7 +138,7 @@ def test_inference_batch_single_identical(self):
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AmusedInpaintPipelineSlowTests(unittest.TestCase):
     def test_amused_256(self):
         pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-256")

From 2882ee419ccd649bd5f0c3b59ca78eef86c6d747 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Thu, 12 Sep 2024 23:52:22 -0700
Subject: [PATCH 21/51] add case

---
 .../test_stable_diffusion_attend_and_excite.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py
index 4c2b3a3c1e85..1caad9500b24 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py
@@ -30,7 +30,7 @@
     load_numpy,
     nightly,
     numpy_cosine_similarity_distance,
-    require_torch_gpu,
+    require_torch_accelerator,
     skip_mps,
     torch_device,
 )
@@ -205,7 +205,7 @@ def test_from_pipe_consistent_forward_pass_cpu_offload(self):
         super().test_from_pipe_consistent_forward_pass_cpu_offload(expected_max_diff=5e-3)
 
 
-@require_torch_gpu
+@require_torch_accelerator
 @nightly
 class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
     # Attend and excite requires being able to run a backward pass at
@@ -237,7 +237,7 @@ def test_attend_and_excite_fp16(self):
         pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
             "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
         )
-        pipe.to("cuda")
+        pipe.to(torch_device)
 
         prompt = "a painting of an elephant with glasses"
         token_indices = [5, 7]

From f604bcebb4c5a02ed34555ea627d5e1a678ba01f Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Fri, 13 Sep 2024 00:04:41 -0700
Subject: [PATCH 22/51] enable

---
 tests/models/test_modeling_common.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py
index b56ac233ef29..ba66c3e50a5f 100644
--- a/tests/models/test_modeling_common.py
+++ b/tests/models/test_modeling_common.py
@@ -48,6 +48,7 @@
     require_torch_2,
     require_torch_accelerator_with_training,
     require_torch_gpu,
+    require_torch_accelerator,
     require_torch_multi_gpu,
     run_test_in_subprocess,
     torch_device,
@@ -405,7 +406,7 @@ def test_set_xformers_attn_processor_for_determinism(self):
         assert torch.allclose(output, output_3, atol=self.base_precision)
         assert torch.allclose(output_2, output_3, atol=self.base_precision)
 
-    @require_torch_gpu
+    @require_torch_accelerator
     def test_set_attn_processor_for_determinism(self):
         if self.uses_custom_attn_processor:
             return

From 0c66254c77195f3e34a7bedcdcd5b1231a549a6b Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Fri, 13 Sep 2024 00:26:45 -0700
Subject: [PATCH 23/51] add more

---
 tests/models/test_modeling_common.py | 6 +++---
 tests/single_file/test_model_controlnet_single_file.py | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py
index ba66c3e50a5f..37a653100fae 100644
--- a/tests/models/test_modeling_common.py
+++ b/tests/models/test_modeling_common.py
@@ -876,7 +876,7 @@ def test_model_parallelism(self):
 
         self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
 
-    @require_torch_gpu
+    @require_torch_accelerator
     def test_sharded_checkpoints(self):
         torch.manual_seed(0)
         config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
@@ -908,7 +908,7 @@ def test_sharded_checkpoints(self):
 
         self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
 
-    @require_torch_gpu
+    @require_torch_accelerator
     def test_sharded_checkpoints_with_variant(self):
         torch.manual_seed(0)
         config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
@@ -945,7 +945,7 @@ def test_sharded_checkpoints_with_variant(self):
 
         self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
 
-    @require_torch_gpu
+    @require_torch_accelerator
     def test_sharded_checkpoints_device_map(self):
         config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
         model = self.model_class(**config).eval()
diff --git a/tests/single_file/test_model_controlnet_single_file.py b/tests/single_file/test_model_controlnet_single_file.py
index 1d5b790ebb4a..43fca2f72b1b 100644
--- a/tests/single_file/test_model_controlnet_single_file.py
+++ b/tests/single_file/test_model_controlnet_single_file.py
@@ -23,7 +23,7 @@
 )
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
 )
 
@@ -32,7 +32,7 @@
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class ControlNetModelSingleFileTests(unittest.TestCase):
     model_class = ControlNetModel
     ckpt_path = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"

From c699d7a7e0c63c22cf027f7414c18d1ede45da7d Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Fri, 13 Sep 2024 00:28:22 -0700
Subject: [PATCH 24/51] add more

---
 tests/single_file/test_model_vae_single_file.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/single_file/test_model_vae_single_file.py b/tests/single_file/test_model_vae_single_file.py
index 63f2bb757472..7893e5dc477c 100644
--- a/tests/single_file/test_model_vae_single_file.py
+++ b/tests/single_file/test_model_vae_single_file.py
@@ -25,7 +25,7 @@
     enable_full_determinism,
     load_hf_numpy,
     numpy_cosine_similarity_distance,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
     torch_device,
 )
@@ -35,7 +35,7 @@
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class AutoencoderKLSingleFileTests(unittest.TestCase):
     model_class = AutoencoderKL
     ckpt_path = (

From 961a8e5b10b43be44ca1931f611e077c8ebce9da Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Fri, 13 Sep 2024 00:34:19 -0700
Subject: [PATCH 25/51] add more

---
 tests/models/test_modeling_common.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py
index 37a653100fae..aec842d732f6 100644
--- a/tests/models/test_modeling_common.py
+++ b/tests/models/test_modeling_common.py
@@ -753,7 +753,7 @@ def test_deprecated_kwargs(self):
             " from `_deprecated_kwargs = []`"
         )
 
-    @require_torch_gpu
+    @require_torch_accelerator
     def test_cpu_offload(self):
         config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
         model = self.model_class(**config).eval()

From 266f5f9fc90557f884086e225b56937afc392ca4 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Fri, 13 Sep 2024 00:49:43 -0700
Subject: [PATCH 26/51] enable more

---
 tests/models/test_modeling_common.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py
index aec842d732f6..7fc3027a5cf0 100644
--- a/tests/models/test_modeling_common.py
+++ b/tests/models/test_modeling_common.py
@@ -783,7 +783,7 @@ def test_cpu_offload(self):
 
         self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
 
-    @require_torch_gpu
+    @require_torch_accelerator
     def test_disk_offload_without_safetensors(self):
         config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
         model = self.model_class(**config).eval()
@@ -817,7 +817,7 @@ def test_disk_offload_without_safetensors(self):
 
         self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))
 
-    @require_torch_gpu
+    @require_torch_accelerator
     def test_disk_offload_with_safetensors(self):
         config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
         model = self.model_class(**config).eval()

From 405bc169917b6e86be41dc5bc9e2ac2cd908e74c Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Fri, 13 Sep 2024 01:37:01 -0700
Subject: [PATCH 27/51] add more

---
 .../test_stable_diffusion_img2img_single_file.py | 6 +++---
 .../test_stable_diffusion_inpaint_single_file.py | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/tests/single_file/test_stable_diffusion_img2img_single_file.py b/tests/single_file/test_stable_diffusion_img2img_single_file.py
index cbb5e9c3ee0e..eba72b9f5709 100644
--- a/tests/single_file/test_stable_diffusion_img2img_single_file.py
+++ b/tests/single_file/test_stable_diffusion_img2img_single_file.py
@@ -9,7 +9,7 @@
 from diffusers.utils import load_image
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
 )
 
@@ -20,7 +20,7 @@
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class StableDiffusionImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
     pipeline_class = StableDiffusionImg2ImgPipeline
     ckpt_path = "https://huggingface.co/Lykon/DreamShaper/blob/main/DreamShaper_8_pruned.safetensors"
@@ -61,7 +61,7 @@ def test_single_file_format_inference_is_same_as_pretrained(self):
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class StableDiffusion21Img2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
     pipeline_class = StableDiffusionImg2ImgPipeline
     ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors"
diff --git a/tests/single_file/test_stable_diffusion_inpaint_single_file.py b/tests/single_file/test_stable_diffusion_inpaint_single_file.py
index 3e133c6ea923..ae3fb5e37e32 100644
--- a/tests/single_file/test_stable_diffusion_inpaint_single_file.py
+++ b/tests/single_file/test_stable_diffusion_inpaint_single_file.py
@@ -9,7 +9,7 @@
 from diffusers.utils import load_image
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
-    require_torch_gpu,
+    require_torch_accelerator,
     slow,
 )
 
@@ -20,7 +20,7 @@
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class StableDiffusionInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
     pipeline_class = StableDiffusionInpaintPipeline
     ckpt_path = "https://huggingface.co/Lykon/DreamShaper/blob/main/DreamShaper_8_INPAINTING.inpainting.safetensors"
@@ -78,7 +78,7 @@ def test_single_file_components_with_original_config_local_files_only(self):
 
 
 @slow
-@require_torch_gpu
+@require_torch_accelerator
 class StableDiffusion21InpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
     pipeline_class = StableDiffusionInpaintPipeline
     ckpt_path = (

From 48b67d6b68a8229f571407d43fb9eaee830e0611 Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Fri, 13 Sep 2024 01:46:46 -0700
Subject: [PATCH 28/51] update code

---
 .../pipelines/animatediff/test_animatediff.py | 1 +
 .../test_latent_diffusion_superresolution.py | 1 +
 tests/pipelines/test_pipelines_common.py | 21 ++++++++++---------
 3 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/tests/pipelines/animatediff/test_animatediff.py b/tests/pipelines/animatediff/test_animatediff.py
index c387af4cb63d..c767291eabaa
100644 --- a/tests/pipelines/animatediff/test_animatediff.py +++ b/tests/pipelines/animatediff/test_animatediff.py @@ -272,6 +272,7 @@ def test_inference_batch_single_identical( max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff + @unittest.skipIf(torch_device = "cpu", reason="Hardware Accelerator and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py index 304ef1e495f8..829ea327f94f 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py @@ -93,6 +93,7 @@ def test_inference_superresolution(self): assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + @unittest.skipIf(torch_device = "cpu", "This test requires a hardware accelerator") def test_inference_superresolution_fp16(self): unet = self.dummy_uncond_unet scheduler = DDIMScheduler() diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index 778a45277885..73cac2c54495 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -771,8 +771,8 @@ def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3): ), "`from_pipe` changed the attention processor in original pipeline." @unittest.skipIf( - not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), - reason="CPU offload is only available with `accelerate v0.14.0` or higher", + torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), + reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher", ) def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1e-3): components = self.get_dummy_components() @@ -1317,6 +1317,7 @@ def test_save_load_optional_components(self, expected_max_difference=1e-4): max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) + @unittest.skipIf(torch_device = "cpu", reason="Hardware accelerator and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) @@ -1391,8 +1392,8 @@ def _test_attention_slicing_forward_pass( assert_mean_pixel_difference(to_np(output_with_slicing2[0]), to_np(output_without_slicing[0])) @unittest.skipIf( - not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), - reason="CPU offload is only available with `accelerate v0.14.0` or higher", + torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), + reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher", ) def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): import accelerate @@ -1454,8 +1455,8 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): ) @unittest.skipIf( - not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), - reason="CPU offload is only available with `accelerate v0.17.0` or higher", + torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), + reason="CPU offload is only available with hardware 
accelerator and `accelerate v0.17.0` or higher", ) def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): import accelerate @@ -1511,8 +1512,8 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): ) @unittest.skipIf( - not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), - reason="CPU offload is only available with `accelerate v0.17.0` or higher", + torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), + reason="CPU offload is only available with hardware accelerator and `accelerate v0.17.0` or higher", ) def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4): import accelerate @@ -1568,8 +1569,8 @@ def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4): ) @unittest.skipIf( - not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), - reason="CPU offload is only available with `accelerate v0.14.0` or higher", + torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), + reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher", ) def test_sequential_offload_forward_pass_twice(self, expected_max_diff=2e-4): import accelerate From 1117427a04a1f89b5b85af603385e5eb2d6b01f7 Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Fri, 13 Sep 2024 02:10:41 -0700 Subject: [PATCH 29/51] update test marker --- src/diffusers/utils/testing_utils.py | 8 ++++++++ tests/pipelines/animatediff/test_animatediff.py | 10 ++++++++-- .../test_latent_diffusion_superresolution.py | 3 ++- tests/pipelines/test_pipelines_common.py | 12 ++++++------ 4 files changed, 24 insertions(+), 9 deletions(-) diff --git a/src/diffusers/utils/testing_utils.py b/src/diffusers/utils/testing_utils.py index be3e9983c80f..53a10f9ea153 100644 --- a/src/diffusers/utils/testing_utils.py +++ b/src/diffusers/utils/testing_utils.py @@ -337,6 +337,14 @@ def require_note_seq(test_case): return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case) +def require_non_cpu(test_case): + """ + Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when no + hardware accelerator is available. + """ + return unittest.skipUnless(torch_device != "cpu", "test requires a GPU")(test_case) + + def require_torchsde(test_case): """ Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed.
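The `require_non_cpu` marker added above wraps `unittest.skipUnless` around the module-level `torch_device`, which diffusers resolves once at import time from whichever backend is present. A minimal, self-contained sketch of the pattern and its intended use — the hard-coded `torch_device` value and the `ExampleTests` class are illustrative assumptions, not part of the patch:

    import unittest

    torch_device = "xpu"  # stand-in: diffusers derives this from the detected backend ("cuda", "xpu", "mps", or "cpu")


    def require_non_cpu(test_case):
        # Skip unless some hardware accelerator backend was detected.
        return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)


    class ExampleTests(unittest.TestCase):
        @require_non_cpu
        def test_needs_accelerator(self):
            # Runs on CUDA/XPU/MPS machines; reported as skipped on CPU-only runners.
            self.assertNotEqual(torch_device, "cpu")

Because `skipUnless` is applied while the class body is being defined, the skip decision is fixed at import time rather than when the test executes.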
diff --git a/tests/pipelines/animatediff/test_animatediff.py b/tests/pipelines/animatediff/test_animatediff.py index c767291eabaa..f3a795e1bf43 100644 --- a/tests/pipelines/animatediff/test_animatediff.py +++ b/tests/pipelines/animatediff/test_animatediff.py @@ -19,7 +19,13 @@ ) from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import is_xformers_available, logging -from diffusers.utils.testing_utils import numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device +from diffusers.utils.testing_utils import ( + numpy_cosine_similarity_distance, + require_non_cpu, + require_torch_gpu, + slow, + torch_device, +) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( @@ -272,7 +278,7 @@ def test_inference_batch_single_identical( max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff - @unittest.skipIf(torch_device = "cpu", reason="Hardware Accelerator and CPU are required to switch devices") + @require_non_cpu def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py index 829ea327f94f..fc4bf98b9135 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py @@ -26,6 +26,7 @@ floats_tensor, load_image, nightly, + require_non_cpu, require_torch, torch_device, ) @@ -93,7 +94,7 @@ def test_inference_superresolution(self): assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - @unittest.skipIf(torch_device = "cpu", "This test requires a hardware accelerator") + @require_non_cpu def test_inference_superresolution_fp16(self): unet = self.dummy_uncond_unet scheduler = DDIMScheduler() diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index 73cac2c54495..7d725ad442d3 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -771,7 +771,7 @@ def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3): ), "`from_pipe` changed the attention processor in original pipeline." 
@unittest.skipIf( - torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), + torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher", ) def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1e-3): @@ -1317,7 +1317,7 @@ def test_save_load_optional_components(self, expected_max_difference=1e-4): max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) - @unittest.skipIf(torch_device = "cpu", reason="Hardware accelerator and CPU are required to switch devices") + @unittest.skipIf(torch_device == "cpu", reason="Hardware accelerator and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) @@ -1392,7 +1392,7 @@ def _test_attention_slicing_forward_pass( assert_mean_pixel_difference(to_np(output_with_slicing2[0]), to_np(output_without_slicing[0])) @unittest.skipIf( - torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), + torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher", ) def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): @@ -1455,7 +1455,7 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): ) @unittest.skipIf( - torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), + torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), reason="CPU offload is only available with hardware accelerator and `accelerate v0.17.0` or higher", ) def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): @@ -1512,7 +1512,7 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): ) @unittest.skipIf( - torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), + torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), reason="CPU offload is only available with hardware accelerator and `accelerate v0.17.0` or higher", ) def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4): @@ -1569,7 +1569,7 @@ def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4): ) @unittest.skipIf( - torch_device = "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), + torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher", ) def test_sequential_offload_forward_pass_twice(self, expected_max_diff=2e-4):
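`unittest.skipIf(condition, reason)` takes an already-evaluated boolean as its first positional argument, so each inline gate above is computed once when the test class is defined; the `or` chain short-circuits, which keeps `is_accelerate_version` from being consulted when accelerate is absent. A hedged sketch of the combined device-and-version gate, using only helpers that already appear in this series (the `OffloadGateTests` class and test name are illustrative):

    import unittest

    from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version
    from diffusers.utils.testing_utils import torch_device


    class OffloadGateTests(unittest.TestCase):
        @unittest.skipIf(
            torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
            reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher",
        )
        def test_offload_smoke(self):
            # Body elided; the decorator alone decides whether this runs.
            ...

Repeating this condition across many tests is the duplication that the later `require_non_cpu` and `require_accelerate_version_greater` markers factor out.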
From 88d289a144821d02b45582468c4e710707bac7eb Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Fri, 13 Sep 2024 02:18:47 -0700 Subject: [PATCH 30/51] add skip back --- tests/pipelines/test_pipelines_common.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index 7d725ad442d3..b39802320204 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -1201,6 +1201,7 @@ def test_components_function(self): self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + @unittest.skipIf(torch_device == "cpu", reason="float16 requires a hardware accelerator") def test_float16_inference(self, expected_max_diff=5e-2): components = self.get_dummy_components() pipe = self.pipeline_class(**components) @@ -1237,6 +1238,7 @@ def test_float16_inference(self, expected_max_diff=5e-2): max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") + @unittest.skipIf(torch_device == "cpu", reason="float16 requires a hardware accelerator") def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): From b534c50aafe90f4530a127fbcd375f8a7f654a68 Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Fri, 13 Sep 2024 02:24:26 -0700 Subject: [PATCH 31/51] update comment --- src/diffusers/utils/testing_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/utils/testing_utils.py b/src/diffusers/utils/testing_utils.py index 53a10f9ea153..74bba64e244e 100644 --- a/src/diffusers/utils/testing_utils.py +++ b/src/diffusers/utils/testing_utils.py @@ -342,7 +342,7 @@ def require_non_cpu(test_case): Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when no hardware accelerator is available. """ - return unittest.skipUnless(torch_device != "cpu", "test requires a GPU")(test_case) + return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case) def require_torchsde(test_case): From ab4bc3a4e1addf36a3a0da10f815d3e63e0d010f Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Fri, 13 Sep 2024 19:45:33 -0700 Subject: [PATCH 32/51] remove single files --- tests/single_file/single_file_testing_utils.py | 4 ++-- tests/single_file/test_model_controlnet_single_file.py | 4 ++-- .../test_stable_diffusion_img2img_single_file.py | 6 +++--- .../test_stable_diffusion_inpaint_single_file.py | 6 +++--- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/single_file/single_file_testing_utils.py b/tests/single_file/single_file_testing_utils.py index d4f6ec994231..9b89578c5a8c 100644 --- a/tests/single_file/single_file_testing_utils.py +++ b/tests/single_file/single_file_testing_utils.py @@ -156,14 +156,14 @@ def test_single_file_components_with_original_config_local_files_only( def test_single_file_format_inference_is_same_as_pretrained(self, expected_max_diff=1e-4): sf_pipe = self.pipeline_class.from_single_file(self.ckpt_path, safety_checker=None) sf_pipe.unet.set_attn_processor(AttnProcessor()) - sf_pipe.enable_model_cpu_offload(device=torch_device) + sf_pipe.enable_model_cpu_offload() inputs = self.get_inputs(torch_device) image_single_file = sf_pipe(**inputs).images[0] pipe = self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) pipe.unet.set_attn_processor(AttnProcessor()) - pipe.enable_model_cpu_offload(device=torch_device) + pipe.enable_model_cpu_offload() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images[0] diff --git a/tests/single_file/test_model_controlnet_single_file.py b/tests/single_file/test_model_controlnet_single_file.py index 43fca2f72b1b..1d5b790ebb4a 100644 --- a/tests/single_file/test_model_controlnet_single_file.py +++ b/tests/single_file/test_model_controlnet_single_file.py @@ -23,7 +23,7 @@ ) from diffusers.utils.testing_utils import ( enable_full_determinism, -
require_torch_accelerator, + require_torch_gpu, slow, ) @@ -32,7 +32,7 @@ @slow -@require_torch_accelerator +@require_torch_gpu class ControlNetModelSingleFileTests(unittest.TestCase): model_class = ControlNetModel ckpt_path = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" diff --git a/tests/single_file/test_stable_diffusion_img2img_single_file.py b/tests/single_file/test_stable_diffusion_img2img_single_file.py index 2ff81f55163a..fd99c4dede2f 100644 --- a/tests/single_file/test_stable_diffusion_img2img_single_file.py +++ b/tests/single_file/test_stable_diffusion_img2img_single_file.py @@ -9,7 +9,7 @@ from diffusers.utils import load_image from diffusers.utils.testing_utils import ( enable_full_determinism, - require_torch_accelerator, + require_torch_gpu, slow, ) @@ -20,7 +20,7 @@ @slow -@require_torch_accelerator +@require_torch_gpu class StableDiffusionImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): pipeline_class = StableDiffusionImg2ImgPipeline ckpt_path = "https://huggingface.co/Jiali/stable-diffusion-1.5/blob/main/v1-5-pruned-emaonly.safetensors" @@ -61,7 +61,7 @@ def test_single_file_format_inference_is_same_as_pretrained(self): @slow -@require_torch_accelerator +@require_torch_gpu class StableDiffusion21Img2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): pipeline_class = StableDiffusionImg2ImgPipeline ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors" diff --git a/tests/single_file/test_stable_diffusion_inpaint_single_file.py b/tests/single_file/test_stable_diffusion_inpaint_single_file.py index 2b6e50d6bcbf..ba9583639b98 100644 --- a/tests/single_file/test_stable_diffusion_inpaint_single_file.py +++ b/tests/single_file/test_stable_diffusion_inpaint_single_file.py @@ -9,7 +9,7 @@ from diffusers.utils import load_image from diffusers.utils.testing_utils import ( enable_full_determinism, - require_torch_accelerator, + require_torch_gpu, slow, ) @@ -20,7 +20,7 @@ @slow -@require_torch_accelerator +@require_torch_gpu class StableDiffusionInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): pipeline_class = StableDiffusionInpaintPipeline ckpt_path = "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt" @@ -78,7 +78,7 @@ def test_single_file_components_with_original_config_local_files_only(self): @slow -@require_torch_accelerator +@require_torch_gpu class StableDiffusion21InpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): pipeline_class = StableDiffusionInpaintPipeline ckpt_path = ( From 769d7138a1ba47a411682e2ef8552eecfcdaeedf Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Fri, 13 Sep 2024 19:51:15 -0700 Subject: [PATCH 33/51] remove --- tests/single_file/test_model_vae_single_file.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/single_file/test_model_vae_single_file.py b/tests/single_file/test_model_vae_single_file.py index 7893e5dc477c..63f2bb757472 100644 --- a/tests/single_file/test_model_vae_single_file.py +++ b/tests/single_file/test_model_vae_single_file.py @@ -25,7 +25,7 @@ enable_full_determinism, load_hf_numpy, numpy_cosine_similarity_distance, - require_torch_accelerator, + require_torch_gpu, slow, torch_device, ) @@ -35,7 +35,7 @@ @slow -@require_torch_accelerator +@require_torch_gpu class AutoencoderKLSingleFileTests(unittest.TestCase): model_class = AutoencoderKL ckpt_path = ( From 
daeb966c66c0071d33ac780749cdfd9fbf30ecd4 Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Fri, 13 Sep 2024 20:18:21 -0700 Subject: [PATCH 34/51] style --- tests/models/test_modeling_common.py | 3 +-- tests/pipelines/amused/test_amused.py | 1 + tests/pipelines/amused/test_amused_img2img.py | 1 + tests/pipelines/amused/test_amused_inpaint.py | 1 + .../animatediff/test_animatediff_controlnet.py | 3 ++- tests/pipelines/animatediff/test_animatediff_sdxl.py | 3 ++- .../animatediff/test_animatediff_sparsectrl.py | 3 ++- .../animatediff/test_animatediff_video2video.py | 3 ++- tests/pipelines/controlnet_xs/test_controlnetxs.py | 2 ++ tests/pipelines/deepfloyd_if/test_if.py | 3 ++- tests/pipelines/deepfloyd_if/test_if_img2img.py | 12 +++++++++++- .../deepfloyd_if/test_if_img2img_superresolution.py | 11 ++++++++++- tests/pipelines/deepfloyd_if/test_if_inpainting.py | 11 ++++++++++- .../test_if_inpainting_superresolution.py | 11 ++++++++++- .../deepfloyd_if/test_if_superresolution.py | 11 ++++++++++- tests/pipelines/pag/test_pag_animatediff.py | 3 ++- tests/pipelines/pia/test_pia.py | 3 ++- .../test_semantic_diffusion.py | 2 ++ .../test_stable_diffusion_depth.py | 2 ++ .../test_stable_diffusion_upscale.py | 2 ++ .../test_stable_diffusion_v_pred.py | 2 ++ .../stable_diffusion_safe/test_safe_diffusion.py | 3 ++- .../test_stable_video_diffusion.py | 3 +++ .../test_text_to_video_zero_sdxl.py | 11 ++++++++++- 24 files changed, 94 insertions(+), 16 deletions(-) diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py index 7fc3027a5cf0..f67adcd7a6d1 100644 --- a/tests/models/test_modeling_common.py +++ b/tests/models/test_modeling_common.py @@ -46,9 +46,8 @@ get_python_version, is_torch_compile, require_torch_2, - require_torch_accelerator_with_training, - require_torch_gpu, require_torch_accelerator, + require_torch_accelerator_with_training, require_torch_multi_gpu, run_test_in_subprocess, torch_device, diff --git a/tests/pipelines/amused/test_amused.py b/tests/pipelines/amused/test_amused.py index aad4ce5df2a2..f28d8708d309 100644 --- a/tests/pipelines/amused/test_amused.py +++ b/tests/pipelines/amused/test_amused.py @@ -26,6 +26,7 @@ slow, torch_device, ) + from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/amused/test_amused_img2img.py b/tests/pipelines/amused/test_amused_img2img.py index 5bfecd9fe67d..2699bbe7f56f 100644 --- a/tests/pipelines/amused/test_amused_img2img.py +++ b/tests/pipelines/amused/test_amused_img2img.py @@ -27,6 +27,7 @@ slow, torch_device, ) + from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/amused/test_amused_inpaint.py b/tests/pipelines/amused/test_amused_inpaint.py index 136bd5f724d5..645379a7eab1 100644 --- a/tests/pipelines/amused/test_amused_inpaint.py +++ b/tests/pipelines/amused/test_amused_inpaint.py @@ -27,6 +27,7 @@ slow, torch_device, ) + from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineTesterMixin diff --git a/tests/pipelines/animatediff/test_animatediff_controlnet.py b/tests/pipelines/animatediff/test_animatediff_controlnet.py index eaf3d0b9805f..f5d0006c6898 100644 --- a/tests/pipelines/animatediff/test_animatediff_controlnet.py +++ 
b/tests/pipelines/animatediff/test_animatediff_controlnet.py @@ -21,7 +21,7 @@ from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import torch_device +from diffusers.utils.testing_utils import require_non_cpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( @@ -281,6 +281,7 @@ def test_inference_batch_single_identical( max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff + @require_non_cpu def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/animatediff/test_animatediff_sdxl.py b/tests/pipelines/animatediff/test_animatediff_sdxl.py index f8d5487b2b2f..a76562faf89a 100644 --- a/tests/pipelines/animatediff/test_animatediff_sdxl.py +++ b/tests/pipelines/animatediff/test_animatediff_sdxl.py @@ -14,7 +14,7 @@ UNetMotionModel, ) from diffusers.utils import is_xformers_available, logging -from diffusers.utils.testing_utils import torch_device +from diffusers.utils.testing_utils import require_non_cpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( @@ -212,6 +212,7 @@ def test_inference_batch_single_identical( max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff + @require_non_cpu def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/animatediff/test_animatediff_sparsectrl.py b/tests/pipelines/animatediff/test_animatediff_sparsectrl.py index d0241de17276..761c6588039e 100644 --- a/tests/pipelines/animatediff/test_animatediff_sparsectrl.py +++ b/tests/pipelines/animatediff/test_animatediff_sparsectrl.py @@ -20,7 +20,7 @@ ) from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import torch_device +from diffusers.utils.testing_utils import require_non_cpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( @@ -345,6 +345,7 @@ def test_inference_batch_single_identical_use_simplified_condition_embedding_tru max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff + @require_non_cpu def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/animatediff/test_animatediff_video2video.py b/tests/pipelines/animatediff/test_animatediff_video2video.py index 7b1adcf1ff0b..7a40e449f030 100644 --- a/tests/pipelines/animatediff/test_animatediff_video2video.py +++ b/tests/pipelines/animatediff/test_animatediff_video2video.py @@ -19,7 +19,7 @@ ) from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import is_xformers_available, logging -from diffusers.utils.testing_utils import torch_device +from diffusers.utils.testing_utils import require_non_cpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin @@ -258,6 +258,7 @@ def 
test_inference_batch_single_identical( max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff + @require_non_cpu def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/controlnet_xs/test_controlnetxs.py b/tests/pipelines/controlnet_xs/test_controlnetxs.py index b779650bc86e..7a759f781dbe 100644 --- a/tests/pipelines/controlnet_xs/test_controlnetxs.py +++ b/tests/pipelines/controlnet_xs/test_controlnetxs.py @@ -38,6 +38,7 @@ is_torch_compile, load_image, load_numpy, + require_non_cpu, require_torch_2, require_torch_gpu, run_test_in_subprocess, @@ -306,6 +307,7 @@ def test_multi_vae(self): assert out_vae_np.shape == out_np.shape + @require_non_cpu def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/deepfloyd_if/test_if.py b/tests/pipelines/deepfloyd_if/test_if.py index 139561918a66..54560de497af 100644 --- a/tests/pipelines/deepfloyd_if/test_if.py +++ b/tests/pipelines/deepfloyd_if/test_if.py @@ -23,7 +23,7 @@ ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import load_numpy, require_torch_gpu, skip_mps, slow, torch_device +from diffusers.utils.testing_utils import load_numpy, require_non_cpu, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference @@ -58,6 +58,7 @@ def get_dummy_inputs(self, device, seed=0): def test_save_load_optional_components(self): self._test_save_load_optional_components() + @require_non_cpu def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) diff --git a/tests/pipelines/deepfloyd_if/test_if_img2img.py b/tests/pipelines/deepfloyd_if/test_if_img2img.py index ce1167dc4158..aa30d491690e 100644 --- a/tests/pipelines/deepfloyd_if/test_if_img2img.py +++ b/tests/pipelines/deepfloyd_if/test_if_img2img.py @@ -22,7 +22,15 @@ from diffusers import IFImg2ImgPipeline from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device +from diffusers.utils.testing_utils import ( + floats_tensor, + load_numpy, + require_non_cpu, + require_torch_gpu, + skip_mps, + slow, + torch_device, +) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, @@ -70,10 +78,12 @@ def test_save_load_optional_components(self): def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) + @require_non_cpu def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) + @require_non_cpu def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) diff --git a/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py b/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py index 0e567e813c15..a8b8f9524720 100644 --- 
a/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py +++ b/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py @@ -22,7 +22,15 @@ from diffusers import IFImg2ImgSuperResolutionPipeline from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device +from diffusers.utils.testing_utils import ( + floats_tensor, + load_numpy, + require_non_cpu, + require_torch_gpu, + skip_mps, + slow, + torch_device, +) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, @@ -72,6 +80,7 @@ def test_xformers_attention_forwardGenerator_pass(self): def test_save_load_optional_components(self): self._test_save_load_optional_components() + @require_non_cpu def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) diff --git a/tests/pipelines/deepfloyd_if/test_if_inpainting.py b/tests/pipelines/deepfloyd_if/test_if_inpainting.py index 8e8b33dbd433..677b644a5db6 100644 --- a/tests/pipelines/deepfloyd_if/test_if_inpainting.py +++ b/tests/pipelines/deepfloyd_if/test_if_inpainting.py @@ -22,7 +22,15 @@ from diffusers import IFInpaintingPipeline from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device +from diffusers.utils.testing_utils import ( + floats_tensor, + load_numpy, + require_non_cpu, + require_torch_gpu, + skip_mps, + slow, + torch_device, +) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, @@ -72,6 +80,7 @@ def test_xformers_attention_forwardGenerator_pass(self): def test_save_load_optional_components(self): self._test_save_load_optional_components() + @require_non_cpu def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) diff --git a/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py b/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py index 9cde7d7af868..ffe64c682761 100644 --- a/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py +++ b/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py @@ -22,7 +22,15 @@ from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device +from diffusers.utils.testing_utils import ( + floats_tensor, + load_numpy, + require_non_cpu, + require_torch_gpu, + skip_mps, + slow, + torch_device, +) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, @@ -74,6 +82,7 @@ def test_xformers_attention_forwardGenerator_pass(self): def test_save_load_optional_components(self): self._test_save_load_optional_components() + @require_non_cpu def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) diff --git 
a/tests/pipelines/deepfloyd_if/test_if_superresolution.py b/tests/pipelines/deepfloyd_if/test_if_superresolution.py index ad28faf7e8ff..7ee53a851db5 100644 --- a/tests/pipelines/deepfloyd_if/test_if_superresolution.py +++ b/tests/pipelines/deepfloyd_if/test_if_superresolution.py @@ -22,7 +22,15 @@ from diffusers import IFSuperResolutionPipeline from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device +from diffusers.utils.testing_utils import ( + floats_tensor, + load_numpy, + require_non_cpu, + require_torch_gpu, + skip_mps, + slow, + torch_device, +) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference @@ -67,6 +75,7 @@ def test_xformers_attention_forwardGenerator_pass(self): def test_save_load_optional_components(self): self._test_save_load_optional_components() + @require_non_cpu def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) diff --git a/tests/pipelines/pag/test_pag_animatediff.py b/tests/pipelines/pag/test_pag_animatediff.py index ccee2ed65134..02bbc85676be 100644 --- a/tests/pipelines/pag/test_pag_animatediff.py +++ b/tests/pipelines/pag/test_pag_animatediff.py @@ -19,7 +19,7 @@ ) from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import is_xformers_available -from diffusers.utils.testing_utils import torch_device +from diffusers.utils.testing_utils import require_non_cpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( @@ -218,6 +218,7 @@ def test_dict_tuple_outputs_equivalent(self): expected_slice = np.array([0.5295, 0.3947, 0.5300, 0.4864, 0.4518, 0.5315, 0.5440, 0.4775, 0.5538]) return super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice) + @require_non_cpu def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/pia/test_pia.py b/tests/pipelines/pia/test_pia.py index 1f8565664743..5f34807ad2da 100644 --- a/tests/pipelines/pia/test_pia.py +++ b/tests/pipelines/pia/test_pia.py @@ -18,7 +18,7 @@ UNetMotionModel, ) from diffusers.utils import is_xformers_available, logging -from diffusers.utils.testing_utils import floats_tensor, torch_device +from diffusers.utils.testing_utils import floats_tensor, require_non_cpu, torch_device from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin @@ -278,6 +278,7 @@ def test_inference_batch_single_identical( max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff + @require_non_cpu def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py b/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py index 0b0aecf55dd1..8a0ca62aedc3 100644 --- a/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py +++ b/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py @@ -28,6 +28,7 @@ enable_full_determinism, 
floats_tensor, nightly, + require_non_cpu, require_torch_gpu, torch_device, ) @@ -237,6 +238,7 @@ def test_semantic_diffusion_no_safety_checker(self): image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None + @require_non_cpu def test_semantic_diffusion_fp16(self): """Test that stable diffusion works with fp16""" unet = self.dummy_cond_unet diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py index 1ba19722e29e..31f9b339be95 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py @@ -43,6 +43,7 @@ load_image, load_numpy, nightly, + require_non_cpu, require_torch_gpu, skip_mps, slow, @@ -194,6 +195,7 @@ def test_save_load_local(self): max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) + @require_non_cpu def test_save_load_float16(self): components = self.get_dummy_components() for name, module in components.items(): diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py index 66d1573d57ff..440b91b8558a 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py @@ -29,6 +29,7 @@ floats_tensor, load_image, load_numpy, + require_non_cpu, require_torch_gpu, slow, torch_device, @@ -289,6 +290,7 @@ def test_stable_diffusion_upscale_prompt_embeds(self): assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_prompt_embeds_slice.flatten() - expected_slice).max() < 1e-2 + @require_non_cpu def test_stable_diffusion_upscale_fp16(self): """Test that stable diffusion upscale works with fp16""" unet = self.dummy_cond_unet_upscale diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py index e8fd0eb4342b..982842e0c155 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py @@ -34,6 +34,7 @@ enable_full_determinism, load_numpy, numpy_cosine_similarity_distance, + require_non_cpu, require_torch_gpu, slow, torch_device, @@ -213,6 +214,7 @@ def test_stable_diffusion_v_pred_k_euler(self): assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + @require_non_cpu def test_stable_diffusion_v_pred_fp16(self): """Test that stable diffusion v-prediction works with fp16""" unet = self.dummy_cond_unet diff --git a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py index 646541a681e6..4a24ce9e6c07 100644 --- a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py +++ b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py @@ -24,7 +24,7 @@ from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline -from diffusers.utils.testing_utils import floats_tensor, nightly, require_torch_gpu, torch_device +from diffusers.utils.testing_utils import floats_tensor, nightly, require_non_cpu, require_torch_gpu, torch_device class 
SafeDiffusionPipelineFastTests(unittest.TestCase): @@ -228,6 +228,7 @@ def test_stable_diffusion_no_safety_checker(self): image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None + @require_non_cpu def test_stable_diffusion_fp16(self): """Test that stable diffusion works with fp16""" unet = self.dummy_cond_unet diff --git a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py index d7156ad1a635..be94cc73eade 100644 --- a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py +++ b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py @@ -25,6 +25,7 @@ enable_full_determinism, floats_tensor, numpy_cosine_similarity_distance, + require_non_cpu, require_torch_gpu, slow, torch_device, @@ -250,6 +251,7 @@ def test_float16_inference(self, expected_max_diff=5e-2): max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") + @require_non_cpu def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): @@ -365,6 +367,7 @@ def test_save_load_local(self, expected_max_difference=9e-4): max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) + @require_non_cpu def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py index 739dfa1ec52f..d5e2a546bcdc 100644 --- a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py +++ b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py @@ -24,7 +24,13 @@ from diffusers import AutoencoderKL, DDIMScheduler, TextToVideoZeroSDXLPipeline, UNet2DConditionModel from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version -from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device +from diffusers.utils.testing_utils import ( + enable_full_determinism, + nightly, + require_non_cpu, + require_torch_gpu, + torch_device, +) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineFromPipeTesterMixin, PipelineTesterMixin @@ -213,6 +219,7 @@ def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4): max_diff = np.abs(to_np(output) - to_np(output_tuple)).max() self.assertLess(max_diff, expected_max_difference) + @require_non_cpu def test_float16_inference(self, expected_max_diff=5e-2): components = self.get_dummy_components() for name, module in components.items(): @@ -278,6 +285,7 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): def test_pipeline_call_signature(self): pass + @require_non_cpu def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): @@ -329,6 +337,7 @@ def test_save_load_optional_components(self): def test_sequential_cpu_offload_forward_pass(self): pass + @require_non_cpu def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) From 28a73aca3cd91c82c5a3c8deac661b5e03d7079e Mon Sep 17 00:00:00 
2001 From: Fanli Lin Date: Fri, 13 Sep 2024 20:22:14 -0700 Subject: [PATCH 35/51] add --- tests/pipelines/animatediff/test_animatediff.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/pipelines/animatediff/test_animatediff.py b/tests/pipelines/animatediff/test_animatediff.py index f3a795e1bf43..a4b78fe10926 100644 --- a/tests/pipelines/animatediff/test_animatediff.py +++ b/tests/pipelines/animatediff/test_animatediff.py @@ -300,8 +300,8 @@ def test_to_device(self): ] self.assertTrue(all(device == torch_device for device in model_devices)) - output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0] - self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) + output_device = pipe(**self.get_dummy_inputs(torch_device))[0] + self.assertTrue(np.isnan(to_np(output_device)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() From 3052847ca48a67bebace7f07ac1b9564da2c25e7 Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Fri, 13 Sep 2024 20:24:00 -0700 Subject: [PATCH 36/51] revert --- tests/models/test_modeling_common.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py index f67adcd7a6d1..08a0de7b3a13 100644 --- a/tests/models/test_modeling_common.py +++ b/tests/models/test_modeling_common.py @@ -46,8 +46,8 @@ get_python_version, is_torch_compile, require_torch_2, - require_torch_accelerator, require_torch_accelerator_with_training, + require_torch_gpu, require_torch_multi_gpu, run_test_in_subprocess, torch_device, @@ -405,7 +405,7 @@ def test_set_xformers_attn_processor_for_determinism(self): assert torch.allclose(output, output_3, atol=self.base_precision) assert torch.allclose(output_2, output_3, atol=self.base_precision) - @require_torch_accelerator + @require_torch_gpu def test_set_attn_processor_for_determinism(self): if self.uses_custom_attn_processor: return @@ -752,7 +752,7 @@ def test_deprecated_kwargs(self): " from `_deprecated_kwargs = []`" ) - @require_torch_accelerator + @require_torch_gpu def test_cpu_offload(self): config, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() @@ -782,7 +782,7 @@ def test_cpu_offload(self): self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) - @require_torch_accelerator + @require_torch_gpu def test_disk_offload_without_safetensors(self): config, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() @@ -816,7 +816,7 @@ def test_disk_offload_without_safetensors(self): self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) - @require_torch_accelerator + @require_torch_gpu def test_disk_offload_with_safetensors(self): config, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() @@ -875,7 +875,7 @@ def test_model_parallelism(self): self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) - @require_torch_accelerator + @require_torch_gpu def test_sharded_checkpoints(self): torch.manual_seed(0) config, inputs_dict = self.prepare_init_args_and_inputs_for_common() @@ -907,7 +907,7 @@ def test_sharded_checkpoints(self): self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) - @require_torch_accelerator + @require_torch_gpu def test_sharded_checkpoints_with_variant(self): torch.manual_seed(0) config, inputs_dict = self.prepare_init_args_and_inputs_for_common() @@ -944,7 
+944,7 @@ def test_sharded_checkpoints_with_variant(self): self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) - @require_torch_accelerator + @require_torch_gpu def test_sharded_checkpoints_device_map(self): config, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**config).eval() @@ -1066,4 +1066,4 @@ def test_push_to_hub_library_name(self): assert model_card.library_name == "diffusers" # Reset repo - delete_repo(self.repo_id, token=TOKEN) + delete_repo(self.repo_id, token=TOKEN) \ No newline at end of file From a5384ffb81f407e9667d01be47668d0b84134d49 Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Fri, 13 Sep 2024 20:25:04 -0700 Subject: [PATCH 37/51] reformat --- tests/models/test_modeling_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/models/test_modeling_common.py b/tests/models/test_modeling_common.py index 08a0de7b3a13..b56ac233ef29 100644 --- a/tests/models/test_modeling_common.py +++ b/tests/models/test_modeling_common.py @@ -1066,4 +1066,4 @@ def test_push_to_hub_library_name(self): assert model_card.library_name == "diffusers" # Reset repo - delete_repo(self.repo_id, token=TOKEN) \ No newline at end of file + delete_repo(self.repo_id, token=TOKEN) From 34a0a7bdb79249c6935daf474932f37ef1586288 Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Tue, 22 Oct 2024 23:21:03 -0700 Subject: [PATCH 38/51] update decorator --- tests/pipelines/test_pipelines_common.py | 40 ++++++++++-------------- 1 file changed, 16 insertions(+), 24 deletions(-) diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index 4c25bea34ada..1ef2ca810f15 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -38,9 +38,11 @@ from diffusers.pipelines.pipeline_utils import StableDiffusionMixin from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import logging -from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version, is_xformers_available +from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( CaptureLogger, + require_accelerate_version_greater, + require_non_cpu, require_torch, skip_mps, torch_device, @@ -770,10 +772,8 @@ def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3): type(proc) == AttnProcessor for proc in component.attn_processors.values() ), "`from_pipe` changed the attention processor in original pipeline." 
- @unittest.skipIf( - torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), - reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher", - ) + @require_non_cpu + @require_accelerate_version_greater("0.14.0") def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1e-3): components = self.get_dummy_components() pipe = self.pipeline_class(**components) @@ -1201,7 +1201,7 @@ def test_components_function(self): self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) - @unittest.skipIf(torch_device == "cpu", reason="float16 requires a hardware accelerator") + @require_non_cpu def test_float16_inference(self, expected_max_diff=5e-2): components = self.get_dummy_components() pipe = self.pipeline_class(**components) @@ -1238,7 +1238,7 @@ def test_float16_inference(self, expected_max_diff=5e-2): max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") - @unittest.skipIf(torch_device == "cpu", reason="float16 requires a hardware accelerator") + @require_non_cpu def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): @@ -1319,7 +1319,7 @@ def test_save_load_optional_components(self, expected_max_difference=1e-4): max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) - @unittest.skipIf(torch_device == "cpu", reason="Hardware accelerator and CPU are required to switch devices") + @require_non_cpu def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) @@ -1393,10 +1393,8 @@ def _test_attention_slicing_forward_pass( assert_mean_pixel_difference(to_np(output_with_slicing1[0]), to_np(output_without_slicing[0])) assert_mean_pixel_difference(to_np(output_with_slicing2[0]), to_np(output_without_slicing[0])) - @unittest.skipIf( - torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), - reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher", - ) + @require_non_cpu + @require_accelerate_version_greater("0.14.0") def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): import accelerate @@ -1456,10 +1454,8 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", ) - @unittest.skipIf( - torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), - reason="CPU offload is only available with hardware accelerator and `accelerate v0.17.0` or higher", - ) + @require_non_cpu + @require_accelerate_version_greater("0.17.0") def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): import accelerate @@ -1513,10 +1509,8 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", ) - @unittest.skipIf( - torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), - reason="CPU offload is only available with hardware accelerator and `accelerate v0.17.0` or higher", - ) + @require_non_cpu + @require_accelerate_version_greater("0.17.0") def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4): import
@@ -1570,10 +1564,8 @@ def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4):
             f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}",
         )

-    @unittest.skipIf(
-        torch_device == "cpu" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
-        reason="CPU offload is only available with hardware accelerator and `accelerate v0.14.0` or higher",
-    )
+    @require_non_cpu
+    @require_accelerate_version_greater("0.14.0")
     def test_sequential_offload_forward_pass_twice(self, expected_max_diff=2e-4):
         import accelerate

From bad2a3e98952ba110e7a489c1e59ff2c3f4f2f04 Mon Sep 17 00:00:00 2001
From: Fanli Lin <fanli.lin@intel.com>
Date: Tue, 22 Oct 2024 23:49:09 -0700
Subject: [PATCH 39/51] update

---
 .../test_animatediff_video2video_controlnet.py        | 4 ++--
 .../stable_diffusion_2/test_stable_diffusion_depth.py | 9 ++++-----
 2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py b/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py
index 5e598e67ec11..833d75c18b74 100644
--- a/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py
+++ b/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py
@@ -20,7 +20,7 @@
 )
 from diffusers.models.attention import FreeNoiseTransformerBlock
 from diffusers.utils import is_xformers_available, logging
-from diffusers.utils.testing_utils import torch_device
+from diffusers.utils.testing_utils import require_non_cpu, torch_device

 from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS
 from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin
@@ -274,7 +274,7 @@ def test_inference_batch_single_identical(
         max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()
         assert max_diff < expected_max_diff

-    @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices")
+    @require_non_cpu
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
index 31f9b339be95..2216a9425a0c 100644
--- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
+++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
@@ -36,13 +36,13 @@
     StableDiffusionDepth2ImgPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import is_accelerate_available, is_accelerate_version
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
     floats_tensor,
     load_image,
     load_numpy,
     nightly,
+    require_accelerate_version_greater,
     require_non_cpu,
     require_torch_gpu,
     skip_mps,
@@ -227,6 +227,7 @@ def test_save_load_float16(self):
         max_diff = np.abs(output - output_loaded).max()
         self.assertLess(max_diff, 2e-2, "The output of the fp16 pipeline changed after saving and loading.")

+    @require_non_cpu
     def test_float16_inference(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -246,10 +247,8 @@ def test_float16_inference(self):
         max_diff = np.abs(output - output_fp16).max()
         self.assertLess(max_diff, 1.3e-2, "The outputs of the fp16 and fp32 pipelines are too different.")

-    @unittest.skipIf(
-        not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
-        reason="CPU offload is only available with `accelerate v0.14.0` or higher",
-    )
+    @require_non_cpu
+
@require_accelerate_version_greater("0.14.0") def test_cpu_offload_forward_pass(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) From f157e9812be253d22ded1e9a247ab4a75876c431 Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Wed, 23 Oct 2024 00:43:55 -0700 Subject: [PATCH 40/51] update --- .../test_animatediff_video2video_controlnet.py | 6 +++--- .../stable_video_diffusion/test_stable_video_diffusion.py | 7 +++---- .../test_text_to_video_zero_sdxl.py | 8 +++----- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py b/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py index 833d75c18b74..a81a5c4467d5 100644 --- a/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py +++ b/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py @@ -290,13 +290,13 @@ def test_to_device(self): output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) - pipe.to("cuda") + pipe.to(torch_device) model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] - self.assertTrue(all(device == "cuda" for device in model_devices)) + self.assertTrue(all(device == torch_device for device in model_devices)) - output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] + output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0] self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) def test_to_dtype(self): diff --git a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py index be94cc73eade..b3478cc5dc19 100644 --- a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py +++ b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py @@ -25,6 +25,7 @@ enable_full_determinism, floats_tensor, numpy_cosine_similarity_distance, + require_accelerate_version_greater, require_non_cpu, require_torch_gpu, slow, @@ -428,10 +429,8 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") - @unittest.skipIf( - not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), - reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher", - ) + @require_non_cpu + @require_accelerate_version_greater("0.17.0") def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): generator_device = "cpu" components = self.get_dummy_components() diff --git a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py index d5e2a546bcdc..2dbb41a08765 100644 --- a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py +++ b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py @@ -23,10 +23,10 @@ from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, TextToVideoZeroSDXLPipeline, UNet2DConditionModel -from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version from diffusers.utils.testing_utils import ( enable_full_determinism, nightly, + require_accelerate_version_greater, require_non_cpu, 
require_torch_gpu, torch_device, @@ -261,10 +261,8 @@ def test_inference_batch_consistent(self): def test_inference_batch_single_identical(self): pass - @unittest.skipIf( - not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), - reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher", - ) + @require_non_cpu + @require_accelerate_version_greater("0.17.0") def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) From 359754e2574109898036168a414eba7566ac155f Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Wed, 23 Oct 2024 00:50:29 -0700 Subject: [PATCH 41/51] update --- .../stable_video_diffusion/test_stable_video_diffusion.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py index b3478cc5dc19..e1560f10f0a6 100644 --- a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py +++ b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py @@ -18,7 +18,7 @@ StableVideoDiffusionPipeline, UNetSpatioTemporalConditionModel, ) -from diffusers.utils import is_accelerate_available, is_accelerate_version, load_image, logging +from diffusers.utils import load_image, logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( CaptureLogger, @@ -404,10 +404,8 @@ def test_to_dtype(self): model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) - @unittest.skipIf( - not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), - reason="CPU offload is only available with `accelerate v0.14.0` or higher", - ) + @require_non_cpu + @require_accelerate_version_greater("0.14.0") def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) From 7ef5dabbe816050e6b506030aee943edaf960f71 Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Mon, 11 Nov 2024 09:43:07 +0800 Subject: [PATCH 42/51] Update tests/pipelines/deepfloyd_if/test_if.py Co-authored-by: Dhruv Nair --- tests/pipelines/deepfloyd_if/test_if.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/pipelines/deepfloyd_if/test_if.py b/tests/pipelines/deepfloyd_if/test_if.py index 54560de497af..f56ae7a2112f 100644 --- a/tests/pipelines/deepfloyd_if/test_if.py +++ b/tests/pipelines/deepfloyd_if/test_if.py @@ -58,7 +58,8 @@ def get_dummy_inputs(self, device, seed=0): def test_save_load_optional_components(self): self._test_save_load_optional_components() - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) From 915c7416726a5a6eaa4ad02116137373efb99279 Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Mon, 11 Nov 2024 09:43:26 +0800 Subject: [PATCH 43/51] Update src/diffusers/utils/testing_utils.py Co-authored-by: Dhruv Nair --- src/diffusers/utils/testing_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/utils/testing_utils.py 
b/src/diffusers/utils/testing_utils.py index a03765a53781..b3e381f7d3fb 100644 --- a/src/diffusers/utils/testing_utils.py +++ b/src/diffusers/utils/testing_utils.py @@ -373,7 +373,7 @@ def require_note_seq(test_case): return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case) -def require_non_cpu(test_case): +def require_accelerator(test_case): """ Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no hardware accelerator available. From e0507b503d852acbff1fe7ec5d0ce0712446ac3a Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Mon, 11 Nov 2024 09:43:36 +0800 Subject: [PATCH 44/51] Update tests/pipelines/animatediff/test_animatediff_controlnet.py Co-authored-by: Dhruv Nair --- tests/pipelines/animatediff/test_animatediff_controlnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pipelines/animatediff/test_animatediff_controlnet.py b/tests/pipelines/animatediff/test_animatediff_controlnet.py index f5d0006c6898..74552a9315be 100644 --- a/tests/pipelines/animatediff/test_animatediff_controlnet.py +++ b/tests/pipelines/animatediff/test_animatediff_controlnet.py @@ -281,7 +281,7 @@ def test_inference_batch_single_identical( max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff - @require_non_cpu + @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) From 97f0e9eb71bb161f4a775a7dae4c725812c204b8 Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Mon, 11 Nov 2024 09:43:51 +0800 Subject: [PATCH 45/51] Update tests/pipelines/animatediff/test_animatediff.py Co-authored-by: Dhruv Nair --- tests/pipelines/animatediff/test_animatediff.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pipelines/animatediff/test_animatediff.py b/tests/pipelines/animatediff/test_animatediff.py index a4b78fe10926..5e7831a24f3a 100644 --- a/tests/pipelines/animatediff/test_animatediff.py +++ b/tests/pipelines/animatediff/test_animatediff.py @@ -278,7 +278,7 @@ def test_inference_batch_single_identical( max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff - @require_non_cpu + @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) From 451790f8e92750087e20734e4e7996e2ef303cf1 Mon Sep 17 00:00:00 2001 From: Fanli Lin Date: Mon, 11 Nov 2024 09:44:00 +0800 Subject: [PATCH 46/51] Update tests/pipelines/animatediff/test_animatediff_controlnet.py Co-authored-by: Dhruv Nair --- tests/pipelines/animatediff/test_animatediff_controlnet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pipelines/animatediff/test_animatediff_controlnet.py b/tests/pipelines/animatediff/test_animatediff_controlnet.py index 74552a9315be..6fcf6fe44fb7 100644 --- a/tests/pipelines/animatediff/test_animatediff_controlnet.py +++ b/tests/pipelines/animatediff/test_animatediff_controlnet.py @@ -21,7 +21,7 @@ from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import require_non_cpu, torch_device +from diffusers.utils.testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( From 
b671e25df65ddd234bad1964b96eca434646a44b Mon Sep 17 00:00:00 2001 From: "Lin, Fanli" Date: Sun, 10 Nov 2024 18:14:28 -0800 Subject: [PATCH 47/51] update float16 --- .../pipelines/animatediff/test_animatediff.py | 2 +- .../animatediff/test_animatediff_sdxl.py | 4 ++-- .../test_animatediff_sparsectrl.py | 4 ++-- .../test_animatediff_video2video.py | 4 ++-- ...test_animatediff_video2video_controlnet.py | 4 ++-- .../controlnet_xs/test_controlnetxs.py | 4 ++-- tests/pipelines/deepfloyd_if/test_if.py | 2 +- .../pipelines/deepfloyd_if/test_if_img2img.py | 8 +++++--- .../test_if_img2img_superresolution.py | 5 +++-- .../deepfloyd_if/test_if_inpainting.py | 5 +++-- .../test_if_inpainting_superresolution.py | 5 +++-- .../deepfloyd_if/test_if_superresolution.py | 5 +++-- .../test_latent_diffusion_superresolution.py | 4 ++-- tests/pipelines/pag/test_pag_animatediff.py | 4 ++-- tests/pipelines/pia/test_pia.py | 4 ++-- .../test_semantic_diffusion.py | 4 ++-- .../test_stable_diffusion_depth.py | 10 ++++++---- .../test_stable_diffusion_upscale.py | 4 ++-- .../test_stable_diffusion_v_pred.py | 4 ++-- .../test_safe_diffusion.py | 4 ++-- .../test_stable_video_diffusion.py | 12 ++++++----- tests/pipelines/test_pipelines_common.py | 20 ++++++++++--------- .../test_text_to_video_zero_sdxl.py | 13 ++++++------ 23 files changed, 74 insertions(+), 61 deletions(-) diff --git a/tests/pipelines/animatediff/test_animatediff.py b/tests/pipelines/animatediff/test_animatediff.py index 5e7831a24f3a..c382bb5b7f30 100644 --- a/tests/pipelines/animatediff/test_animatediff.py +++ b/tests/pipelines/animatediff/test_animatediff.py @@ -21,7 +21,7 @@ from diffusers.utils import is_xformers_available, logging from diffusers.utils.testing_utils import ( numpy_cosine_similarity_distance, - require_non_cpu, + require_accelerator, require_torch_gpu, slow, torch_device, diff --git a/tests/pipelines/animatediff/test_animatediff_sdxl.py b/tests/pipelines/animatediff/test_animatediff_sdxl.py index a76562faf89a..45fa6bfc5c6d 100644 --- a/tests/pipelines/animatediff/test_animatediff_sdxl.py +++ b/tests/pipelines/animatediff/test_animatediff_sdxl.py @@ -14,7 +14,7 @@ UNetMotionModel, ) from diffusers.utils import is_xformers_available, logging -from diffusers.utils.testing_utils import require_non_cpu, torch_device +from diffusers.utils.testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( @@ -212,7 +212,7 @@ def test_inference_batch_single_identical( max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff - @require_non_cpu + @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/animatediff/test_animatediff_sparsectrl.py b/tests/pipelines/animatediff/test_animatediff_sparsectrl.py index 761c6588039e..21b59d0252b2 100644 --- a/tests/pipelines/animatediff/test_animatediff_sparsectrl.py +++ b/tests/pipelines/animatediff/test_animatediff_sparsectrl.py @@ -20,7 +20,7 @@ ) from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import require_non_cpu, torch_device +from diffusers.utils.testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( @@ -345,7 
+345,7 @@ def test_inference_batch_single_identical_use_simplified_condition_embedding_tru max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff - @require_non_cpu + @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/animatediff/test_animatediff_video2video.py b/tests/pipelines/animatediff/test_animatediff_video2video.py index 7a40e449f030..bb1cb9882c69 100644 --- a/tests/pipelines/animatediff/test_animatediff_video2video.py +++ b/tests/pipelines/animatediff/test_animatediff_video2video.py @@ -19,7 +19,7 @@ ) from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import is_xformers_available, logging -from diffusers.utils.testing_utils import require_non_cpu, torch_device +from diffusers.utils.testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin @@ -258,7 +258,7 @@ def test_inference_batch_single_identical( max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff - @require_non_cpu + @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py b/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py index a81a5c4467d5..5a4b507aff50 100644 --- a/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py +++ b/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py @@ -20,7 +20,7 @@ ) from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import is_xformers_available, logging -from diffusers.utils.testing_utils import require_non_cpu, torch_device +from diffusers.utils.testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin @@ -274,7 +274,7 @@ def test_inference_batch_single_identical( max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff - @require_non_cpu + @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/controlnet_xs/test_controlnetxs.py b/tests/pipelines/controlnet_xs/test_controlnetxs.py index 7a759f781dbe..007a2b0e46d7 100644 --- a/tests/pipelines/controlnet_xs/test_controlnetxs.py +++ b/tests/pipelines/controlnet_xs/test_controlnetxs.py @@ -38,7 +38,7 @@ is_torch_compile, load_image, load_numpy, - require_non_cpu, + require_accelerator, require_torch_2, require_torch_gpu, run_test_in_subprocess, @@ -307,7 +307,7 @@ def test_multi_vae(self): assert out_vae_np.shape == out_np.shape - @require_non_cpu + @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/deepfloyd_if/test_if.py b/tests/pipelines/deepfloyd_if/test_if.py index f56ae7a2112f..9a767426193e 100644 --- a/tests/pipelines/deepfloyd_if/test_if.py +++ b/tests/pipelines/deepfloyd_if/test_if.py @@ -23,7 +23,7 @@ ) from 
diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import load_numpy, require_non_cpu, require_torch_gpu, skip_mps, slow, torch_device +from diffusers.utils.testing_utils import load_numpy, require_accelerator, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference diff --git a/tests/pipelines/deepfloyd_if/test_if_img2img.py b/tests/pipelines/deepfloyd_if/test_if_img2img.py index aa30d491690e..26ac42831b8b 100644 --- a/tests/pipelines/deepfloyd_if/test_if_img2img.py +++ b/tests/pipelines/deepfloyd_if/test_if_img2img.py @@ -25,7 +25,7 @@ from diffusers.utils.testing_utils import ( floats_tensor, load_numpy, - require_non_cpu, + require_accelerator, require_torch_gpu, skip_mps, slow, @@ -78,12 +78,14 @@ def test_save_load_optional_components(self): def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) diff --git a/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py b/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py index a8b8f9524720..1d1244c96c33 100644 --- a/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py +++ b/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py @@ -25,7 +25,7 @@ from diffusers.utils.testing_utils import ( floats_tensor, load_numpy, - require_non_cpu, + require_accelerator, require_torch_gpu, skip_mps, slow, @@ -80,7 +80,8 @@ def test_xformers_attention_forwardGenerator_pass(self): def test_save_load_optional_components(self): self._test_save_load_optional_components() - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) diff --git a/tests/pipelines/deepfloyd_if/test_if_inpainting.py b/tests/pipelines/deepfloyd_if/test_if_inpainting.py index 677b644a5db6..1c4f27403332 100644 --- a/tests/pipelines/deepfloyd_if/test_if_inpainting.py +++ b/tests/pipelines/deepfloyd_if/test_if_inpainting.py @@ -25,7 +25,7 @@ from diffusers.utils.testing_utils import ( floats_tensor, load_numpy, - require_non_cpu, + require_accelerator, require_torch_gpu, skip_mps, slow, @@ -80,7 +80,8 @@ def test_xformers_attention_forwardGenerator_pass(self): def test_save_load_optional_components(self): self._test_save_load_optional_components() - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder 
super().test_save_load_float16(expected_max_diff=1e-1) diff --git a/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py b/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py index ffe64c682761..fc1b04aacb9b 100644 --- a/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py +++ b/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py @@ -25,7 +25,7 @@ from diffusers.utils.testing_utils import ( floats_tensor, load_numpy, - require_non_cpu, + require_accelerator, require_torch_gpu, skip_mps, slow, @@ -82,7 +82,8 @@ def test_xformers_attention_forwardGenerator_pass(self): def test_save_load_optional_components(self): self._test_save_load_optional_components() - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) diff --git a/tests/pipelines/deepfloyd_if/test_if_superresolution.py b/tests/pipelines/deepfloyd_if/test_if_superresolution.py index 7ee53a851db5..bdb9f8a76d8a 100644 --- a/tests/pipelines/deepfloyd_if/test_if_superresolution.py +++ b/tests/pipelines/deepfloyd_if/test_if_superresolution.py @@ -25,7 +25,7 @@ from diffusers.utils.testing_utils import ( floats_tensor, load_numpy, - require_non_cpu, + require_accelerator, require_torch_gpu, skip_mps, slow, @@ -75,7 +75,8 @@ def test_xformers_attention_forwardGenerator_pass(self): def test_save_load_optional_components(self): self._test_save_load_optional_components() - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_save_load_float16(self): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_float16(expected_max_diff=1e-1) diff --git a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py index fc4bf98b9135..38ac6a46ccca 100644 --- a/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py +++ b/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py @@ -26,7 +26,7 @@ floats_tensor, load_image, nightly, - require_non_cpu, + require_accelerator, require_torch, torch_device, ) @@ -94,7 +94,7 @@ def test_inference_superresolution(self): assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - @require_non_cpu + @require_accelerator def test_inference_superresolution_fp16(self): unet = self.dummy_uncond_unet scheduler = DDIMScheduler() diff --git a/tests/pipelines/pag/test_pag_animatediff.py b/tests/pipelines/pag/test_pag_animatediff.py index 02bbc85676be..59ce9cc0a987 100644 --- a/tests/pipelines/pag/test_pag_animatediff.py +++ b/tests/pipelines/pag/test_pag_animatediff.py @@ -19,7 +19,7 @@ ) from diffusers.models.attention import FreeNoiseTransformerBlock from diffusers.utils import is_xformers_available -from diffusers.utils.testing_utils import require_non_cpu, torch_device +from diffusers.utils.testing_utils import require_accelerator, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( @@ -218,7 +218,7 @@ def test_dict_tuple_outputs_equivalent(self): expected_slice = np.array([0.5295, 0.3947, 0.5300, 0.4864, 0.4518, 0.5315, 0.5440, 0.4775, 0.5538]) 
return super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice) - @require_non_cpu + @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/pia/test_pia.py b/tests/pipelines/pia/test_pia.py index 5f34807ad2da..e461860eff65 100644 --- a/tests/pipelines/pia/test_pia.py +++ b/tests/pipelines/pia/test_pia.py @@ -18,7 +18,7 @@ UNetMotionModel, ) from diffusers.utils import is_xformers_available, logging -from diffusers.utils.testing_utils import floats_tensor, require_non_cpu, torch_device +from diffusers.utils.testing_utils import floats_tensor, require_accelerator, torch_device from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin @@ -278,7 +278,7 @@ def test_inference_batch_single_identical( max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff - @require_non_cpu + @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) diff --git a/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py b/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py index 23c1e22ade51..6cd431f02d58 100644 --- a/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py +++ b/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py @@ -28,7 +28,7 @@ enable_full_determinism, floats_tensor, nightly, - require_non_cpu, + require_accelerator, require_torch_gpu, torch_device, ) @@ -238,7 +238,7 @@ def test_semantic_diffusion_no_safety_checker(self): image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None - @require_non_cpu + @require_accelerator def test_semantic_diffusion_fp16(self): """Test that stable diffusion works with fp16""" unet = self.dummy_cond_unet diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py index 2216a9425a0c..01a0a3abe4ee 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py @@ -43,7 +43,7 @@ load_numpy, nightly, require_accelerate_version_greater, - require_non_cpu, + require_accelerator, require_torch_gpu, skip_mps, slow, @@ -195,7 +195,8 @@ def test_save_load_local(self): max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_save_load_float16(self): components = self.get_dummy_components() for name, module in components.items(): @@ -227,7 +228,8 @@ def test_save_load_float16(self): max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 2e-2, "The output of the fp16 pipeline changed after saving and loading.") - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_float16_inference(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) @@ -247,7 +249,7 @@ def test_float16_inference(self): max_diff = np.abs(output - output_fp16).max() self.assertLess(max_diff, 1.3e-2, "The outputs of the fp16 and fp32 pipelines are too different.") - @require_non_cpu + @require_accelerator @require_accelerate_version_greater("0.14.0") def 
test_cpu_offload_forward_pass(self): components = self.get_dummy_components() diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py index 440b91b8558a..4b04169a270b 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py @@ -29,7 +29,7 @@ floats_tensor, load_image, load_numpy, - require_non_cpu, + require_accelerator, require_torch_gpu, slow, torch_device, @@ -290,7 +290,7 @@ def test_stable_diffusion_upscale_prompt_embeds(self): assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_prompt_embeds_slice.flatten() - expected_slice).max() < 1e-2 - @require_non_cpu + @require_accelerator def test_stable_diffusion_upscale_fp16(self): """Test that stable diffusion upscale works with fp16""" unet = self.dummy_cond_unet_upscale diff --git a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py index 982842e0c155..d69d1c492548 100644 --- a/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py +++ b/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py @@ -34,7 +34,7 @@ enable_full_determinism, load_numpy, numpy_cosine_similarity_distance, - require_non_cpu, + require_accelerator, require_torch_gpu, slow, torch_device, @@ -214,7 +214,7 @@ def test_stable_diffusion_v_pred_k_euler(self): assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - @require_non_cpu + @require_accelerator def test_stable_diffusion_v_pred_fp16(self): """Test that stable diffusion v-prediction works with fp16""" unet = self.dummy_cond_unet diff --git a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py index 263018f5a304..269677c08345 100644 --- a/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py +++ b/tests/pipelines/stable_diffusion_safe/test_safe_diffusion.py @@ -24,7 +24,7 @@ from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline -from diffusers.utils.testing_utils import floats_tensor, nightly, require_non_cpu, require_torch_gpu, torch_device +from diffusers.utils.testing_utils import floats_tensor, nightly, require_accelerator, require_torch_gpu, torch_device class SafeDiffusionPipelineFastTests(unittest.TestCase): @@ -228,7 +228,7 @@ def test_stable_diffusion_no_safety_checker(self): image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None - @require_non_cpu + @require_accelerator def test_stable_diffusion_fp16(self): """Test that stable diffusion works with fp16""" unet = self.dummy_cond_unet diff --git a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py index e1560f10f0a6..094bef210aaf 100644 --- a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py +++ b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py @@ -26,7 +26,7 @@ floats_tensor, numpy_cosine_similarity_distance, require_accelerate_version_greater, - require_non_cpu, + require_accelerator, require_torch_gpu, slow, torch_device, @@ 
-252,7 +252,8 @@ def test_float16_inference(self, expected_max_diff=5e-2): max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): @@ -368,7 +369,8 @@ def test_save_load_local(self, expected_max_difference=9e-4): max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) @@ -404,7 +406,7 @@ def test_to_dtype(self): model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) - @require_non_cpu + @require_accelerator @require_accelerate_version_greater("0.14.0") def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): components = self.get_dummy_components() @@ -427,7 +429,7 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") - @require_non_cpu + @require_accelerator @require_accelerate_version_greater("0.17.0") def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): generator_device = "cpu" diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index e77ebf5af8dc..7ec677558059 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -42,7 +42,7 @@ from diffusers.utils.testing_utils import ( CaptureLogger, require_accelerate_version_greater, - require_non_cpu, + require_accelerator, require_torch, skip_mps, torch_device, @@ -772,7 +772,7 @@ def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3): type(proc) == AttnProcessor for proc in component.attn_processors.values() ), "`from_pipe` changed the attention processor in original pipeline." 
- @require_non_cpu + @require_accelerator @require_accelerate_version_greater("0.14.0") def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1e-3): components = self.get_dummy_components() @@ -1202,7 +1202,8 @@ def test_components_function(self): self.assertTrue(hasattr(pipe, "components")) self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_float16_inference(self, expected_max_diff=5e-2): components = self.get_dummy_components() pipe = self.pipeline_class(**components) @@ -1239,7 +1240,8 @@ def test_float16_inference(self, expected_max_diff=5e-2): max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") - @require_non_cpu + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): @@ -1320,7 +1322,7 @@ def test_save_load_optional_components(self, expected_max_difference=1e-4): max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, expected_max_difference) - @require_non_cpu + @require_accelerator def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) @@ -1394,7 +1396,7 @@ def _test_attention_slicing_forward_pass( assert_mean_pixel_difference(to_np(output_with_slicing1[0]), to_np(output_without_slicing[0])) assert_mean_pixel_difference(to_np(output_with_slicing2[0]), to_np(output_without_slicing[0])) - @require_non_cpu + @require_accelerator @require_accelerate_version_greater("0.14.0") def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): import accelerate @@ -1455,7 +1457,7 @@ def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", ) - @require_non_cpu + @require_accelerator @require_accelerate_version_greater("0.17.0") def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): import accelerate @@ -1510,7 +1512,7 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", ) - @require_non_cpu + @require_accelerator @require_accelerate_version_greater("0.17.0") def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4): import accelerate @@ -1565,7 +1567,7 @@ def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4): f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", ) - @require_non_cpu + @require_accelerator @require_accelerate_version_greater("0.14.0") def test_sequential_offload_forward_pass_twice(self, expected_max_diff=2e-4): import accelerate diff --git a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py index 2dbb41a08765..95a528c9d039 100644 --- a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py +++ b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py @@ -27,7 +27,7 @@ enable_full_determinism, nightly, require_accelerate_version_greater, - require_non_cpu, + require_accelerator, require_torch_gpu, 
 torch_device,
 )
@@ -219,7 +219,7 @@ def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):
         max_diff = np.abs(to_np(output) - to_np(output_tuple)).max()
         self.assertLess(max_diff, expected_max_difference)

-    @require_non_cpu
+    @require_accelerator
     def test_float16_inference(self, expected_max_diff=5e-2):
         components = self.get_dummy_components()
         for name, module in components.items():
@@ -261,7 +261,7 @@ def test_inference_batch_consistent(self):
     def test_inference_batch_single_identical(self):
         pass

-    @require_non_cpu
+    @require_accelerator
     @require_accelerate_version_greater("0.17.0")
     def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
         components = self.get_dummy_components()
@@ -282,8 +282,9 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
     @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
     def test_pipeline_call_signature(self):
         pass
-
-    @require_non_cpu
+
+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
+    @require_accelerator
     def test_save_load_float16(self, expected_max_diff=1e-2):
         components = self.get_dummy_components()
         for name, module in components.items():
@@ -335,7 +336,7 @@ def test_save_load_optional_components(self):
     def test_sequential_cpu_offload_forward_pass(self):
         pass

-    @require_non_cpu
+    @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)

From 35de6d348d315eb3b72bbf349b837dc053552bea Mon Sep 17 00:00:00 2001
From: "Lin, Fanli" <fanli.lin@intel.com>
Date: Sun, 10 Nov 2024 19:12:27 -0800
Subject: [PATCH 48/51] no unittest.skipIf

---
 .../stable_video_diffusion/test_stable_video_diffusion.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
index 094bef210aaf..ac9acb26afd3 100644
--- a/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
+++ b/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py
@@ -369,7 +369,6 @@ def test_save_load_local(self, expected_max_difference=9e-4):
         max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
         self.assertLess(max_diff, expected_max_difference)

-    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
     @require_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()

From 64ef84a5285f23cc3340a40c2f43e6a48aa400f2 Mon Sep 17 00:00:00 2001
From: "Lin, Fanli" <fanli.lin@intel.com>
Date: Sun, 10 Nov 2024 19:17:42 -0800
Subject: [PATCH 49/51] update

---
 .../text_to_video_synthesis/test_text_to_video_zero_sdxl.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
index 95a528c9d039..58a803288879 100644
--- a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
+++ b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
@@ -219,6 +219,7 @@ def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):
         max_diff = np.abs(to_np(output) - to_np(output_tuple)).max()
         self.assertLess(max_diff, expected_max_difference)

+    @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
     @require_accelerator
     def test_float16_inference(self, expected_max_diff=5e-2):
         components = self.get_dummy_components()

From
c0455244d7eb444e51f08d5e8fa237066550abf7 Mon Sep 17 00:00:00 2001 From: "Lin, Fanli" Date: Mon, 18 Nov 2024 02:14:57 -0800 Subject: [PATCH 50/51] apply style check --- tests/models/autoencoders/test_models_vae.py | 6 ++---- tests/pipelines/amused/test_amused.py | 3 +-- tests/pipelines/amused/test_amused_img2img.py | 3 +-- tests/pipelines/amused/test_amused_inpaint.py | 3 +-- tests/pipelines/controlnet/test_controlnet_sdxl.py | 2 +- tests/pipelines/deepfloyd_if/test_if.py | 9 ++++++++- .../test_text_to_video_zero_sdxl.py | 2 +- 7 files changed, 15 insertions(+), 13 deletions(-) diff --git a/tests/models/autoencoders/test_models_vae.py b/tests/models/autoencoders/test_models_vae.py index d29defbf6085..ccb0e944f0f6 100644 --- a/tests/models/autoencoders/test_models_vae.py +++ b/tests/models/autoencoders/test_models_vae.py @@ -376,12 +376,10 @@ def prepare_init_args_and_inputs_for_common(self): return self.init_dict, self.inputs_dict() @unittest.skip - def test_training(self): - ... + def test_training(self): ... @unittest.skip - def test_ema_training(self): - ... + def test_ema_training(self): ... class AutoencoderKLTemporalDecoderFastTests(ModelTesterMixin, unittest.TestCase): diff --git a/tests/pipelines/amused/test_amused.py b/tests/pipelines/amused/test_amused.py index f28d8708d309..f348008ae4de 100644 --- a/tests/pipelines/amused/test_amused.py +++ b/tests/pipelines/amused/test_amused.py @@ -124,8 +124,7 @@ def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") - def test_inference_batch_single_identical(self): - ... + def test_inference_batch_single_identical(self): ... @slow diff --git a/tests/pipelines/amused/test_amused_img2img.py b/tests/pipelines/amused/test_amused_img2img.py index 2699bbe7f56f..942735f15707 100644 --- a/tests/pipelines/amused/test_amused_img2img.py +++ b/tests/pipelines/amused/test_amused_img2img.py @@ -126,8 +126,7 @@ def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") - def test_inference_batch_single_identical(self): - ... + def test_inference_batch_single_identical(self): ... @slow diff --git a/tests/pipelines/amused/test_amused_inpaint.py b/tests/pipelines/amused/test_amused_inpaint.py index 645379a7eab1..541b988f1798 100644 --- a/tests/pipelines/amused/test_amused_inpaint.py +++ b/tests/pipelines/amused/test_amused_inpaint.py @@ -130,8 +130,7 @@ def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") - def test_inference_batch_single_identical(self): - ... + def test_inference_batch_single_identical(self): ... 
@slow diff --git a/tests/pipelines/controlnet/test_controlnet_sdxl.py b/tests/pipelines/controlnet/test_controlnet_sdxl.py index c931391ac4d5..ea7fff5537a5 100644 --- a/tests/pipelines/controlnet/test_controlnet_sdxl.py +++ b/tests/pipelines/controlnet/test_controlnet_sdxl.py @@ -1019,7 +1019,7 @@ def test_conditioning_channels(self): ) controlnet = ControlNetModel.from_unet(unet, conditioning_channels=4) - assert type(controlnet.mid_block) == UNetMidBlock2D + assert type(controlnet.mid_block) is UNetMidBlock2D assert controlnet.conditioning_channels == 4 def get_dummy_components(self, time_cond_proj_dim=None): diff --git a/tests/pipelines/deepfloyd_if/test_if.py b/tests/pipelines/deepfloyd_if/test_if.py index 9a767426193e..13a05855f145 100644 --- a/tests/pipelines/deepfloyd_if/test_if.py +++ b/tests/pipelines/deepfloyd_if/test_if.py @@ -23,7 +23,14 @@ ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available -from diffusers.utils.testing_utils import load_numpy, require_accelerator, require_torch_gpu, skip_mps, slow, torch_device +from diffusers.utils.testing_utils import ( + load_numpy, + require_accelerator, + require_torch_gpu, + skip_mps, + slow, + torch_device, +) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference diff --git a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py index 58a803288879..db24767b60fc 100644 --- a/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py +++ b/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py @@ -283,7 +283,7 @@ def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.") def test_pipeline_call_signature(self): pass - + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") @require_accelerator def test_save_load_float16(self, expected_max_diff=1e-2): From 05ce46137ec106eb9754f7b8f07da888c6288bfc Mon Sep 17 00:00:00 2001 From: "Lin, Fanli" Date: Mon, 18 Nov 2024 02:53:45 -0800 Subject: [PATCH 51/51] reapply format --- tests/models/autoencoders/test_models_vae.py | 6 ++++-- tests/pipelines/amused/test_amused.py | 3 ++- tests/pipelines/amused/test_amused_img2img.py | 3 ++- tests/pipelines/amused/test_amused_inpaint.py | 3 ++- 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/tests/models/autoencoders/test_models_vae.py b/tests/models/autoencoders/test_models_vae.py index ccb0e944f0f6..d29defbf6085 100644 --- a/tests/models/autoencoders/test_models_vae.py +++ b/tests/models/autoencoders/test_models_vae.py @@ -376,10 +376,12 @@ def prepare_init_args_and_inputs_for_common(self): return self.init_dict, self.inputs_dict() @unittest.skip - def test_training(self): ... + def test_training(self): + ... @unittest.skip - def test_ema_training(self): ... + def test_ema_training(self): + ... 
class AutoencoderKLTemporalDecoderFastTests(ModelTesterMixin, unittest.TestCase): diff --git a/tests/pipelines/amused/test_amused.py b/tests/pipelines/amused/test_amused.py index f348008ae4de..f28d8708d309 100644 --- a/tests/pipelines/amused/test_amused.py +++ b/tests/pipelines/amused/test_amused.py @@ -124,7 +124,8 @@ def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") - def test_inference_batch_single_identical(self): ... + def test_inference_batch_single_identical(self): + ... @slow diff --git a/tests/pipelines/amused/test_amused_img2img.py b/tests/pipelines/amused/test_amused_img2img.py index 942735f15707..2699bbe7f56f 100644 --- a/tests/pipelines/amused/test_amused_img2img.py +++ b/tests/pipelines/amused/test_amused_img2img.py @@ -126,7 +126,8 @@ def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") - def test_inference_batch_single_identical(self): ... + def test_inference_batch_single_identical(self): + ... @slow diff --git a/tests/pipelines/amused/test_amused_inpaint.py b/tests/pipelines/amused/test_amused_inpaint.py index 541b988f1798..645379a7eab1 100644 --- a/tests/pipelines/amused/test_amused_inpaint.py +++ b/tests/pipelines/amused/test_amused_inpaint.py @@ -130,7 +130,8 @@ def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") - def test_inference_batch_single_identical(self): ... + def test_inference_batch_single_identical(self): + ... @slow
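
The series above standardizes on two skip decorators from `diffusers.utils.testing_utils`: `require_accelerator` (renamed from `require_non_cpu` in patch 43) and `require_accelerate_version_greater`. Only the rename and the docstring of the former appear in these patches, so the following is a minimal sketch of what such decorators could look like inside `testing_utils`, not the library's actual implementation; `torch_device` is the module-level device string the tests above already use, and reading the installed `accelerate` version via `importlib.metadata` is an assumption made here for illustration:

import unittest
from importlib.metadata import PackageNotFoundError
from importlib.metadata import version as installed_version

from packaging import version


def require_accelerator(test_case):
    # Skip unless the active torch device is a hardware accelerator (CUDA, XPU, MPS, ...).
    return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)


def require_accelerate_version_greater(accelerate_version):
    # Skip unless `accelerate` is installed and strictly newer than `accelerate_version`.
    def decorator(test_case):
        try:
            newer = version.parse(installed_version("accelerate")) > version.parse(accelerate_version)
        except PackageNotFoundError:
            newer = False
        return unittest.skipUnless(
            newer, f"test requires accelerate versions greater than {accelerate_version}"
        )(test_case)

    return decorator

With helpers of this shape, each test states its requirement once, e.g. `@require_accelerator` stacked with `@require_accelerate_version_greater("0.17.0")` as in the hunks above, instead of repeating the full `unittest.skipIf(...)` condition at every site.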