enable pipeline test cases on xpu #11527
Changes from 2 commits
tests/pipelines/text_to_video_synthesis/test_text_to_video_zero.py

```diff
@@ -19,31 +19,37 @@
 import torch
 
 from diffusers import DDIMScheduler, TextToVideoZeroPipeline
-from diffusers.utils.testing_utils import load_pt, nightly, require_torch_gpu
+from diffusers.utils.testing_utils import (
+    backend_empty_cache,
+    load_pt,
+    nightly,
+    require_torch_accelerator,
+    torch_device,
+)
 
 from ..test_pipelines_common import assert_mean_pixel_difference
 
 
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class TextToVideoZeroPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def test_full_model(self):
         model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
-        pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
+        pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(torch_device)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
-        generator = torch.Generator(device="cuda").manual_seed(0)
+        generator = torch.Generator(device="cpu").manual_seed(0)
 
         prompt = "A bear is playing a guitar on Times Square"
         result = pipe(prompt=prompt, generator=generator).images
```

Review comment on the `generator` change: the PRNG behaves differently across accelerators, so it is set back to `"cpu"` for cross-device reproducibility, as in other cases in diffusers.
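To illustrate the reviewer's point, here is a minimal sketch (not part of the PR): a generator seeded on `"cpu"` produces the same noise sequence on every run, independent of which accelerator the pipeline itself executes on, so expected outputs stay comparable between CUDA and XPU machines.

```python
import torch


def make_latents() -> torch.Tensor:
    # Seeding on CPU keeps the random sequence identical whether the
    # pipeline later runs on CUDA, XPU, or plain CPU.
    generator = torch.Generator(device="cpu").manual_seed(0)
    return torch.randn(1, 4, 8, 8, generator=generator)


# Same seed, same generator device -> identical noise on every call.
assert torch.equal(make_latents(), make_latents())
```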
TextToVideoZeroSDXLPipeline tests:

```diff
@@ -24,11 +24,11 @@
 
 from diffusers import AutoencoderKL, DDIMScheduler, TextToVideoZeroSDXLPipeline, UNet2DConditionModel
 from diffusers.utils.testing_utils import (
+    backend_empty_cache,
     enable_full_determinism,
     nightly,
     require_accelerate_version_greater,
-    require_accelerator,
-    require_torch_gpu,
+    require_torch_accelerator,
     torch_device,
 )
@@ -220,7 +220,7 @@ def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):
         self.assertLess(max_diff, expected_max_difference)
 
     @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
-    @require_accelerator
+    @require_torch_accelerator
     def test_float16_inference(self, expected_max_diff=5e-2):
         components = self.get_dummy_components()
         for name, module in components.items():
@@ -262,7 +262,7 @@ def test_inference_batch_consistent(self):
     def test_inference_batch_single_identical(self):
         pass
 
-    @require_accelerator
+    @require_torch_accelerator
     @require_accelerate_version_greater("0.17.0")
     def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):
         components = self.get_dummy_components()
@@ -285,7 +285,7 @@ def test_pipeline_call_signature(self):
         pass
 
     @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
-    @require_accelerator
+    @require_torch_accelerator
     def test_save_load_float16(self, expected_max_diff=1e-2):
         components = self.get_dummy_components()
         for name, module in components.items():
@@ -337,7 +337,7 @@ def test_save_load_optional_components(self):
     def test_sequential_cpu_offload_forward_pass(self):
         pass
 
-    @require_accelerator
+    @require_torch_accelerator
     def test_to_device(self):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -365,19 +365,19 @@ def test_xformers_attention_forwardGenerator_pass(self):
 
 
 @nightly
-@require_torch_gpu
+@require_torch_accelerator
 class TextToVideoZeroSDXLPipelineSlowTests(unittest.TestCase):
     def setUp(self):
         # clean up the VRAM before each test
         super().setUp()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def tearDown(self):
         # clean up the VRAM after each test
         super().tearDown()
         gc.collect()
-        torch.cuda.empty_cache()
+        backend_empty_cache(torch_device)
 
     def test_full_model(self):
         model_id = "stabilityai/stable-diffusion-xl-base-1.0"
```

Review comment on the `test_float16_inference` decorator change: actually it's a torch case, so use `require_torch_accelerator` rather than `require_accelerator`.
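The helpers the tests switch to (`torch_device`, `require_torch_accelerator`, `backend_empty_cache`) come from `diffusers.utils.testing_utils`. The sketch below is only a hypothetical illustration of the device-agnostic pattern, not the library's actual implementation:

```python
import unittest
import torch

# Illustrative only: resolve whichever torch accelerator backend is available.
if torch.cuda.is_available():
    torch_device = "cuda"
elif hasattr(torch, "xpu") and torch.xpu.is_available():
    torch_device = "xpu"
else:
    torch_device = "cpu"


def require_torch_accelerator(test_case):
    # Skip the test unless some torch accelerator (CUDA, XPU, ...) is present,
    # instead of hard-requiring CUDA the way require_torch_gpu does.
    return unittest.skipUnless(torch_device != "cpu", "test requires a torch accelerator")(test_case)


def backend_empty_cache(device: str) -> None:
    # Dispatch cache clearing to the matching backend rather than calling
    # torch.cuda.empty_cache() unconditionally.
    if device == "cuda":
        torch.cuda.empty_cache()
    elif device == "xpu":
        torch.xpu.empty_cache()
```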
Review comment:
- `weights_only` default value changed from `False` to `True` in PyTorch 2.6, so set it explicitly to `False` here; otherwise `tests/pipelines/text_to_video_synthesis/test_text_to_video_zero.py::TextToVideoZeroPipelineSlowTests::test_full_model` will raise an error.
- `map_location` defaults to `None`, which aligns with `torch.load`; otherwise the same test will raise an error.

Reply: Could we add `weights_only=True` as an arg to `load_pt` and pass it in to `torch.load`? In the test itself we can set it to `False` so that it's clear what's happening in the test. I would avoid doing this under the hood because it is a potential security hole.

Reply: @DN6, done, pls help review, thx.
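A rough sketch of what the suggested change could look like (hypothetical; the real `load_pt` also handles downloading from a URL and may differ in details):

```python
import torch


def load_pt(path: str, map_location=None, weights_only: bool = True):
    # PyTorch 2.6 changed torch.load's weights_only default to True, so the
    # helper exposes the flag and forwards it instead of overriding it silently.
    return torch.load(path, map_location=map_location, weights_only=weights_only)


# In the slow test the expected tensor is a pickled artifact, so the test
# opts out explicitly where it is visible to reviewers, e.g.:
# expected_result = load_pt(expected_url, weights_only=False)
```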