[Tests] Fix more tests sayak #10359
Changes from all commits:
First changed test file (the HunyuanVideo LoRA tests, judging by the imports):

```diff
@@ -15,8 +15,6 @@
 import sys
 import unittest
 
-import numpy as np
-import pytest
 import torch
 from transformers import CLIPTextModel, CLIPTokenizer, LlamaModel, LlamaTokenizerFast
 
@@ -28,16 +26,14 @@
 )
 from diffusers.utils.testing_utils import (
     floats_tensor,
-    is_torch_version,
     require_peft_backend,
     skip_mps,
     torch_device,
 )
 
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
+from utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
@@ -144,46 +140,6 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    @pytest.mark.xfail(
-        condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
-        reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
-        strict=True,
-    )
```
Review comment on lines -147 to -151: "Same." (presumably the same reason as the final comment below: the test is implemented once in tests/lora/utils.py.)
```diff
-    def test_lora_fuse_nan(self):
-        for scheduler_cls in self.scheduler_classes:
-            components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
-            pipe = self.pipeline_class(**components)
-            pipe = pipe.to(torch_device)
-            pipe.set_progress_bar_config(disable=None)
-            _, _, inputs = self.get_dummy_inputs(with_generator=False)
-
-            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
-
-            self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
-
-            # corrupt one LoRA weight with `inf` values
-            with torch.no_grad():
-                pipe.transformer.transformer_blocks[0].attn.to_q.lora_A["adapter-1"].weight += float("inf")
-
-            # with `safe_fusing=True` we should see an Error
-            with self.assertRaises(ValueError):
-                pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True)
-
-            # without we should not see an error, but every image will be black
-            pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False)
-
-            out = pipe(
-                prompt=inputs["prompt"],
-                height=inputs["height"],
-                width=inputs["width"],
-                num_frames=inputs["num_frames"],
-                num_inference_steps=inputs["num_inference_steps"],
-                max_sequence_length=inputs["max_sequence_length"],
-                output_type="np",
-            )[0]
-
-            self.assertTrue(np.isnan(out).all())
-
     def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
         super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
```
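Both removed copies of `test_lora_fuse_nan` exercise the same PEFT behavior: fusing an adapter whose weights contain `inf`. A minimal standalone sketch of that behavior, assuming the `peft` package is installed (`TinyNet` and the `"default"` adapter name are illustrative, not from the diffusers test suite):

```python
import torch
from peft import LoraConfig, get_peft_model


class TinyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.to_q = torch.nn.Linear(8, 8)

    def forward(self, x):
        return self.to_q(x)


model = get_peft_model(TinyNet(), LoraConfig(r=4, target_modules=["to_q"]))

# Corrupt the LoRA A matrix with `inf`, mirroring the removed tests.
with torch.no_grad():
    model.base_model.model.to_q.lora_A["default"].weight += float("inf")

# With safe merging, PEFT checks the merged weights for NaN/inf and
# raises a ValueError instead of silently producing broken outputs.
try:
    model.merge_and_unload(safe_merge=True)
except ValueError as err:
    print(f"safe merge rejected the corrupted adapter: {err}")
```

Since `lora_B` is zero-initialized, adding `inf` to `lora_A` makes the merged delta `0 * inf = NaN`; with `safe_fusing=False` the fusion goes through and every output value is NaN, which is what the final `np.isnan(out).all()` assertion in the removed tests checks.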
Second changed test file (the Mochi LoRA tests, judging by the imports):

```diff
@@ -15,24 +15,20 @@
 import sys
 import unittest
 
-import numpy as np
-import pytest
 import torch
 from transformers import AutoTokenizer, T5EncoderModel
 
 from diffusers import AutoencoderKLMochi, FlowMatchEulerDiscreteScheduler, MochiPipeline, MochiTransformer3DModel
 from diffusers.utils.testing_utils import (
     floats_tensor,
-    is_torch_version,
     require_peft_backend,
     skip_mps,
     torch_device,
 )
 
 
 sys.path.append(".")
 
-from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402
+from utils import PeftLoraLoaderMixinTests  # noqa: E402
 
 
 @require_peft_backend
@@ -103,40 +99,6 @@ def get_dummy_inputs(self, with_generator=True):
 
         return noise, input_ids, pipeline_inputs
 
-    @pytest.mark.xfail(
-        condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
```
Review comment on lines -106 to -107: "Same."
| reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.", | ||
| strict=True, | ||
| ) | ||
| def test_lora_fuse_nan(self): | ||
| for scheduler_cls in self.scheduler_classes: | ||
| components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) | ||
| pipe = self.pipeline_class(**components) | ||
| pipe = pipe.to(torch_device) | ||
| pipe.set_progress_bar_config(disable=None) | ||
| _, _, inputs = self.get_dummy_inputs(with_generator=False) | ||
|  | ||
| pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") | ||
|  | ||
| self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") | ||
|  | ||
| # corrupt one LoRA weight with `inf` values | ||
| with torch.no_grad(): | ||
| pipe.transformer.transformer_blocks[0].attn1.to_q.lora_A["adapter-1"].weight += float("inf") | ||
|  | ||
| # with `safe_fusing=True` we should see an Error | ||
| with self.assertRaises(ValueError): | ||
| pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True) | ||
|  | ||
| # without we should not see an error, but every image will be black | ||
| pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False) | ||
|  | ||
| out = pipe( | ||
| "test", num_inference_steps=2, max_sequence_length=inputs["max_sequence_length"], output_type="np" | ||
| )[0] | ||
|  | ||
| self.assertTrue(np.isnan(out).all()) | ||
|  | ||
| def test_simple_inference_with_text_lora_denoiser_fused_multi(self): | ||
| super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) | ||
|  | ||
|  | ||
Shared test in tests/lora/utils.py (the file named in the final review comment):

```diff
@@ -1528,7 +1528,7 @@ def test_simple_inference_with_text_denoiser_multi_adapter_weighted(self):
     @pytest.mark.xfail(
         condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
         reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
-        strict=True,
+        strict=False,
     )
     def test_lora_fuse_nan(self):
         for scheduler_cls in self.scheduler_classes:
```

Review comment on the `strict=False` change: This is because on CPU and PyTorch 2.5.1, SANA and HunyuanVideo pass this test. On GPU, all pipelines pass this test.
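As a reading aid (not part of the PR): pytest's `strict` flag controls how an unexpectedly passing `xfail` test is reported. With `strict=True`, an XPASS is turned into a hard failure, so the pipelines that do pass on CPU with PyTorch 2.5 (SANA, HunyuanVideo) would break the suite; `strict=False` tolerates the pass. A minimal illustration:

```python
import pytest


@pytest.mark.xfail(reason="fails on some backends", strict=True)
def test_strict_xfail():
    assert True  # unexpected pass -> reported as FAILED under strict=True


@pytest.mark.xfail(reason="fails on some backends", strict=False)
def test_lenient_xfail():
    assert True  # unexpected pass -> reported as XPASS, suite stays green
```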
Review comment on the removed pipeline-specific `test_lora_fuse_nan`: We don't need it here as it's implemented in tests/lora/utils.py.
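The deduplication the comment refers to is the standard test-mixin idiom: the shared test body lives once in a mixin class and each pipeline's test class inherits it. A minimal sketch with illustrative names (only `PeftLoraLoaderMixinTests` and tests/lora/utils.py come from the PR itself):

```python
import unittest


class SharedLoraTestsMixin:
    """Stands in for PeftLoraLoaderMixinTests in tests/lora/utils.py."""

    def test_lora_fuse_nan(self):
        # The real mixin builds a dummy pipeline, corrupts a LoRA weight,
        # and checks fuse_lora(safe_fusing=True) raises; this body is a
        # placeholder for illustration only.
        self.assertTrue(True)


class MochiLoRATests(SharedLoraTestsMixin, unittest.TestCase):
    # Inherits test_lora_fuse_nan from the mixin; no per-pipeline copy needed.
    pass


if __name__ == "__main__":
    unittest.main()
```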