From 403ba585379c74052e45ddec4fc966017ee26964 Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Tue, 1 Jul 2025 11:13:40 +0530
Subject: [PATCH 1/4] wan vace.

---
 tests/lora/test_lora_layers_wanvace.py | 134 +++++++++++++++++++++++++
 1 file changed, 134 insertions(+)
 create mode 100644 tests/lora/test_lora_layers_wanvace.py

diff --git a/tests/lora/test_lora_layers_wanvace.py b/tests/lora/test_lora_layers_wanvace.py
new file mode 100644
index 000000000000..29fe7a8e8398
--- /dev/null
+++ b/tests/lora/test_lora_layers_wanvace.py
@@ -0,0 +1,134 @@
+# Copyright 2025 HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import unittest
+
+import torch
+from transformers import AutoTokenizer, T5EncoderModel
+
+from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanPipeline, WanVACETransformer3DModel
+from diffusers.utils.testing_utils import floats_tensor, require_peft_backend, skip_mps
+
+
+sys.path.append(".")
+
+from utils import PeftLoraLoaderMixinTests  # noqa: E402
+
+
+@require_peft_backend
+@skip_mps
+class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
+    pipeline_class = WanPipeline
+    scheduler_cls = FlowMatchEulerDiscreteScheduler
+    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
+    scheduler_kwargs = {}
+
+    transformer_kwargs = {
+        "patch_size": (1, 2, 2),
+        "num_attention_heads": 2,
+        "attention_head_dim": 12,
+        "in_channels": 16,
+        "out_channels": 16,
+        "text_dim": 32,
+        "freq_dim": 256,
+        "ffn_dim": 32,
+        "num_layers": 2,
+        "cross_attn_norm": True,
+        "qk_norm": "rms_norm_across_heads",
+        "rope_max_seq_len": 32,
+    }
+    transformer_cls = WanVACETransformer3DModel
+    vae_kwargs = {
+        "base_dim": 3,
+        "z_dim": 16,
+        "dim_mult": [1, 1, 1, 1],
+        "num_res_blocks": 1,
+        "temperal_downsample": [False, True, True],
+    }
+    vae_cls = AutoencoderKLWan
+    has_two_text_encoders = True
+    tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
+    text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"
+
+    text_encoder_target_modules = ["q", "k", "v", "o"]
+
+    @property
+    def output_shape(self):
+        return (1, 9, 32, 32, 3)
+
+    def get_dummy_inputs(self, with_generator=True):
+        batch_size = 1
+        sequence_length = 16
+        num_channels = 4
+        num_frames = 9
+        num_latent_frames = 3  # (num_frames - 1) // temporal_compression_ratio + 1
+        sizes = (4, 4)
+
+        generator = torch.manual_seed(0)
+        noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
+        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
+
+        pipeline_inputs = {
+            "prompt": "",
+            "num_frames": num_frames,
+            "num_inference_steps": 1,
+            "guidance_scale": 6.0,
+            "height": 32,
+            "width": 32,
+            "max_sequence_length": sequence_length,
+            "output_type": "np",
+        }
+        if with_generator:
+            pipeline_inputs.update({"generator": generator})
+
+        return noise, input_ids, pipeline_inputs
+
+    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
+        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
+
+    def test_simple_inference_with_text_denoiser_lora_unfused(self):
+        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
+
+    @unittest.skip("Not supported in Wan.")
+    def test_simple_inference_with_text_denoiser_block_scale(self):
+        pass
+
+    @unittest.skip("Not supported in Wan.")
+    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
+        pass
+
+    @unittest.skip("Not supported in Wan.")
+    def test_modify_padding_mode(self):
+        pass
+
+    @unittest.skip("Text encoder LoRA is not supported in Wan.")
+    def test_simple_inference_with_partial_text_lora(self):
+        pass
+
+    @unittest.skip("Text encoder LoRA is not supported in Wan.")
+    def test_simple_inference_with_text_lora(self):
+        pass
+
+    @unittest.skip("Text encoder LoRA is not supported in Wan.")
+    def test_simple_inference_with_text_lora_and_scale(self):
+        pass
+
+    @unittest.skip("Text encoder LoRA is not supported in Wan.")
+    def test_simple_inference_with_text_lora_fused(self):
+        pass
+
+    @unittest.skip("Text encoder LoRA is not supported in Wan.")
+    def test_simple_inference_with_text_lora_save_load(self):
+        pass

From 763dba509c9e680755e2e12204dbb3a4c58a9f28 Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Tue, 1 Jul 2025 12:58:05 +0530
Subject: [PATCH 2/4] update

---
 tests/lora/test_lora_layers_wanvace.py | 108 ++++++++++++++++++++-----
 1 file changed, 86 insertions(+), 22 deletions(-)

diff --git a/tests/lora/test_lora_layers_wanvace.py b/tests/lora/test_lora_layers_wanvace.py
index 29fe7a8e8398..2e55f86f5675 100644
--- a/tests/lora/test_lora_layers_wanvace.py
+++ b/tests/lora/test_lora_layers_wanvace.py
@@ -13,13 +13,23 @@
 # limitations under the License.
 
 import sys
+import tempfile
 import unittest
 
+import numpy as np
+import pytest
 import torch
+from PIL import Image
 from transformers import AutoTokenizer, T5EncoderModel
 
-from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanPipeline, WanVACETransformer3DModel
-from diffusers.utils.testing_utils import floats_tensor, require_peft_backend, skip_mps
+from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanVACEPipeline, WanVACETransformer3DModel
+from diffusers.utils.testing_utils import (
+    floats_tensor,
+    require_peft_backend,
+    require_peft_version_greater,
+    skip_mps,
+    torch_device,
+)
 
 
 sys.path.append(".")
@@ -29,8 +39,8 @@
 
 @require_peft_backend
 @skip_mps
-class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
-    pipeline_class = WanPipeline
+class WanVACELoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
+    pipeline_class = WanVACEPipeline
     scheduler_cls = FlowMatchEulerDiscreteScheduler
     scheduler_classes = [FlowMatchEulerDiscreteScheduler]
     scheduler_kwargs = {}
@@ -38,22 +48,26 @@ class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
     transformer_kwargs = {
         "patch_size": (1, 2, 2),
         "num_attention_heads": 2,
-        "attention_head_dim": 12,
-        "in_channels": 16,
-        "out_channels": 16,
+        "attention_head_dim": 8,
+        "in_channels": 4,
+        "out_channels": 4,
         "text_dim": 32,
-        "freq_dim": 256,
-        "ffn_dim": 32,
+        "freq_dim": 16,
+        "ffn_dim": 16,
         "num_layers": 2,
         "cross_attn_norm": True,
         "qk_norm": "rms_norm_across_heads",
-        "rope_max_seq_len": 32,
+        "rope_max_seq_len": 16,
+        "vace_layers": [0],
+        "vace_in_channels": 72,
     }
     transformer_cls = WanVACETransformer3DModel
     vae_kwargs = {
         "base_dim": 3,
-        "z_dim": 16,
+        "z_dim": 4,
         "dim_mult": [1, 1, 1, 1],
+        "latents_mean": torch.randn(4).numpy().tolist(),
+        "latents_std": torch.randn(4).numpy().tolist(),
         "num_res_blocks": 1,
         "temperal_downsample": [False, True, True],
     }
@@ -66,7 +80,7 @@ class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
 
     @property
     def output_shape(self):
-        return (1, 9, 32, 32, 3)
+        return (1, 9, 16, 16, 3)
 
     def get_dummy_inputs(self, with_generator=True):
         batch_size = 1
@@ -75,18 +89,23 @@ def get_dummy_inputs(self, with_generator=True):
         num_frames = 9
         num_latent_frames = 3  # (num_frames - 1) // temporal_compression_ratio + 1
         sizes = (4, 4)
+        height, width = 16, 16
 
         generator = torch.manual_seed(0)
         noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
         input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
+        video = [Image.new("RGB", (height, width))] * num_frames
+        mask = [Image.new("L", (height, width), 0)] * num_frames
 
         pipeline_inputs = {
+            "video": video,
+            "mask": mask,
             "prompt": "",
             "num_frames": num_frames,
             "num_inference_steps": 1,
             "guidance_scale": 6.0,
-            "height": 32,
-            "width": 32,
+            "height": height,
+            "width": width,
             "max_sequence_length": sequence_length,
             "output_type": "np",
         }
@@ -101,34 +120,79 @@ def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
     def test_simple_inference_with_text_denoiser_lora_unfused(self):
         super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
 
-    @unittest.skip("Not supported in Wan.")
+    @unittest.skip("Not supported in Wan VACE.")
     def test_simple_inference_with_text_denoiser_block_scale(self):
         pass
 
-    @unittest.skip("Not supported in Wan.")
+    @unittest.skip("Not supported in Wan VACE.")
     def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
         pass
 
-    @unittest.skip("Not supported in Wan.")
+ @unittest.skip("Not supported in Wan VACE.") def test_modify_padding_mode(self): pass - @unittest.skip("Text encoder LoRA is not supported in Wan.") + @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_partial_text_lora(self): pass - @unittest.skip("Text encoder LoRA is not supported in Wan.") + @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_text_lora(self): pass - @unittest.skip("Text encoder LoRA is not supported in Wan.") + @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_text_lora_and_scale(self): pass - @unittest.skip("Text encoder LoRA is not supported in Wan.") + @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_text_lora_fused(self): pass - @unittest.skip("Text encoder LoRA is not supported in Wan.") + @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") def test_simple_inference_with_text_lora_save_load(self): pass + + @pytest.mark.xfail( + condition=True, + reason="RuntimeError: Input type (float) and bias type (c10::BFloat16) should be the same", + strict=True, + ) + def test_layerwise_casting_inference_denoiser(self): + super().test_layerwise_casting_inference_denoiser() + + @require_peft_version_greater("0.13.2") + def test_lora_exclude_modules_wanvace(self): + scheduler_cls = self.scheduler_classes[0] + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components).to(torch_device) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + # only supported for `denoiser` now + denoiser_lora_config.target_modules = ["proj_out"] + denoiser_lora_config.exclude_modules = ["vace_blocks.0.proj_out"] + pipe, _ = self.add_adapters_to_pipeline( + pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config + ) + output_lora_exclude_modules = pipe(**inputs, generator=torch.manual_seed(0))[0] + + with tempfile.TemporaryDirectory() as tmpdir: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + lora_metadatas = self._get_lora_adapter_metadata(modules_to_save) + self.pipeline_class.save_lora_weights(save_directory=tmpdir, **lora_state_dicts, **lora_metadatas) + pipe.unload_lora_weights() + pipe.load_lora_weights(tmpdir) + + output_lora_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + not np.allclose(output_no_lora, output_lora_exclude_modules, atol=1e-3, rtol=1e-3), + "LoRA should change outputs.", + ) + self.assertTrue( + np.allclose(output_lora_exclude_modules, output_lora_pretrained, atol=1e-3, rtol=1e-3), + "Lora outputs should match.", + ) From 102e98dd3fd25b302431186868073760342039ca Mon Sep 17 00:00:00 2001 From: sayakpaul Date: Tue, 1 Jul 2025 14:02:00 +0530 Subject: [PATCH 3/4] update --- tests/lora/test_lora_layers_wanvace.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/tests/lora/test_lora_layers_wanvace.py b/tests/lora/test_lora_layers_wanvace.py index 2e55f86f5675..c84bb1fc922a 100644 --- a/tests/lora/test_lora_layers_wanvace.py +++ b/tests/lora/test_lora_layers_wanvace.py @@ -12,13 +12,16 @@ # See the License for the specific language governing permissions and # limitations 
 
+import os
 import sys
 import tempfile
 import unittest
 
 import numpy as np
 import pytest
+import safetensors.torch
 import torch
+from peft.utils import get_peft_model_state_dict
 from PIL import Image
 from transformers import AutoTokenizer, T5EncoderModel
@@ -163,6 +166,7 @@ def test_layerwise_casting_inference_denoiser(self):
     @require_peft_version_greater("0.13.2")
     def test_lora_exclude_modules_wanvace(self):
         scheduler_cls = self.scheduler_classes[0]
+        exclude_module_name = "vace_blocks.0.proj_out"
         components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
         pipe = self.pipeline_class(**components).to(torch_device)
         _, _, inputs = self.get_dummy_inputs(with_generator=False)
@@ -172,22 +176,34 @@ def test_lora_exclude_modules_wanvace(self):
 
         # only supported for `denoiser` now
         denoiser_lora_config.target_modules = ["proj_out"]
-        denoiser_lora_config.exclude_modules = ["vace_blocks.0.proj_out"]
+        denoiser_lora_config.exclude_modules = [exclude_module_name]
         pipe, _ = self.add_adapters_to_pipeline(
             pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config
         )
+        # The state dict shouldn't contain the modules to be excluded from LoRA.
+        state_dict_from_model = get_peft_model_state_dict(pipe.transformer, adapter_name="default")
+        self.assertTrue(not any(exclude_module_name in k for k in state_dict_from_model))
+        self.assertTrue(any("proj_out" in k for k in state_dict_from_model))
        output_lora_exclude_modules = pipe(**inputs, generator=torch.manual_seed(0))[0]
 
         with tempfile.TemporaryDirectory() as tmpdir:
             modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
             lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
-            lora_metadatas = self._get_lora_adapter_metadata(modules_to_save)
-            self.pipeline_class.save_lora_weights(save_directory=tmpdir, **lora_state_dicts, **lora_metadatas)
+            self.pipeline_class.save_lora_weights(save_directory=tmpdir, **lora_state_dicts)
             pipe.unload_lora_weights()
+
+            # Check in the loaded state dict.
+            loaded_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
+            self.assertTrue(not any(exclude_module_name in k for k in loaded_state_dict))
+            self.assertTrue(any("proj_out" in k for k in loaded_state_dict))
+
+            # Check in the state dict obtained after loading LoRA.
             pipe.load_lora_weights(tmpdir)
+            state_dict_from_model = get_peft_model_state_dict(pipe.transformer, adapter_name="default_0")
+            self.assertTrue(not any(exclude_module_name in k for k in state_dict_from_model))
+            self.assertTrue(any("proj_out" in k for k in state_dict_from_model))
 
             output_lora_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0]
-
             self.assertTrue(
                 not np.allclose(output_no_lora, output_lora_exclude_modules, atol=1e-3, rtol=1e-3),
                 "LoRA should change outputs.",

From 416fee7d27814d99266a51134b868a58a5e56cc4 Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Tue, 1 Jul 2025 14:15:11 +0530
Subject: [PATCH 4/4] import problem

---
 tests/lora/test_lora_layers_wanvace.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tests/lora/test_lora_layers_wanvace.py b/tests/lora/test_lora_layers_wanvace.py
index c84bb1fc922a..740c00f941ed 100644
--- a/tests/lora/test_lora_layers_wanvace.py
+++ b/tests/lora/test_lora_layers_wanvace.py
@@ -21,11 +21,11 @@
 import pytest
 import safetensors.torch
 import torch
-from peft.utils import get_peft_model_state_dict
 from PIL import Image
 from transformers import AutoTokenizer, T5EncoderModel
 
 from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanVACEPipeline, WanVACETransformer3DModel
+from diffusers.utils.import_utils import is_peft_available
 from diffusers.utils.testing_utils import (
     floats_tensor,
     require_peft_backend,
@@ -35,6 +35,9 @@
 )
 
 
+if is_peft_available():
+    from peft.utils import get_peft_model_state_dict
+
 sys.path.append(".")
 
 from utils import PeftLoraLoaderMixinTests  # noqa: E402
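
Note: the behavior exercised by `test_lora_exclude_modules_wanvace` comes from PEFT's `exclude_modules` option on `LoraConfig`, which is why the test is gated on a PEFT release newer than 0.13.2. A minimal, self-contained sketch of that semantics, using a hypothetical two-block toy model in place of the WanVACE transformer (`Block` and `TinyModel` are stand-ins, not diffusers classes):

    import torch.nn as nn
    from peft import LoraConfig, get_peft_model
    from peft.utils import get_peft_model_state_dict


    class Block(nn.Module):
        def __init__(self):
            super().__init__()
            self.proj_out = nn.Linear(8, 8)

        def forward(self, x):
            return self.proj_out(x)


    class TinyModel(nn.Module):
        # Only the naming pattern matters: two submodules that each own a `proj_out`,
        # mirroring `vace_blocks.0.proj_out` vs. the other `proj_out` layers in the test.
        def __init__(self):
            super().__init__()
            self.blocks = nn.ModuleList([Block() for _ in range(2)])

        def forward(self, x):
            for block in self.blocks:
                x = block(x)
            return x


    # Target every module named `proj_out`, but exclude one specific instance.
    config = LoraConfig(r=4, target_modules=["proj_out"], exclude_modules=["blocks.0.proj_out"])
    model = get_peft_model(TinyModel(), config)

    # The excluded module carries no LoRA weights; the other instance still does.
    state_dict = get_peft_model_state_dict(model)
    assert not any("blocks.0.proj_out" in k for k in state_dict)
    assert any("blocks.1.proj_out" in k for k in state_dict)

These are the same substring assertions the test runs against `pipe.transformer` before and after the save/load round trip.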
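
The serialization half of the test follows the standard diffusers LoRA round trip. A hedged sketch of that pattern (the helper below is hypothetical, not the test's mixin code; it assumes a Wan-style pipeline with an adapter already attached, and that `pytorch_lora_weights.safetensors` is the diffusers default weight name):

    import os
    import tempfile

    import safetensors.torch
    from peft.utils import get_peft_model_state_dict


    def roundtrip_lora(pipe, excluded="vace_blocks.0.proj_out"):
        # Serialize the current adapter, drop it from the pipeline, inspect the
        # file on disk, then reload and confirm the excluded module never reappears.
        with tempfile.TemporaryDirectory() as tmpdir:
            transformer_lora_layers = get_peft_model_state_dict(pipe.transformer)
            pipe.save_lora_weights(save_directory=tmpdir, transformer_lora_layers=transformer_lora_layers)
            pipe.unload_lora_weights()

            saved = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
            assert not any(excluded in k for k in saved)

            # Reloading registers the adapter under a fresh name ("default_0" in the test).
            pipe.load_lora_weights(tmpdir)

Patch 4's `is_peft_available()` guard matters for exactly this kind of import: `peft` is an optional dependency, so a module-level `from peft.utils import ...` would break collection of the test file in environments without it.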