Skip to content

Commit b94c720

Browse files
yiyixuxu and github-actions[bot]
authored and committed
[wan2.2] follow-up (huggingface#12024)
* up --------- Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
1 parent e880a24 commit b94c720

File tree

7 files changed

+895
-74
lines changed

7 files changed

+895
-74
lines changed

src/diffusers/models/transformers/transformer_wan.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -324,7 +324,7 @@ def forward(
324324
):
325325
timestep = self.timesteps_proj(timestep)
326326
if timestep_seq_len is not None:
327-
timestep = timestep.unflatten(0, (1, timestep_seq_len))
327+
timestep = timestep.unflatten(0, (-1, timestep_seq_len))
328328

329329
time_embedder_dtype = next(iter(self.time_embedder.parameters())).dtype
330330
if timestep.dtype != time_embedder_dtype and time_embedder_dtype != torch.int8:

src/diffusers/pipelines/wan/pipeline_wan.py

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -125,15 +125,15 @@ class WanPipeline(DiffusionPipeline, WanLoraLoaderMixin):
125125

126126
model_cpu_offload_seq = "text_encoder->transformer->transformer_2->vae"
127127
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
128-
_optional_components = ["transformer_2"]
128+
_optional_components = ["transformer", "transformer_2"]
129129

130130
def __init__(
131131
self,
132132
tokenizer: AutoTokenizer,
133133
text_encoder: UMT5EncoderModel,
134-
transformer: WanTransformer3DModel,
135134
vae: AutoencoderKLWan,
136135
scheduler: FlowMatchEulerDiscreteScheduler,
136+
transformer: Optional[WanTransformer3DModel] = None,
137137
transformer_2: Optional[WanTransformer3DModel] = None,
138138
boundary_ratio: Optional[float] = None,
139139
expand_timesteps: bool = False, # Wan2.2 ti2v
@@ -526,7 +526,7 @@ def __call__(
526526
device=device,
527527
)
528528

529-
transformer_dtype = self.transformer.dtype
529+
transformer_dtype = self.transformer.dtype if self.transformer is not None else self.transformer_2.dtype
530530
prompt_embeds = prompt_embeds.to(transformer_dtype)
531531
if negative_prompt_embeds is not None:
532532
negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype)
@@ -536,7 +536,11 @@ def __call__(
536536
timesteps = self.scheduler.timesteps
537537

538538
# 5. Prepare latent variables
539-
num_channels_latents = self.transformer.config.in_channels
539+
num_channels_latents = (
540+
self.transformer.config.in_channels
541+
if self.transformer is not None
542+
else self.transformer_2.config.in_channels
543+
)
540544
latents = self.prepare_latents(
541545
batch_size * num_videos_per_prompt,
542546
num_channels_latents,

src/diffusers/pipelines/wan/pipeline_wan_i2v.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -162,17 +162,17 @@ class WanImageToVideoPipeline(DiffusionPipeline, WanLoraLoaderMixin):
162162

163163
model_cpu_offload_seq = "text_encoder->image_encoder->transformer->transformer_2->vae"
164164
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
165-
_optional_components = ["transformer_2", "image_encoder", "image_processor"]
165+
_optional_components = ["transformer", "transformer_2", "image_encoder", "image_processor"]
166166

167167
def __init__(
168168
self,
169169
tokenizer: AutoTokenizer,
170170
text_encoder: UMT5EncoderModel,
171-
transformer: WanTransformer3DModel,
172171
vae: AutoencoderKLWan,
173172
scheduler: FlowMatchEulerDiscreteScheduler,
174173
image_processor: CLIPImageProcessor = None,
175174
image_encoder: CLIPVisionModel = None,
175+
transformer: WanTransformer3DModel = None,
176176
transformer_2: WanTransformer3DModel = None,
177177
boundary_ratio: Optional[float] = None,
178178
expand_timesteps: bool = False,
@@ -669,12 +669,13 @@ def __call__(
669669
)
670670

671671
# Encode image embedding
672-
transformer_dtype = self.transformer.dtype
672+
transformer_dtype = self.transformer.dtype if self.transformer is not None else self.transformer_2.dtype
673673
prompt_embeds = prompt_embeds.to(transformer_dtype)
674674
if negative_prompt_embeds is not None:
675675
negative_prompt_embeds = negative_prompt_embeds.to(transformer_dtype)
676676

677-
if self.config.boundary_ratio is None and not self.config.expand_timesteps:
677+
# only wan 2.1 i2v transformer accepts image_embeds
678+
if self.transformer is not None and self.transformer.config.image_dim is not None:
678679
if image_embeds is None:
679680
if last_image is None:
680681
image_embeds = self.encode_image(image, device)
@@ -709,6 +710,7 @@ def __call__(
709710
last_image,
710711
)
711712
if self.config.expand_timesteps:
713+
# wan 2.2 5b i2v uses first_frame_mask to mask timesteps
712714
latents, condition, first_frame_mask = latents_outputs
713715
else:
714716
latents, condition = latents_outputs

tests/pipelines/wan/test_wan.py

Lines changed: 41 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
# limitations under the License.
1414

1515
import gc
16+
import tempfile
1617
import unittest
1718

1819
import numpy as np
@@ -88,29 +89,13 @@ def get_dummy_components(self):
8889
rope_max_seq_len=32,
8990
)
9091

91-
torch.manual_seed(0)
92-
transformer_2 = WanTransformer3DModel(
93-
patch_size=(1, 2, 2),
94-
num_attention_heads=2,
95-
attention_head_dim=12,
96-
in_channels=16,
97-
out_channels=16,
98-
text_dim=32,
99-
freq_dim=256,
100-
ffn_dim=32,
101-
num_layers=2,
102-
cross_attn_norm=True,
103-
qk_norm="rms_norm_across_heads",
104-
rope_max_seq_len=32,
105-
)
106-
10792
components = {
10893
"transformer": transformer,
10994
"vae": vae,
11095
"scheduler": scheduler,
11196
"text_encoder": text_encoder,
11297
"tokenizer": tokenizer,
113-
"transformer_2": transformer_2,
98+
"transformer_2": None,
11499
}
115100
return components
116101

@@ -154,6 +139,45 @@ def test_inference(self):
154139
def test_attention_slicing_forward_pass(self):
155140
pass
156141

142+
# _optional_components include transformer, transformer_2, but only transformer_2 is optional for this wan2.1 t2v pipeline
143+
def test_save_load_optional_components(self, expected_max_difference=1e-4):
144+
optional_component = "transformer_2"
145+
146+
components = self.get_dummy_components()
147+
components[optional_component] = None
148+
pipe = self.pipeline_class(**components)
149+
for component in pipe.components.values():
150+
if hasattr(component, "set_default_attn_processor"):
151+
component.set_default_attn_processor()
152+
pipe.to(torch_device)
153+
pipe.set_progress_bar_config(disable=None)
154+
155+
generator_device = "cpu"
156+
inputs = self.get_dummy_inputs(generator_device)
157+
torch.manual_seed(0)
158+
output = pipe(**inputs)[0]
159+
160+
with tempfile.TemporaryDirectory() as tmpdir:
161+
pipe.save_pretrained(tmpdir, safe_serialization=False)
162+
pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
163+
for component in pipe_loaded.components.values():
164+
if hasattr(component, "set_default_attn_processor"):
165+
component.set_default_attn_processor()
166+
pipe_loaded.to(torch_device)
167+
pipe_loaded.set_progress_bar_config(disable=None)
168+
169+
self.assertTrue(
170+
getattr(pipe_loaded, optional_component) is None,
171+
f"`{optional_component}` did not stay set to None after loading.",
172+
)
173+
174+
inputs = self.get_dummy_inputs(generator_device)
175+
torch.manual_seed(0)
176+
output_loaded = pipe_loaded(**inputs)[0]
177+
178+
max_diff = np.abs(output.detach().cpu().numpy() - output_loaded.detach().cpu().numpy()).max()
179+
self.assertLess(max_diff, expected_max_difference)
180+
157181

158182
@slow
159183
@require_torch_accelerator

0 commit comments

Comments
 (0)