Skip to content

Commit 9fa3d93

Browse files
committed
add comment
1 parent 63e581c commit 9fa3d93

File tree

1 file changed

+2
-1
lines changed

1 file changed

+2
-1
lines changed

src/diffusers/loaders/lora_pipeline.py

Lines changed: 2 additions & 1 deletion
(Columns below are: original file line number | diff line number | diff line change)
@@ -4275,7 +4275,7 @@ def _maybe_expand_t2v_lora_for_i2v(
42754275

42764276
return state_dict
42774277

4278-
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.load_lora_weights
4278+
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.load_lora_weights with T2V LoRA->I2V LoRA option
42794279
def load_lora_weights(
42804280
self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs
42814281
):
@@ -4313,6 +4313,7 @@ def load_lora_weights(
43134313

43144314
# First, ensure that the checkpoint is a compatible one and can be successfully loaded.
43154315
state_dict = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs)
4316+
# convert T2V LoRA to I2V LoRA (when loaded to Wan I2V) by adding zeros for the additional (missing) _img layers
43164317
state_dict = self._maybe_expand_t2v_lora_for_i2v(
43174318
transformer=getattr(self, self.transformer_name) if not hasattr(self, "transformer") else self.transformer,
43184319
state_dict=state_dict,

0 commit comments

Comments (0)