@@ -4280,7 +4280,7 @@ def load_lora_into_transformer(
                 A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                 into the unet or prefixed with an additional `unet` which can be used to distinguish between text
                 encoder lora layers.
-            transformer (`CogVideoXTransformer3DModel`):
+            transformer (`WanTransformer3DModel`):
                 The Transformer model to load the LoRA layers into.
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
@@ -4352,7 +4352,7 @@ def save_lora_weights(
             safe_serialization=safe_serialization,
         )
 
-    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.fuse_lora with unet->transformer
+    # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora
     def fuse_lora(
         self,
         components: List[str] = ["transformer"],
@@ -4396,7 +4396,7 @@ def fuse_lora(
             components=components, lora_scale=lora_scale, safe_fusing=safe_fusing, adapter_names=adapter_names
         )
 
-    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.unfuse_lora with unet->transformer
+    # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
     def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
         r"""
         Reverses the effect of
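Taken together, the diff repoints the Wan LoRA mixin's docstring and its `# Copied from` annotations (which diffusers' copy-consistency tooling keeps in sync with the named source methods) at the correct references. For context, here is a minimal sketch of how these mixin methods are exercised at the pipeline level; the checkpoint and LoRA repository ids below are illustrative placeholders, not taken from this diff:

```python
import torch
from diffusers import WanPipeline

# Hypothetical model and LoRA ids; substitute real repositories.
pipe = WanPipeline.from_pretrained(
    "Wan-AI/Wan2.1-T2V-1.3B-Diffusers", torch_dtype=torch.bfloat16
)

# load_lora_weights() routes transformer-prefixed keys through
# load_lora_into_transformer(), the method whose docstring this diff corrects.
pipe.load_lora_weights("user/wan-style-lora", adapter_name="style")

# fuse_lora()/unfuse_lora() reuse the CogVideoX implementations, which
# default to operating on the "transformer" component.
pipe.fuse_lora(components=["transformer"], lora_scale=1.0)
# ... run inference with the fused weights ...
pipe.unfuse_lora(components=["transformer"])
```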