Skip to content

Commit aca7e8f — "copy fix" (1 parent: 12161c0)

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

src/diffusers/loaders/lora_pipeline.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3720,7 +3720,7 @@ def load_lora_weights(
37203720
)
37213721

37223722
@classmethod
3723-
# Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->CogVideoXTransformer3DModel
3723+
# Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->SanaTransformer2DModel
37243724
def load_lora_into_transformer(
37253725
cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False
37263726
):
@@ -3732,7 +3732,7 @@ def load_lora_into_transformer(
37323732
A standard state dict containing the lora layer parameters. The keys can either be indexed directly
37333733
into the unet or prefixed with an additional `unet` which can be used to distinguish between text
37343734
encoder lora layers.
3735-
transformer (`CogVideoXTransformer3DModel`):
3735+
transformer (`SanaTransformer2DModel`):
37363736
The Transformer model to load the LoRA layers into.
37373737
adapter_name (`str`, *optional*):
37383738
Adapter name to be used for referencing the loaded adapter model. If not specified, it will use

Commit comments: 0