Commit 5001efe

Merge branch 'main' into expand-flux-lora
2 parents: 8e51289 + 0d96a89


src/diffusers/loaders/lora_pipeline.py

Lines changed: 2 additions & 2 deletions
@@ -3131,7 +3131,7 @@ def load_lora_weights(
         )
 
     @classmethod
-    # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->CogVideoXTransformer3DModel
+    # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->MochiTransformer3DModel
     def load_lora_into_transformer(
         cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False
     ):
@@ -3143,7 +3143,7 @@ def load_lora_into_transformer(
                 A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                 into the unet or prefixed with an additional `unet` which can be used to distinguish between text
                 encoder lora layers.
-            transformer (`CogVideoXTransformer3DModel`):
+            transformer (`MochiTransformer3DModel`):
                 The Transformer model to load the LoRA layers into.
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
