Commit 7c67b38

Make fix
1 parent c7378ed commit 7c67b38

File tree

1 file changed: +10 -1 lines changed


src/diffusers/loaders/lora_pipeline.py

Lines changed: 10 additions & 1 deletion
@@ -807,7 +807,14 @@ def lora_state_dict(
     @classmethod
     # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.load_lora_into_unet
     def load_lora_into_unet(
-        cls, state_dict, network_alphas, unet, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False
+        cls,
+        state_dict,
+        network_alphas,
+        unet,
+        adapter_name=None,
+        _pipeline=None,
+        low_cpu_mem_usage=False,
+        hotswap: bool = False,
     ):
         """
         This will load the LoRA layers specified in `state_dict` into `unet`.
@@ -829,6 +836,7 @@ def load_lora_into_unet(
             low_cpu_mem_usage (`bool`, *optional*):
                 Speed up model loading by only loading the pretrained LoRA weights and not initializing the random
                 weights.
+            hotswap TODO
         """
         if not USE_PEFT_BACKEND:
             raise ValueError("PEFT backend is required for this method.")
@@ -852,6 +860,7 @@ def load_lora_into_unet(
             adapter_name=adapter_name,
             _pipeline=_pipeline,
             low_cpu_mem_usage=low_cpu_mem_usage,
+            hotswap=hotswap,
         )
 
     @classmethod
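
For context beyond the diff: a minimal, hypothetical sketch of how the new `hotswap` flag might be exercised, assuming the pipeline-level `load_lora_weights` entry point forwards it down to `load_lora_into_unet`. The checkpoint IDs and adapter name below are made up for illustration and are not part of this commit.

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# First load registers the LoRA adapter under the given name.
pipe.load_lora_weights("some-user/lora-style-a", adapter_name="style")
image_a = pipe("a watercolor fox").images[0]

# Hypothetical second load with hotswap=True: swap the new LoRA weights into
# the already-registered adapter in place, rather than registering a second
# adapter, so structures built against the existing module graph (e.g. a
# compiled model) need not be invalidated.
pipe.load_lora_weights("some-user/lora-style-b", adapter_name="style", hotswap=True)
image_b = pipe("a watercolor fox").images[0]

Threading the flag through the `load_lora_into_unet` classmethod, as this commit does, suggests the pipeline entry points stay thin and the per-model loader remains the single place where adapter state is mutated.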
