@@ -341,7 +341,7 @@ def load_lora_into_text_encoder(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
         """
         if not USE_PEFT_BACKEND:
             raise ValueError("PEFT backend is required for this method.")
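Every hunk in this commit touches the same pair of docstring entries, so it helps to see them in use. Below is a minimal sketch (not part of the diff) of loading a LoRA with an explicit `adapter_name`; the base model and LoRA repo ids are placeholders, and the PEFT backend check in the hunk above is what fires when `peft` is not installed.

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder model ids; any SD 1.5-style checkpoint and LoRA would do.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)

# Requires the PEFT backend (`pip install peft`); otherwise the loader
# raises "PEFT backend is required for this method." as in the hunk above.
pipe.load_lora_weights("some-user/some-lora", adapter_name="style")

# Without adapter_name, the adapter is registered as default_0,
# default_1, ... following the `default_{i}` convention in the docstring.
```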
@@ -601,7 +601,7 @@ def load_lora_weights(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
             kwargs (`dict`, *optional*):
                 See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
         """
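The line being edited here documents `low_cpu_mem_usage`, which tells `load_lora_weights` to materialize only the pretrained LoRA tensors instead of first allocating randomly initialized adapter weights. A hedged sketch of passing it through, assuming the `pipe` from the previous example and a recent enough `peft` (the later hunks gate on 0.13.x):

```python
# low_cpu_mem_usage=True loads only the pretrained LoRA weights rather
# than initializing random adapter weights first; repo id is a placeholder.
pipe.load_lora_weights(
    "some-user/some-lora",
    adapter_name="style",
    low_cpu_mem_usage=True,
)
```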
@@ -805,7 +805,7 @@ def load_lora_into_unet(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
         """
         if not USE_PEFT_BACKEND:
             raise ValueError("PEFT backend is required for this method.")
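`load_lora_into_unet` is the UNet half of the split that `load_lora_weights` performs. As a rough conceptual sketch only (not the library's exact code), the pipeline-level loader partitions the state dict by key prefix before delegating to the per-component loaders:

```python
# Conceptual sketch: the real routing lives inside the loader mixins.
# In the classic SD LoRA layout, UNet keys are prefixed "unet." and
# text encoder keys "text_encoder.".
state_dict, network_alphas = pipe.lora_state_dict("some-user/some-lora")
unet_state = {k: v for k, v in state_dict.items() if k.startswith("unet.")}
text_encoder_state = {
    k: v for k, v in state_dict.items() if k.startswith("text_encoder.")
}
```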
@@ -865,7 +865,7 @@ def load_lora_into_text_encoder(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
         """
         if not USE_PEFT_BACKEND:
             raise ValueError("PEFT backend is required for this method.")
@@ -1226,7 +1226,7 @@ def load_lora_weights(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
             kwargs (`dict`, *optional*):
                 See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
         """
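The `kwargs` entry kept intact in this hunk defers to `lora_state_dict`, so anything that function accepts can be passed straight to `load_lora_weights`. For example, selecting one weight file from a repo that ships several (file name below is a placeholder):

```python
# weight_name is forwarded to lora_state_dict to pick a specific file.
pipe.load_lora_weights(
    "some-user/some-lora",
    weight_name="pytorch_lora_weights.safetensors",
    adapter_name="style",
)
```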
@@ -1301,7 +1301,7 @@ def load_lora_into_transformer(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
         """
         if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
             raise ValueError(
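This hunk's context shows the guard that rejects `low_cpu_mem_usage` on older `peft` releases. The diff truncates the actual error message, so the one in the sketch below is a stand-in; the shape of the check is the point:

```python
from diffusers.utils import is_peft_version

def check_low_cpu_mem_usage(low_cpu_mem_usage: bool) -> None:
    # Mirrors the gate in the hunk above; the message text is a stand-in,
    # since the diff cuts off before the real one.
    if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
        raise ValueError(
            "low_cpu_mem_usage=True requires peft >= 0.13.0; upgrade peft "
            "or pass low_cpu_mem_usage=False."
        )
```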
@@ -1408,7 +1408,7 @@ def load_lora_into_text_encoder(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
         """
         if not USE_PEFT_BACKEND:
             raise ValueError("PEFT backend is required for this method.")
@@ -1803,7 +1803,7 @@ def load_lora_weights(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
         """
         if not USE_PEFT_BACKEND:
             raise ValueError("PEFT backend is required for this method.")
@@ -1870,7 +1870,7 @@ def load_lora_into_transformer(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
         """
         if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"):
             raise ValueError(
@@ -1982,7 +1982,7 @@ def load_lora_into_text_encoder(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
         """
         if not USE_PEFT_BACKEND:
             raise ValueError("PEFT backend is required for this method.")
@@ -2329,7 +2329,7 @@ def load_lora_into_text_encoder(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
         """
         if not USE_PEFT_BACKEND:
             raise ValueError("PEFT backend is required for this method.")
@@ -2610,7 +2610,7 @@ def load_lora_weights(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
             kwargs (`dict`, *optional*):
                 See [`~loaders.StableDiffusionLoraLoaderMixin.lora_state_dict`].
         """
@@ -2660,7 +2660,7 @@ def load_lora_into_transformer(
             adapter_name (`str`, *optional*):
                 Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                 `default_{i}` where i is the total number of adapters being loaded.
-            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.
+            Speed up model loading by only loading the pretrained LoRA weights and not initializing the random weights.:
         """
         if low_cpu_mem_usage and is_peft_version("<", "0.13.0"):
             raise ValueError(
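Once adapters are loaded under explicit names, they can be weighted and combined; a closing sketch using the placeholder names from the examples above:

```python
# Activate the loaded adapter(s) with per-adapter weights, then run inference.
pipe.set_adapters(["style"], adapter_weights=[0.8])
image = pipe("a photo of a cat, in the loaded style").images[0]
```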