@@ -177,9 +177,9 @@ def _convert_non_diffusers_lora_to_diffusers(state_dict, unet_name="unet", text_
             # Store DoRA scale if present.
             if dora_present_in_unet:
                 dora_scale_key_to_replace = "_lora.down." if "_lora.down." in diffusers_name else ".lora.down."
-                unet_state_dict[diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.")] = (
-                    state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
-                )
+                unet_state_dict[
+                    diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.")
+                ] = state_dict.pop(key.replace("lora_down.weight", "dora_scale"))
 
         # Handle text encoder LoRAs.
         elif lora_name.startswith(("lora_te_", "lora_te1_", "lora_te2_")):
@@ -199,13 +199,13 @@ def _convert_non_diffusers_lora_to_diffusers(state_dict, unet_name="unet", text_
199199 "_lora.down." if "_lora.down." in diffusers_name else ".lora_linear_layer."
200200 )
201201 if lora_name .startswith (("lora_te_" , "lora_te1_" )):
202- te_state_dict [diffusers_name . replace ( dora_scale_key_to_replace_te , ".lora_magnitude_vector." )] = (
203- state_dict . pop ( key . replace ("lora_down.weight" , "dora_scale" ) )
204- )
202+ te_state_dict [
203+ diffusers_name . replace (dora_scale_key_to_replace_te , ".lora_magnitude_vector." )
204+ ] = state_dict . pop ( key . replace ( "lora_down.weight" , "dora_scale" ) )
205205 elif lora_name .startswith ("lora_te2_" ):
206- te2_state_dict [diffusers_name . replace ( dora_scale_key_to_replace_te , ".lora_magnitude_vector." )] = (
207- state_dict . pop ( key . replace ("lora_down.weight" , "dora_scale" ) )
208- )
206+ te2_state_dict [
207+ diffusers_name . replace (dora_scale_key_to_replace_te , ".lora_magnitude_vector." )
208+ ] = state_dict . pop ( key . replace ( "lora_down.weight" , "dora_scale" ) )
209209
210210 # Store alpha if present.
211211 if lora_name_alpha in state_dict :
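
For reference, a minimal self-contained sketch of the DoRA-scale key renaming shown in the two hunks above; the key names and the "scale_tensor" value below are illustrative placeholders, not taken from a real checkpoint:

state_dict = {"lora_unet_mid_block_attentions_0_proj_in.dora_scale": "scale_tensor"}
unet_state_dict = {}

key = "lora_unet_mid_block_attentions_0_proj_in.lora_down.weight"
diffusers_name = "mid_block.attentions.0.proj_in.lora.down.weight"

# Same renaming as in the diff: the ".lora.down." marker is swapped for
# ".lora_magnitude_vector." and the original "dora_scale" entry is moved over.
dora_scale_key_to_replace = "_lora.down." if "_lora.down." in diffusers_name else ".lora.down."
unet_state_dict[
    diffusers_name.replace(dora_scale_key_to_replace, ".lora_magnitude_vector.")
] = state_dict.pop(key.replace("lora_down.weight", "dora_scale"))

# unet_state_dict == {"mid_block.attentions.0.proj_in.lora_magnitude_vector.weight": "scale_tensor"}
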
@@ -684,21 +684,21 @@ def swap_scale_shift(weight):
 
     for lora_key in ["lora_A", "lora_B"]:
         ## time_text_embed.timestep_embedder <- time_in
-        converted_state_dict[f"time_text_embed.timestep_embedder.linear_1.{lora_key}.weight"] = (
-            original_state_dict.pop(f"time_in.in_layer.{lora_key}.weight")
-        )
+        converted_state_dict[
+            f"time_text_embed.timestep_embedder.linear_1.{lora_key}.weight"
+        ] = original_state_dict.pop(f"time_in.in_layer.{lora_key}.weight")
         if f"time_in.in_layer.{lora_key}.bias" in original_state_dict_keys:
-            converted_state_dict[f"time_text_embed.timestep_embedder.linear_1.{lora_key}.bias"] = (
-                original_state_dict.pop(f"time_in.in_layer.{lora_key}.bias")
-            )
+            converted_state_dict[
+                f"time_text_embed.timestep_embedder.linear_1.{lora_key}.bias"
+            ] = original_state_dict.pop(f"time_in.in_layer.{lora_key}.bias")
 
-        converted_state_dict[f"time_text_embed.timestep_embedder.linear_2.{lora_key}.weight"] = (
-            original_state_dict.pop(f"time_in.out_layer.{lora_key}.weight")
-        )
+        converted_state_dict[
+            f"time_text_embed.timestep_embedder.linear_2.{lora_key}.weight"
+        ] = original_state_dict.pop(f"time_in.out_layer.{lora_key}.weight")
         if f"time_in.out_layer.{lora_key}.bias" in original_state_dict_keys:
-            converted_state_dict[f"time_text_embed.timestep_embedder.linear_2.{lora_key}.bias"] = (
-                original_state_dict.pop(f"time_in.out_layer.{lora_key}.bias")
-            )
+            converted_state_dict[
+                f"time_text_embed.timestep_embedder.linear_2.{lora_key}.bias"
+            ] = original_state_dict.pop(f"time_in.out_layer.{lora_key}.bias")
 
         ## time_text_embed.text_embedder <- vector_in
         converted_state_dict[f"time_text_embed.text_embedder.linear_1.{lora_key}.weight"] = original_state_dict.pop(
@@ -720,21 +720,21 @@ def swap_scale_shift(weight):
         # guidance
         has_guidance = any("guidance" in k for k in original_state_dict)
         if has_guidance:
-            converted_state_dict[f"time_text_embed.guidance_embedder.linear_1.{lora_key}.weight"] = (
-                original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.weight")
-            )
+            converted_state_dict[
+                f"time_text_embed.guidance_embedder.linear_1.{lora_key}.weight"
+            ] = original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.weight")
             if f"guidance_in.in_layer.{lora_key}.bias" in original_state_dict_keys:
-                converted_state_dict[f"time_text_embed.guidance_embedder.linear_1.{lora_key}.bias"] = (
-                    original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.bias")
-                )
+                converted_state_dict[
+                    f"time_text_embed.guidance_embedder.linear_1.{lora_key}.bias"
+                ] = original_state_dict.pop(f"guidance_in.in_layer.{lora_key}.bias")
 
-            converted_state_dict[f"time_text_embed.guidance_embedder.linear_2.{lora_key}.weight"] = (
-                original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.weight")
-            )
+            converted_state_dict[
+                f"time_text_embed.guidance_embedder.linear_2.{lora_key}.weight"
+            ] = original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.weight")
             if f"guidance_in.out_layer.{lora_key}.bias" in original_state_dict_keys:
-                converted_state_dict[f"time_text_embed.guidance_embedder.linear_2.{lora_key}.bias"] = (
-                    original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.bias")
-                )
+                converted_state_dict[
+                    f"time_text_embed.guidance_embedder.linear_2.{lora_key}.bias"
+                ] = original_state_dict.pop(f"guidance_in.out_layer.{lora_key}.bias")
 
         # context_embedder
         converted_state_dict[f"context_embedder.{lora_key}.weight"] = original_state_dict.pop(
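
For reference, a minimal self-contained sketch of the time_in -> time_text_embed.timestep_embedder renaming performed in the hunks above; the dict values are string placeholders rather than real LoRA tensors, and only a couple of keys are shown:

original_state_dict = {
    "time_in.in_layer.lora_A.weight": "A_in",
    "time_in.out_layer.lora_A.weight": "A_out",
}
original_state_dict_keys = list(original_state_dict.keys())
converted_state_dict = {}

for lora_key in ["lora_A"]:
    # linear_1 <- time_in.in_layer
    converted_state_dict[
        f"time_text_embed.timestep_embedder.linear_1.{lora_key}.weight"
    ] = original_state_dict.pop(f"time_in.in_layer.{lora_key}.weight")
    if f"time_in.in_layer.{lora_key}.bias" in original_state_dict_keys:
        converted_state_dict[
            f"time_text_embed.timestep_embedder.linear_1.{lora_key}.bias"
        ] = original_state_dict.pop(f"time_in.in_layer.{lora_key}.bias")
    # linear_2 <- time_in.out_layer
    converted_state_dict[
        f"time_text_embed.timestep_embedder.linear_2.{lora_key}.weight"
    ] = original_state_dict.pop(f"time_in.out_layer.{lora_key}.weight")

# converted_state_dict == {
#     "time_text_embed.timestep_embedder.linear_1.lora_A.weight": "A_in",
#     "time_text_embed.timestep_embedder.linear_2.lora_A.weight": "A_out",
# }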