
Commit 787a69c

make style
1 parent 12855b2 · commit 787a69c

36 files changed: +1447 −1193 lines changed
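
Every hunk in this commit is the same mechanical reflow: a newer release of the formatter behind make style (black/ruff; the commit does not say which version) keeps a long subscript assignment's target on one line and wraps the right-hand side in parentheses, rather than splitting the subscript itself. A minimal before/after sketch with illustrative names, not taken from any of the files below:

# Old style: the formatter split the subscript across lines.
settings[
    f"original_embeddings_{idx}"
] = token_embedding.weight.data.clone()

# New style: the subscript stays intact and the right-hand side is
# parenthesized to respect the line-length limit.
settings[f"original_embeddings_{idx}"] = (
    token_embedding.weight.data.clone()
)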

examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py

Lines changed: 3 additions & 3 deletions
@@ -746,9 +746,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]):
                 .to(dtype=self.dtype)
                 * std_token_embedding
             )
-            self.embeddings_settings[
-                f"original_embeddings_{idx}"
-            ] = text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
+            self.embeddings_settings[f"original_embeddings_{idx}"] = (
+                text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
+            )
             self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding
 
             inu = torch.ones((len(tokenizer),), dtype=torch.bool)
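
For context (an aside, not part of the diff): the reflowed lines sit in the pivotal-tuning token setup, which snapshots the full token-embedding matrix before training so the rows of pre-existing tokens can be restored later. A minimal standalone sketch of that snapshot, assuming a CLIP-style text encoder from transformers:

import torch
from transformers import CLIPTextModel

text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")

# clone() copies the storage, so later in-place optimizer updates to the
# embedding table leave this snapshot untouched.
original_embeddings = (
    text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
)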

examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py

Lines changed: 3 additions & 3 deletions
@@ -913,9 +913,9 @@ def initialize_new_tokens(self, inserting_toks: List[str]):
                 .to(dtype=self.dtype)
                 * std_token_embedding
             )
-            self.embeddings_settings[
-                f"original_embeddings_{idx}"
-            ] = text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
+            self.embeddings_settings[f"original_embeddings_{idx}"] = (
+                text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
+            )
             self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding
 
             inu = torch.ones((len(tokenizer),), dtype=torch.bool)

examples/research_projects/multi_token_textual_inversion/textual_inversion.py

Lines changed: 3 additions & 3 deletions
@@ -830,9 +830,9 @@ def main():
             # Let's make sure we don't update any embedding weights besides the newly added token
             index_no_updates = get_mask(tokenizer, accelerator)
             with torch.no_grad():
-                accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
-                    index_no_updates
-                ] = orig_embeds_params[index_no_updates]
+                accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = (
+                    orig_embeds_params[index_no_updates]
+                )
 
             # Checks if the accelerator has performed an optimization step behind the scenes
             if accelerator.sync_gradients:

examples/research_projects/onnxruntime/textual_inversion/textual_inversion.py

Lines changed: 3 additions & 3 deletions
@@ -886,9 +886,9 @@ def main():
             index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False
 
             with torch.no_grad():
-                accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
-                    index_no_updates
-                ] = orig_embeds_params[index_no_updates]
+                accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = (
+                    orig_embeds_params[index_no_updates]
+                )
 
             # Checks if the accelerator has performed an optimization step behind the scenes
             if accelerator.sync_gradients:

examples/textual_inversion/textual_inversion.py

Lines changed: 3 additions & 3 deletions
@@ -910,9 +910,9 @@ def main():
             index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False
 
             with torch.no_grad():
-                accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
-                    index_no_updates
-                ] = orig_embeds_params[index_no_updates]
+                accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[index_no_updates] = (
+                    orig_embeds_params[index_no_updates]
+                )
 
             # Checks if the accelerator has performed an optimization step behind the scenes
             if accelerator.sync_gradients:

examples/textual_inversion/textual_inversion_sdxl.py

Lines changed: 6 additions & 6 deletions
@@ -965,12 +965,12 @@ def main():
             index_no_updates_2[min(placeholder_token_ids_2) : max(placeholder_token_ids_2) + 1] = False
 
             with torch.no_grad():
-                accelerator.unwrap_model(text_encoder_1).get_input_embeddings().weight[
-                    index_no_updates
-                ] = orig_embeds_params[index_no_updates]
-                accelerator.unwrap_model(text_encoder_2).get_input_embeddings().weight[
-                    index_no_updates_2
-                ] = orig_embeds_params_2[index_no_updates_2]
+                accelerator.unwrap_model(text_encoder_1).get_input_embeddings().weight[index_no_updates] = (
+                    orig_embeds_params[index_no_updates]
+                )
+                accelerator.unwrap_model(text_encoder_2).get_input_embeddings().weight[index_no_updates_2] = (
+                    orig_embeds_params_2[index_no_updates_2]
+                )
 
             # Checks if the accelerator has performed an optimization step behind the scenes
             if accelerator.sync_gradients:
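
For context: the four textual-inversion hunks above all reflow the same statement, the one that undoes gradient updates on every embedding row except the newly added placeholder tokens. A condensed sketch of that freeze-by-restore pattern, with hypothetical sizes and token ids:

import torch

# Hypothetical setup: an embedding table, the ids of the new tokens, and a
# snapshot of the weights taken before training started.
embeddings = torch.nn.Embedding(49410, 768)
placeholder_token_ids = [49408, 49409]
orig_embeds_params = embeddings.weight.data.clone()

# Boolean mask that is True for every row that must stay frozen.
index_no_updates = torch.ones((embeddings.num_embeddings,), dtype=torch.bool)
index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False

# After each optimizer step, overwrite the frozen rows with the snapshot,
# so only the placeholder-token rows ever change.
with torch.no_grad():
    embeddings.weight[index_no_updates] = orig_embeds_params[index_no_updates]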

scripts/convert_hunyuandit_controlnet_to_diffusers.py

Lines changed: 3 additions & 3 deletions
@@ -21,9 +21,9 @@ def main(args):
     model_config = HunyuanDiT2DControlNetModel.load_config(
         "Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers", subfolder="transformer"
     )
-    model_config[
-        "use_style_cond_and_image_meta_size"
-    ] = args.use_style_cond_and_image_meta_size  ### version <= v1.1: True; version >= v1.2: False
+    model_config["use_style_cond_and_image_meta_size"] = (
+        args.use_style_cond_and_image_meta_size
+    )  ### version <= v1.1: True; version >= v1.2: False
     print(model_config)
 
     for key in state_dict:

scripts/convert_hunyuandit_to_diffusers.py

Lines changed: 3 additions & 3 deletions
@@ -19,9 +19,9 @@ def main(args):
 
     device = "cuda"
     model_config = HunyuanDiT2DModel.load_config("Tencent-Hunyuan/HunyuanDiT-Diffusers", subfolder="transformer")
-    model_config[
-        "use_style_cond_and_image_meta_size"
-    ] = args.use_style_cond_and_image_meta_size  ### version <= v1.1: True; version >= v1.2: False
+    model_config["use_style_cond_and_image_meta_size"] = (
+        args.use_style_cond_and_image_meta_size
+    )  ### version <= v1.1: True; version >= v1.2: False
 
     # input_size -> sample_size, text_dim -> cross_attention_dim
     for key in state_dict:
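
For context: both HunyuanDiT conversion scripts load the reference config from the Hub and then override a single flag from the CLI, since style/image-meta-size conditioning was dropped after v1.1. A minimal sketch of that load-then-override pattern (checkpoint id taken from the diff; the False value is an example for converting a v1.2 checkpoint):

from diffusers import HunyuanDiT2DModel

# load_config returns a plain dict, so individual fields can be patched
# before a model is instantiated from it.
model_config = HunyuanDiT2DModel.load_config(
    "Tencent-Hunyuan/HunyuanDiT-Diffusers", subfolder="transformer"
)
model_config["use_style_cond_and_image_meta_size"] = False  # v1.2 and later
model = HunyuanDiT2DModel.from_config(model_config)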

scripts/convert_mochi_to_diffusers.py

Lines changed: 6 additions & 6 deletions
@@ -303,9 +303,9 @@ def convert_mochi_vae_state_dict_to_diffusers(encoder_ckpt_path, decoder_ckpt_pa
 
         for i in range(down_block_layers[block]):
             # Convert resnets
-            new_state_dict[
-                f"{prefix}down_blocks.{block}.resnets.{i}.norm1.norm_layer.weight"
-            ] = encoder_state_dict.pop(f"layers.{block+4}.layers.{i+1}.stack.0.weight")
+            new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm1.norm_layer.weight"] = (
+                encoder_state_dict.pop(f"layers.{block+4}.layers.{i+1}.stack.0.weight")
+            )
             new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm1.norm_layer.bias"] = encoder_state_dict.pop(
                 f"layers.{block+4}.layers.{i+1}.stack.0.bias"
             )
@@ -315,9 +315,9 @@ def convert_mochi_vae_state_dict_to_diffusers(encoder_ckpt_path, decoder_ckpt_pa
             new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.conv1.conv.bias"] = encoder_state_dict.pop(
                 f"layers.{block+4}.layers.{i+1}.stack.2.bias"
             )
-            new_state_dict[
-                f"{prefix}down_blocks.{block}.resnets.{i}.norm2.norm_layer.weight"
-            ] = encoder_state_dict.pop(f"layers.{block+4}.layers.{i+1}.stack.3.weight")
+            new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm2.norm_layer.weight"] = (
+                encoder_state_dict.pop(f"layers.{block+4}.layers.{i+1}.stack.3.weight")
+            )
             new_state_dict[f"{prefix}down_blocks.{block}.resnets.{i}.norm2.norm_layer.bias"] = encoder_state_dict.pop(
                 f"layers.{block+4}.layers.{i+1}.stack.3.bias"
             )

scripts/convert_svd_to_diffusers.py

Lines changed: 6 additions & 6 deletions
@@ -381,9 +381,9 @@ def convert_ldm_unet_checkpoint(
 
         # TODO resnet time_mixer.mix_factor
         if f"input_blocks.{i}.0.time_mixer.mix_factor" in unet_state_dict:
-            new_checkpoint[
-                f"down_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor"
-            ] = unet_state_dict[f"input_blocks.{i}.0.time_mixer.mix_factor"]
+            new_checkpoint[f"down_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor"] = (
+                unet_state_dict[f"input_blocks.{i}.0.time_mixer.mix_factor"]
+            )
 
         if len(attentions):
             paths = renew_attention_paths(attentions)
@@ -478,9 +478,9 @@ def convert_ldm_unet_checkpoint(
         )
 
         if f"output_blocks.{i}.0.time_mixer.mix_factor" in unet_state_dict:
-            new_checkpoint[
-                f"up_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor"
-            ] = unet_state_dict[f"output_blocks.{i}.0.time_mixer.mix_factor"]
+            new_checkpoint[f"up_blocks.{block_id}.resnets.{layer_in_block_id}.time_mixer.mix_factor"] = (
+                unet_state_dict[f"output_blocks.{i}.0.time_mixer.mix_factor"]
+            )
 
         output_block_list = {k: sorted(v) for k, v in output_block_list.items()}
         if ["conv.bias", "conv.weight"] in output_block_list.values():
