Skip to content

Commit d3de540

Browse files
committed
corrected code quality
1 parent 56a7718 commit d3de540

File tree

3 files changed

+7
-7
lines changed

3 files changed

+7
-7
lines changed

src/diffusers/loaders/single_file_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,8 +36,8 @@
3636
convert_ltx_transformer_checkpoint_to_diffusers,
3737
convert_ltx_vae_checkpoint_to_diffusers,
3838
convert_lumina2_to_diffusers,
39-
convert_sana_transformer_to_diffusers,
4039
convert_mochi_transformer_checkpoint_to_diffusers,
40+
convert_sana_transformer_to_diffusers,
4141
convert_sd3_transformer_checkpoint_to_diffusers,
4242
convert_stable_cascade_unet_single_file_to_diffusers,
4343
create_controlnet_diffusers_config_from_ldm,

src/diffusers/loaders/single_file_utils.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@
117117
"hunyuan-video": "txt_in.individual_token_refiner.blocks.0.adaLN_modulation.1.bias",
118118
"instruct-pix2pix": "model.diffusion_model.input_blocks.0.0.weight",
119119
"lumina2": ["model.diffusion_model.cap_embedder.0.weight", "cap_embedder.0.weight"],
120-
"sana": [
120+
"sana": [
121121
"blocks.0.cross_attn.q_linear.weight",
122122
"blocks.0.cross_attn.q_linear.bias",
123123
"blocks.0.cross_attn.kv_linear.weight",
@@ -2877,7 +2877,7 @@ def convert_sana_transformer_to_diffusers(checkpoint, **kwargs):
28772877
checkpoint[k.replace("model.diffusion_model.", "")] = checkpoint.pop(k)
28782878

28792879
num_layers = list(set(int(k.split(".", 2)[1]) for k in checkpoint if "blocks" in k))[-1] + 1 # noqa: C401
2880-
2880+
28812881

28822882
# Positional and patch embeddings.
28832883
checkpoint.pop("pos_embed")
@@ -2891,7 +2891,7 @@ def convert_sana_transformer_to_diffusers(checkpoint, **kwargs):
28912891
converted_state_dict["time_embed.emb.timestep_embedder.linear_2.bias"] = checkpoint.pop("t_embedder.mlp.2.bias")
28922892
converted_state_dict["time_embed.linear.weight"] = checkpoint.pop("t_block.1.weight")
28932893
converted_state_dict["time_embed.linear.bias"] = checkpoint.pop("t_block.1.bias")
2894-
2894+
28952895
# Caption Projection.
28962896
checkpoint.pop("y_embedder.y_embedding")
28972897
converted_state_dict["caption_projection.linear_1.weight"] = checkpoint.pop("y_embedder.y_proj.fc1.weight")
@@ -2935,10 +2935,10 @@ def convert_sana_transformer_to_diffusers(checkpoint, **kwargs):
29352935
converted_state_dict[f"transformer_blocks.{i}.ff.conv_depth.weight"] = checkpoint.pop(f"blocks.{i}.mlp.depth_conv.conv.weight")
29362936
converted_state_dict[f"transformer_blocks.{i}.ff.conv_depth.bias"] = checkpoint.pop(f"blocks.{i}.mlp.depth_conv.conv.bias")
29372937
converted_state_dict[f"transformer_blocks.{i}.ff.conv_point.weight"] = checkpoint.pop(f"blocks.{i}.mlp.point_conv.conv.weight")
2938-
2938+
29392939
# Final layer
29402940
converted_state_dict["proj_out.weight"] = checkpoint.pop("final_layer.linear.weight")
29412941
converted_state_dict["proj_out.bias"] = checkpoint.pop("final_layer.linear.bias")
29422942
converted_state_dict["scale_shift_table"] = checkpoint.pop("final_layer.scale_shift_table")
29432943

2944-
return converted_state_dict
2944+
return converted_state_dict

src/diffusers/models/transformers/sana_transformer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
from torch import nn
1919

2020
from ...configuration_utils import ConfigMixin, register_to_config
21-
from ...loaders import PeftAdapterMixin, FromOriginalModelMixin
21+
from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
2222
from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
2323
from ..attention_processor import (
2424
Attention,

0 commit comments

Comments (0)