
Commit ea4cfc5

make style && make quality
Parent: 23f780f

File tree: 7 files changed, +30 -26 lines

scripts/convert_sana_pag_to_diffusers.py
5 additions, 3 deletions

@@ -7,6 +7,9 @@

 import torch
 from accelerate import init_empty_weights
+from termcolor import colored
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
 from diffusers import (
     DCAE,
     DPMSolverMultistepScheduler,
@@ -16,8 +19,7 @@
 )
 from diffusers.models.modeling_utils import load_model_dict_into_meta
 from diffusers.utils.import_utils import is_accelerate_available
-from termcolor import colored
-from transformers import AutoModelForCausalLM, AutoTokenizer
+

 CTX = init_empty_weights if is_accelerate_available else nullcontext

@@ -203,7 +205,7 @@ def main(args):
     # Scheduler
     if args.scheduler_type == "flow-dpm_solver":
         scheduler = DPMSolverMultistepScheduler(
-            flow_shift=flow_shift,
+            flow_shift=flow_shift,
             use_flow_sigmas=True,
             prediction_type="flow_prediction",
         )
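The reshuffle above is the usual import-grouping convention that style tools such as isort or ruff enforce: standard-library imports first, then third-party packages, then the package under development, with a blank line between groups. That is why termcolor and transformers move up next to accelerate while the diffusers block stays in its own section below them. A minimal sketch of the resulting layout (illustrative only, assuming a configuration that treats diffusers as first-party; the repository's exact settings may differ):

# Illustrative grouping only; the project's actual style configuration may differ.

# group 1: standard library
from contextlib import nullcontext

# group 2: third-party packages
import torch
from accelerate import init_empty_weights
from termcolor import colored
from transformers import AutoModelForCausalLM, AutoTokenizer

# group 3: the first-party package (diffusers), kept in its own block
from diffusers import DCAE, DPMSolverMultistepScheduler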

scripts/convert_sana_to_diffusers.py
5 additions, 3 deletions

@@ -7,6 +7,9 @@

 import torch
 from accelerate import init_empty_weights
+from termcolor import colored
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
 from diffusers import (
     DCAE,
     DPMSolverMultistepScheduler,
@@ -16,8 +19,7 @@
 )
 from diffusers.models.modeling_utils import load_model_dict_into_meta
 from diffusers.utils.import_utils import is_accelerate_available
-from termcolor import colored
-from transformers import AutoModelForCausalLM, AutoTokenizer
+

 CTX = init_empty_weights if is_accelerate_available else nullcontext

@@ -203,7 +205,7 @@ def main(args):
     # Scheduler
     if args.scheduler_type == "flow-dpm_solver":
         scheduler = DPMSolverMultistepScheduler(
-            flow_shift=flow_shift,
+            flow_shift=flow_shift,
             use_flow_sigmas=True,
             prediction_type="flow_prediction",
        )

src/diffusers/__init__.py
2 additions, 2 deletions

@@ -803,9 +803,9 @@
     PixArtAlphaPipeline,
     PixArtSigmaPAGPipeline,
     PixArtSigmaPipeline,
-    SanaPipeline,
-    SanaPAGPipeline,
     ReduxImageEncoder,
+    SanaPAGPipeline,
+    SanaPipeline,
     SemanticStableDiffusionPipeline,
     ShapEImg2ImgPipeline,
     ShapEPipeline,

src/diffusers/models/attention.py
9 additions, 2 deletions

@@ -22,7 +22,14 @@
 from .activations import GEGLU, GELU, ApproximateGELU, FP32SiLU, SwiGLU, get_activation
 from .attention_processor import Attention, JointAttnProcessor2_0
 from .embeddings import SinusoidalPositionalEmbedding
-from .normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm, SD35AdaLayerNormZeroX, RMSNorm2d
+from .normalization import (
+    AdaLayerNorm,
+    AdaLayerNormContinuous,
+    AdaLayerNormZero,
+    RMSNorm,
+    RMSNorm2d,
+    SD35AdaLayerNormZeroX,
+)


 logger = logging.get_logger(__name__)
@@ -1267,7 +1274,7 @@ def __init__(
         total_dim = heads * dim

         self.dim = dim
-
+
         qkv = [nn.Conv2d(in_channels=in_channels, out_channels=3 * total_dim, kernel_size=1, bias=use_bias[0])]
         if norm[0] is None:
             pass
src/diffusers/models/autoencoders/__init__.py
1 addition, 1 deletion

@@ -1,4 +1,5 @@
 from .autoencoder_asym_kl import AsymmetricAutoencoderKL
+from .autoencoder_dc import DCAE
 from .autoencoder_kl import AutoencoderKL
 from .autoencoder_kl_allegro import AutoencoderKLAllegro
 from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
@@ -7,5 +8,4 @@
 from .autoencoder_oobleck import AutoencoderOobleck
 from .autoencoder_tiny import AutoencoderTiny
 from .consistency_decoder_vae import ConsistencyDecoderVAE
-from .autoencoder_dc import DCAE
 from .vq_model import VQModel

src/diffusers/models/autoencoders/autoencoder_dc.py
8 additions, 14 deletions

@@ -13,25 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from typing import Any, Optional, Callable, Union
-from collections import OrderedDict
+from typing import Optional, Union

 import torch
 import torch.nn as nn
-import torch.nn.functional as F
 from torch.nn import BatchNorm2d
-from huggingface_hub import PyTorchModelHubMixin
-import ipdb

 from ...configuration_utils import ConfigMixin, register_to_config
-from ..modeling_utils import ModelMixin
-
 from ..activations import get_activation
-from ..normalization import RMSNorm2d
-from ..downsampling import ConvPixelUnshuffleDownsample2D, PixelUnshuffleChannelAveragingDownsample2D
-from ..upsampling import ConvPixelShuffleUpsample2D, ChannelDuplicatingPixelUnshuffleUpsample2D, Upsample2D
 from ..attention import DCAELiteMLA
-
+from ..downsampling import ConvPixelUnshuffleDownsample2D, PixelUnshuffleChannelAveragingDownsample2D
+from ..modeling_utils import ModelMixin
+from ..normalization import RMSNorm2d
+from ..upsampling import ChannelDuplicatingPixelUnshuffleUpsample2D, ConvPixelShuffleUpsample2D, Upsample2D
 from .vae import DecoderOutput


@@ -267,7 +261,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:

 class Encoder(nn.Module):
     def __init__(
-        self,
+        self,
         in_channels: int,
         latent_channels: int,
         width_list: list[int] = [128, 256, 512, 512, 1024, 1024],
@@ -291,7 +285,7 @@ def __init__(
             raise ValueError(f"len(depth_list) {len(depth_list)} and len(width_list) {len(width_list)} should be equal to num_stages {num_stages}")
         if not isinstance(block_type, (str, list)) or (isinstance(block_type, list) and len(block_type) != num_stages):
             raise ValueError(f"block_type should be either a str or a list of str with length {num_stages}, but got {block_type}")
-
+
         # project in
         if depth_list[0] > 0:
             project_in_block = nn.Conv2d(
@@ -422,7 +416,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:

 class Decoder(nn.Module):
     def __init__(
-        self,
+        self,
         in_channels: int,
         latent_channels: int,
         in_shortcut: Optional[str] = "duplicating",
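Besides regrouping the relative imports, the hunk above also drops imports that nothing in the module references any more (ipdb, torch.nn.functional as F, PyTorchModelHubMixin, OrderedDict, plus the unused Any and Callable typing names). The quality target fails on exactly this kind of leftover, typically through an unused-import lint rule such as F401. The snippet below is a rough standard-library sketch of that kind of check, not the project's actual tooling, and it deliberately ignores many cases a real linter handles:

import ast
import sys


def unused_imports(source: str) -> list[str]:
    """Report names a module imports but never references.

    Simplified on purpose: it only looks at bare Name nodes, so it misses
    __all__ re-exports, string annotations, and other cases that a real
    linter (for example ruff's F401 rule) handles correctly.
    """
    tree = ast.parse(source)

    imported: list[str] = []
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            # "import torch.nn as nn" binds "nn"; "import ipdb" binds "ipdb"
            imported += [a.asname or a.name.split(".")[0] for a in node.names]
        elif isinstance(node, ast.ImportFrom):
            # "from typing import Optional" binds "Optional"
            imported += [a.asname or a.name for a in node.names]

    used = {n.id for n in ast.walk(tree) if isinstance(n, ast.Name)}
    return [name for name in imported if name not in used]


if __name__ == "__main__":
    # Usage: python check_imports.py path/to/module.py
    with open(sys.argv[1], encoding="utf-8") as f:
        print(unused_imports(f.read()))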

src/diffusers/pipelines/pag/pipeline_pag_sana.py
0 additions, 1 deletion

@@ -36,7 +36,6 @@
 from ...utils.torch_utils import randn_tensor
 from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
 from ..pixart_alpha.pipeline_pixart_alpha import (
-    ASPECT_RATIO_256_BIN,
     ASPECT_RATIO_512_BIN,
     ASPECT_RATIO_1024_BIN,
 )
