
Commit fd18f9a

make style
1 parent: 892b70d

File tree

8 files changed: +361 -281 lines changed

src/diffusers/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -77,6 +77,7 @@
 else:
     _import_structure["models"].extend(
         [
+            "AllegroTransformer3DModel",
             "AsymmetricAutoencoderKL",
             "AuraFlowTransformer2DModel",
             "AutoencoderKL",
@@ -85,7 +86,6 @@
             "AutoencoderKLTemporalDecoder",
             "AutoencoderOobleck",
             "AutoencoderTiny",
-            "AllegroTransformer3DModel",
             "CogVideoXTransformer3DModel",
             "CogView3PlusTransformer2DModel",
             "ConsistencyDecoderVAE",
@@ -559,9 +559,9 @@
         from .utils.dummy_pt_objects import *  # noqa F403
     else:
         from .models import (
+            AllegroTransformer3DModel,
             AsymmetricAutoencoderKL,
             AuraFlowTransformer2DModel,
-            AllegroTransformer3DModel,
             AutoencoderKL,
             AutoencoderKLAllegro,
             AutoencoderKLCogVideoX,
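
For context: the quoted entries in _import_structure["models"] and the names in the mirrored "from .models import (...)" block feed diffusers' lazy-import machinery and are expected to stay in sync and alphabetically sorted, which is what "make style" enforces here by moving AllegroTransformer3DModel in both lists. Below is a minimal sketch of that lazy-import pattern; the _LazyModule helper name and call signature are assumptions for illustration, not copied from the actual file.

import sys
from typing import TYPE_CHECKING

from .utils import _LazyModule  # assumed helper name

_import_structure = {"models": []}
_import_structure["models"].extend(
    [
        "AllegroTransformer3DModel",  # kept in alphabetical order by `make style`
        "AsymmetricAutoencoderKL",
        "AuraFlowTransformer2DModel",
    ]
)

if TYPE_CHECKING:
    # Mirrors the string list above so static type checkers resolve real symbols.
    from .models import (
        AllegroTransformer3DModel,
        AsymmetricAutoencoderKL,
        AuraFlowTransformer2DModel,
    )
else:
    # Defer the heavy model imports until an attribute is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)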

src/diffusers/models/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -100,8 +100,8 @@
     from .embeddings import ImageProjection
     from .modeling_utils import ModelMixin
     from .transformers import (
-        AuraFlowTransformer2DModel,
         AllegroTransformer3DModel,
+        AuraFlowTransformer2DModel,
         CogVideoXTransformer3DModel,
         CogView3PlusTransformer2DModel,
         DiTTransformer2DModel,

src/diffusers/models/attention_processor.py

Lines changed: 4 additions & 2 deletions
@@ -1514,7 +1514,9 @@ class AllegroAttnProcessor2_0:
 
     def __init__(self):
         if not hasattr(F, "scaled_dot_product_attention"):
-            raise ImportError("AllegroAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
+            raise ImportError(
+                "AllegroAttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
+            )
 
     def __call__(
         self,
@@ -1569,7 +1571,7 @@ def __call__(
         # Apply RoPE if needed
         if image_rotary_emb is not None and not attn.is_cross_attention:
             from .embeddings import apply_rotary_emb_allegro
-
+
             query = apply_rotary_emb_allegro(query, image_rotary_emb[0], image_rotary_emb[1])
             key = apply_rotary_emb_allegro(key, image_rotary_emb[0], image_rotary_emb[1])
 
