Commit 86a1290

update
1 parent 57f374b · commit 86a1290

26 files changed: +32 / -27 lines changed

src/diffusers/models/attention.py

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@
 
 from ..utils import logging
 from ..utils.import_utils import is_torch_npu_available, is_torch_xla_available, is_xformers_available
-from .attention_processor import Attention, AttentionProcessor
+from .attention_processor import Attention, AttentionProcessor  # noqa
 
 
 if is_xformers_available():
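
The `# noqa` added here is presumably meant to keep flake8/ruff from flagging `Attention` and `AttentionProcessor` as unused, since `attention.py` appears to import them only to re-export them. A minimal sketch of that pattern, using a hypothetical module name (not part of this commit):

# Hypothetical `mypkg/attention.py`: the names are imported only so callers
# can write `from mypkg.attention import Attention`. The "# noqa" comment
# suppresses the unused-import warning; declaring __all__ is an equivalent,
# more explicit way to mark the names as intentional re-exports.
from mypkg.attention_processor import Attention, AttentionProcessor  # noqa

__all__ = ["Attention", "AttentionProcessor"]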

src/diffusers/models/transformers/cogvideox_transformer_3d.py

Lines changed: 7 additions & 2 deletions
@@ -22,13 +22,18 @@
 from ...loaders import PeftAdapterMixin
 from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
 from ...utils.torch_utils import maybe_allow_in_graph
-from ..attention_processor import Attention, AttentionProcessor, CogVideoXAttnProcessor2_0, FusedCogVideoXAttnProcessor2_0
-from .modeling_common import FeedForward
+from ..attention_processor import (
+    Attention,
+    AttentionProcessor,
+    CogVideoXAttnProcessor2_0,
+    FusedCogVideoXAttnProcessor2_0,
+)
 from ..cache_utils import CacheMixin
 from ..embeddings import CogVideoXPatchEmbed, TimestepEmbedding, Timesteps
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin
 from ..normalization import AdaLayerNorm, CogVideoXLayerNormZero
+from .modeling_common import FeedForward
 
 
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

src/diffusers/models/transformers/consisid_transformer_3d.py

Lines changed: 1 addition & 1 deletion
@@ -23,11 +23,11 @@
 from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
 from ...utils.torch_utils import maybe_allow_in_graph
 from ..attention_processor import Attention, AttentionProcessor, CogVideoXAttnProcessor2_0
-from .modeling_common import FeedForward
 from ..embeddings import CogVideoXPatchEmbed, TimestepEmbedding, Timesteps
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin
 from ..normalization import AdaLayerNorm, CogVideoXLayerNormZero
+from .modeling_common import FeedForward
 
 
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

src/diffusers/models/transformers/hunyuan_transformer_2d.py

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,6 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils import logging
 from ...utils.torch_utils import maybe_allow_in_graph
-from .modeling_common import FeedForward
 from ..attention_processor import Attention, AttentionProcessor, FusedHunyuanAttnProcessor2_0, HunyuanAttnProcessor2_0
 from ..embeddings import (
     HunyuanCombinedTimestepTextSizeStyleEmbedding,
@@ -29,6 +28,7 @@
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin
 from ..normalization import AdaLayerNormContinuous, FP32LayerNorm
+from .modeling_common import FeedForward
 
 
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

src/diffusers/models/transformers/latte_transformer_3d.py

Lines changed: 1 addition & 1 deletion
@@ -18,12 +18,12 @@
 from torch import nn
 
 from ...configuration_utils import ConfigMixin, register_to_config
-from .modeling_common import BasicTransformerBlock
 from ..cache_utils import CacheMixin
 from ..embeddings import PatchEmbed, PixArtAlphaTextProjection, get_1d_sincos_pos_embed_from_grid
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin
 from ..normalization import AdaLayerNormSingle
+from .modeling_common import BasicTransformerBlock
 
 
 class LatteTransformer3DModel(ModelMixin, ConfigMixin, CacheMixin):

src/diffusers/models/transformers/lumina_nextdit2d.py

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,6 @@
 
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils import logging
-from .modeling_common import LuminaFeedForward
 from ..attention_processor import Attention, LuminaAttnProcessor2_0
 from ..embeddings import (
     LuminaCombinedTimestepCaptionEmbedding,
@@ -28,6 +27,7 @@
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin
 from ..normalization import LuminaLayerNormContinuous, LuminaRMSNormZero, RMSNorm
+from .modeling_common import LuminaFeedForward
 
 
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

src/diffusers/models/transformers/pixart_transformer_2d.py

Lines changed: 1 addition & 1 deletion
@@ -18,12 +18,12 @@
 
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils import logging
-from .modeling_common import BasicTransformerBlock
 from ..attention_processor import Attention, AttentionProcessor, AttnProcessor, FusedAttnProcessor2_0
 from ..embeddings import PatchEmbed, PixArtAlphaTextProjection
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin
 from ..normalization import AdaLayerNormSingle
+from .modeling_common import BasicTransformerBlock
 
 
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

src/diffusers/models/transformers/prior_transformer.py

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,6 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin
 from ...utils import BaseOutput
-from .modeling_common import BasicTransformerBlock
 from ..attention_processor import (
     ADDED_KV_ATTENTION_PROCESSORS,
     CROSS_ATTENTION_PROCESSORS,
@@ -18,6 +17,7 @@
 )
 from ..embeddings import TimestepEmbedding, Timesteps
 from ..modeling_utils import ModelMixin
+from .modeling_common import BasicTransformerBlock
 
 
 @dataclass

src/diffusers/models/transformers/stable_audio_transformer.py

Lines changed: 1 addition & 1 deletion
@@ -23,10 +23,10 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils import logging
 from ...utils.torch_utils import maybe_allow_in_graph
-from .modeling_common import FeedForward
 from ..attention_processor import Attention, AttentionProcessor, StableAudioAttnProcessor2_0
 from ..modeling_utils import ModelMixin
 from ..transformers.transformer_2d import Transformer2DModelOutput
+from .modeling_common import FeedForward
 
 
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

src/diffusers/models/transformers/transformer_allegro.py

Lines changed: 1 addition & 1 deletion
@@ -22,13 +22,13 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils import logging
 from ...utils.torch_utils import maybe_allow_in_graph
-from .modeling_common import FeedForward
 from ..attention_processor import AllegroAttnProcessor2_0, Attention
 from ..cache_utils import CacheMixin
 from ..embeddings import PatchEmbed, PixArtAlphaTextProjection
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin
 from ..normalization import AdaLayerNormSingle
+from .modeling_common import FeedForward
 
 
 logger = logging.get_logger(__name__)
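
Every file in this commit only reshuffles import order (the sibling `from .modeling_common import ...` imports move below the `..`-level package imports), and that kind of change mainly risks circular-import regressions. A cheap sanity check is to import the touched modules and let Python report any failure. A minimal sketch, assuming the `diffusers` package from this branch is installed; the module names are taken from the file paths above:

# Smoke-test sketch for the import reshuffle: import a few of the modules
# touched by this commit and fail loudly if the reordered imports no longer
# resolve (e.g. because of a circular import).
import importlib

modules = [
    "diffusers.models.attention",
    "diffusers.models.transformers.cogvideox_transformer_3d",
    "diffusers.models.transformers.latte_transformer_3d",
    "diffusers.models.transformers.pixart_transformer_2d",
    "diffusers.models.transformers.transformer_allegro",
]

for name in modules:
    importlib.import_module(name)
    print(f"imported {name} OK")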
