
Commit 57f374b

update
1 parent 3b2e85d commit 57f374b

27 files changed, +36 −29 lines changed

src/diffusers/models/attention.py

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@

 from ..utils import logging
 from ..utils.import_utils import is_torch_npu_available, is_torch_xla_available, is_xformers_available
-from .attention_processor import AttentionProcessor
+from .attention_processor import Attention, AttentionProcessor


 if is_xformers_available():
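Taken together with the hunks below, this change indicates that `Attention` is now sourced from `attention_processor` rather than from `models.attention`. A minimal sketch, assuming only the two import paths that appear verbatim in this commit, of how downstream code could tolerate either layout:

# Hedged sketch: prefer the post-commit location of Attention and fall back to
# the pre-commit one. Both module paths come from the hunks in this commit;
# nothing else about the class is assumed.
try:
    from diffusers.models.attention_processor import Attention  # layout after this commit
except ImportError:
    from diffusers.models.attention import Attention  # pre-commit layout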

src/diffusers/models/transformers/cogvideox_transformer_3d.py

Lines changed: 2 additions & 2 deletions
@@ -22,8 +22,8 @@
 from ...loaders import PeftAdapterMixin
 from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
 from ...utils.torch_utils import maybe_allow_in_graph
-from ..attention import Attention, FeedForward
-from ..attention_processor import AttentionProcessor, CogVideoXAttnProcessor2_0, FusedCogVideoXAttnProcessor2_0
+from ..attention_processor import Attention, AttentionProcessor, CogVideoXAttnProcessor2_0, FusedCogVideoXAttnProcessor2_0
+from .modeling_common import FeedForward
 from ..cache_utils import CacheMixin
 from ..embeddings import CogVideoXPatchEmbed, TimestepEmbedding, Timesteps
 from ..modeling_outputs import Transformer2DModelOutput

src/diffusers/models/transformers/consisid_transformer_3d.py

Lines changed: 2 additions & 2 deletions
@@ -22,8 +22,8 @@
 from ...loaders import PeftAdapterMixin
 from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
 from ...utils.torch_utils import maybe_allow_in_graph
-from ..attention import Attention, FeedForward
-from ..attention_processor import AttentionProcessor, CogVideoXAttnProcessor2_0
+from ..attention_processor import Attention, AttentionProcessor, CogVideoXAttnProcessor2_0
+from .modeling_common import FeedForward
 from ..embeddings import CogVideoXPatchEmbed, TimestepEmbedding, Timesteps
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin

src/diffusers/models/transformers/hunyuan_transformer_2d.py

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils import logging
 from ...utils.torch_utils import maybe_allow_in_graph
-from ..attention import FeedForward
+from .modeling_common import FeedForward
 from ..attention_processor import Attention, AttentionProcessor, FusedHunyuanAttnProcessor2_0, HunyuanAttnProcessor2_0
 from ..embeddings import (
     HunyuanCombinedTimestepTextSizeStyleEmbedding,

src/diffusers/models/transformers/latte_transformer_3d.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
 from torch import nn

 from ...configuration_utils import ConfigMixin, register_to_config
-from ..attention import BasicTransformerBlock
+from .modeling_common import BasicTransformerBlock
 from ..cache_utils import CacheMixin
 from ..embeddings import PatchEmbed, PixArtAlphaTextProjection, get_1d_sincos_pos_embed_from_grid
 from ..modeling_outputs import Transformer2DModelOutput

src/diffusers/models/transformers/lumina_nextdit2d.py

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@

 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils import logging
-from ..attention import LuminaFeedForward
+from .modeling_common import LuminaFeedForward
 from ..attention_processor import Attention, LuminaAttnProcessor2_0
 from ..embeddings import (
     LuminaCombinedTimestepCaptionEmbedding,

src/diffusers/models/transformers/pixart_transformer_2d.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@

 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils import logging
-from ..attention import BasicTransformerBlock
+from .modeling_common import BasicTransformerBlock
 from ..attention_processor import Attention, AttentionProcessor, AttnProcessor, FusedAttnProcessor2_0
 from ..embeddings import PatchEmbed, PixArtAlphaTextProjection
 from ..modeling_outputs import Transformer2DModelOutput

src/diffusers/models/transformers/prior_transformer.py

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin
 from ...utils import BaseOutput
-from ..attention import BasicTransformerBlock
+from .modeling_common import BasicTransformerBlock
 from ..attention_processor import (
     ADDED_KV_ATTENTION_PROCESSORS,
     CROSS_ATTENTION_PROCESSORS,

src/diffusers/models/transformers/stable_audio_transformer.py

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils import logging
 from ...utils.torch_utils import maybe_allow_in_graph
-from ..attention import FeedForward
+from .modeling_common import FeedForward
 from ..attention_processor import Attention, AttentionProcessor, StableAudioAttnProcessor2_0
 from ..modeling_utils import ModelMixin
 from ..transformers.transformer_2d import Transformer2DModelOutput

src/diffusers/models/transformers/transformer_allegro.py

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils import logging
 from ...utils.torch_utils import maybe_allow_in_graph
-from ..attention import FeedForward
+from .modeling_common import FeedForward
 from ..attention_processor import AllegroAttnProcessor2_0, Attention
 from ..cache_utils import CacheMixin
 from ..embeddings import PatchEmbed, PixArtAlphaTextProjection
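Every transformer module in this excerpt now pulls its shared building blocks (FeedForward, LuminaFeedForward, BasicTransformerBlock) from a sibling `modeling_common` module instead of `..attention`. As an illustration only, external code importing these blocks directly could guard against both layouts; the absolute path below is inferred from the relative `.modeling_common` imports inside `diffusers/models/transformers/`, so treat it as an assumption rather than a documented API:

# Illustrative sketch, not part of the commit: resolve the shared blocks from
# whichever module exposes them in the installed diffusers version.
try:
    # layout after this commit (path inferred from the relative imports above)
    from diffusers.models.transformers.modeling_common import BasicTransformerBlock, FeedForward
except ImportError:
    # pre-commit layout
    from diffusers.models.attention import BasicTransformerBlock, FeedForward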
