
Commit 06d4fb5

remove attn_processors property
1 parent 693d8a3 commit 06d4fb5

25 files changed: +50 −660 lines changed
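This commit deduplicates the `attn_processors` property: each model class below now inherits it from `AttentionMixin` (imported from `..attention`) instead of carrying its own copy. The mixin itself is not part of this diff; the sketch below is an assumption of what it provides, reconstructed from the property being deleted in each file.

```python
# Hedged sketch only -- diffusers' actual AttentionMixin is not shown in this commit.
# It mirrors the per-class property removed below: walk all submodules and collect
# their attention processors, keyed by weight name.
from typing import Dict

import torch

from diffusers.models.attention_processor import AttentionProcessor


class AttentionMixin:
    @property
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        # Collect processors recursively from every submodule exposing get_processor().
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():  # concrete classes are nn.Modules via ModelMixin
            fn_recursive_add_processors(name, module, processors)

        return processors
```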

src/diffusers/models/autoencoders/autoencoder_kl.py

Lines changed: 2 additions & 26 deletions

@@ -21,6 +21,7 @@
 from ...loaders.single_file_model import FromOriginalModelMixin
 from ...utils import deprecate
 from ...utils.accelerate_utils import apply_forward_hook
+from ..attention import AttentionMixin
 from ..attention_processor import (
     ADDED_KV_ATTENTION_PROCESSORS,
     CROSS_ATTENTION_PROCESSORS,
@@ -35,7 +36,7 @@
 from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder


-class AutoencoderKL(ModelMixin, ConfigMixin, FromOriginalModelMixin, PeftAdapterMixin):
+class AutoencoderKL(ModelMixin, AttentionMixin, ConfigMixin, FromOriginalModelMixin, PeftAdapterMixin):
     r"""
     A VAE model with KL loss for encoding images into latents and decoding latent representations into images.

@@ -167,31 +168,6 @@ def disable_slicing(self):
         """
         self.use_slicing = False

-    @property
-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
-    def attn_processors(self) -> Dict[str, AttentionProcessor]:
-        r"""
-        Returns:
-            `dict` of attention processors: A dictionary containing all attention processors used in the model with
-            indexed by its weight name.
-        """
-        # set recursively
-        processors = {}
-
-        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
-            if hasattr(module, "get_processor"):
-                processors[f"{name}.processor"] = module.get_processor()
-
-            for sub_name, child in module.named_children():
-                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
-
-            return processors
-
-        for name, module in self.named_children():
-            fn_recursive_add_processors(name, module, processors)
-
-        return processors
-
     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
     def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
         r"""

src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py

Lines changed: 2 additions & 26 deletions

@@ -19,6 +19,7 @@

 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils.accelerate_utils import apply_forward_hook
+from ..attention import AttentionMixin
 from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor
 from ..modeling_outputs import AutoencoderKLOutput
 from ..modeling_utils import ModelMixin
@@ -135,7 +136,7 @@ def forward(
         return sample


-class AutoencoderKLTemporalDecoder(ModelMixin, ConfigMixin):
+class AutoencoderKLTemporalDecoder(ModelMixin, AttentionMixin, ConfigMixin):
     r"""
     A VAE model with KL loss for encoding images into latents and decoding latent representations into images.

@@ -202,31 +203,6 @@ def __init__(

         self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)

-    @property
-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
-    def attn_processors(self) -> Dict[str, AttentionProcessor]:
-        r"""
-        Returns:
-            `dict` of attention processors: A dictionary containing all attention processors used in the model with
-            indexed by its weight name.
-        """
-        # set recursively
-        processors = {}
-
-        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
-            if hasattr(module, "get_processor"):
-                processors[f"{name}.processor"] = module.get_processor()
-
-            for sub_name, child in module.named_children():
-                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
-
-            return processors
-
-        for name, module in self.named_children():
-            fn_recursive_add_processors(name, module, processors)
-
-        return processors
-
     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
     def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
         r"""

src/diffusers/models/autoencoders/consistency_decoder_vae.py

Lines changed: 2 additions & 26 deletions

@@ -23,6 +23,7 @@
 from ...utils import BaseOutput
 from ...utils.accelerate_utils import apply_forward_hook
 from ...utils.torch_utils import randn_tensor
+from ..attention import AttentionMixin
 from ..attention_processor import (
     ADDED_KV_ATTENTION_PROCESSORS,
     CROSS_ATTENTION_PROCESSORS,
@@ -49,7 +50,7 @@ class ConsistencyDecoderVAEOutput(BaseOutput):
     latent_dist: "DiagonalGaussianDistribution"


-class ConsistencyDecoderVAE(ModelMixin, ConfigMixin):
+class ConsistencyDecoderVAE(ModelMixin, AttentionMixin, ConfigMixin):
     r"""
     The consistency decoder used with DALL-E 3.

@@ -200,31 +201,6 @@ def disable_slicing(self):
         """
         self.use_slicing = False

-    @property
-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
-    def attn_processors(self) -> Dict[str, AttentionProcessor]:
-        r"""
-        Returns:
-            `dict` of attention processors: A dictionary containing all attention processors used in the model with
-            indexed by its weight name.
-        """
-        # set recursively
-        processors = {}
-
-        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
-            if hasattr(module, "get_processor"):
-                processors[f"{name}.processor"] = module.get_processor()
-
-            for sub_name, child in module.named_children():
-                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
-
-            return processors
-
-        for name, module in self.named_children():
-            fn_recursive_add_processors(name, module, processors)
-
-        return processors
-
     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
     def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
         r"""

src/diffusers/models/controlnets/controlnet.py

Lines changed: 2 additions & 26 deletions

@@ -21,6 +21,7 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...loaders.single_file_model import FromOriginalModelMixin
 from ...utils import BaseOutput, logging
+from ..attention import AttentionMixin
 from ..attention_processor import (
     ADDED_KV_ATTENTION_PROCESSORS,
     CROSS_ATTENTION_PROCESSORS,
@@ -106,7 +107,7 @@ def forward(self, conditioning):
         return embedding


-class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
+class ControlNetModel(ModelMixin, AttentionMixin, ConfigMixin, FromOriginalModelMixin):
     """
     A ControlNet model.

@@ -515,31 +516,6 @@ def from_unet(

         return controlnet

-    @property
-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
-    def attn_processors(self) -> Dict[str, AttentionProcessor]:
-        r"""
-        Returns:
-            `dict` of attention processors: A dictionary containing all attention processors used in the model with
-            indexed by its weight name.
-        """
-        # set recursively
-        processors = {}
-
-        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
-            if hasattr(module, "get_processor"):
-                processors[f"{name}.processor"] = module.get_processor()
-
-            for sub_name, child in module.named_children():
-                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
-
-            return processors
-
-        for name, module in self.named_children():
-            fn_recursive_add_processors(name, module, processors)
-
-        return processors
-
     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
     def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
         r"""

src/diffusers/models/controlnets/controlnet_sana.py

Lines changed: 2 additions & 26 deletions

@@ -21,6 +21,7 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...loaders import PeftAdapterMixin
 from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers
+from ..attention import AttentionMixin
 from ..attention_processor import AttentionProcessor
 from ..embeddings import PatchEmbed, PixArtAlphaTextProjection
 from ..modeling_outputs import Transformer2DModelOutput
@@ -38,7 +39,7 @@ class SanaControlNetOutput(BaseOutput):
     controlnet_block_samples: Tuple[torch.Tensor]


-class SanaControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin):
+class SanaControlNetModel(ModelMixin, AttentionMixin, ConfigMixin, PeftAdapterMixin):
     _supports_gradient_checkpointing = True
     _no_split_modules = ["SanaTransformerBlock", "PatchEmbed"]
     _skip_layerwise_casting_patterns = ["patch_embed", "norm"]
@@ -117,31 +118,6 @@ def __init__(

         self.gradient_checkpointing = False

-    @property
-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
-    def attn_processors(self) -> Dict[str, AttentionProcessor]:
-        r"""
-        Returns:
-            `dict` of attention processors: A dictionary containing all attention processors used in the model with
-            indexed by its weight name.
-        """
-        # set recursively
-        processors = {}
-
-        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
-            if hasattr(module, "get_processor"):
-                processors[f"{name}.processor"] = module.get_processor()
-
-            for sub_name, child in module.named_children():
-                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
-
-            return processors
-
-        for name, module in self.named_children():
-            fn_recursive_add_processors(name, module, processors)
-
-        return processors
-
     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
     def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
         r"""

src/diffusers/models/controlnets/controlnet_sd3.py

Lines changed: 2 additions & 27 deletions

@@ -22,7 +22,7 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
 from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
-from ..attention import JointTransformerBlock
+from ..attention import AttentionMixin, JointTransformerBlock
 from ..attention_processor import Attention, AttentionProcessor, FusedJointAttnProcessor2_0
 from ..embeddings import CombinedTimestepTextProjEmbeddings, PatchEmbed
 from ..modeling_outputs import Transformer2DModelOutput
@@ -39,7 +39,7 @@ class SD3ControlNetOutput(BaseOutput):
     controlnet_block_samples: Tuple[torch.Tensor]


-class SD3ControlNetModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
+class SD3ControlNetModel(ModelMixin, AttentionMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
     r"""
     ControlNet model for [Stable Diffusion 3](https://huggingface.co/papers/2403.03206).

@@ -204,31 +204,6 @@ def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int
         for module in self.children():
             fn_recursive_feed_forward(module, chunk_size, dim)

-    @property
-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
-    def attn_processors(self) -> Dict[str, AttentionProcessor]:
-        r"""
-        Returns:
-            `dict` of attention processors: A dictionary containing all attention processors used in the model with
-            indexed by its weight name.
-        """
-        # set recursively
-        processors = {}
-
-        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
-            if hasattr(module, "get_processor"):
-                processors[f"{name}.processor"] = module.get_processor()
-
-            for sub_name, child in module.named_children():
-                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
-
-            return processors
-
-        for name, module in self.named_children():
-            fn_recursive_add_processors(name, module, processors)
-
-        return processors
-
     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
     def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
         r"""

src/diffusers/models/controlnets/controlnet_sparsectrl.py

Lines changed: 2 additions & 26 deletions

@@ -22,6 +22,7 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...loaders import FromOriginalModelMixin
 from ...utils import BaseOutput, logging
+from ..attention import AttentionMixin
 from ..attention_processor import (
     ADDED_KV_ATTENTION_PROCESSORS,
     CROSS_ATTENTION_PROCESSORS,
@@ -93,7 +94,7 @@ def forward(self, conditioning: torch.Tensor) -> torch.Tensor:
         return embedding


-class SparseControlNetModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
+class SparseControlNetModel(ModelMixin, AttentionMixin, ConfigMixin, FromOriginalModelMixin):
     """
     A SparseControlNet model as described in [SparseCtrl: Adding Sparse Controls to Text-to-Video Diffusion
     Models](https://huggingface.co/papers/2311.16933).
@@ -448,31 +449,6 @@ def from_unet(

         return controlnet

-    @property
-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
-    def attn_processors(self) -> Dict[str, AttentionProcessor]:
-        r"""
-        Returns:
-            `dict` of attention processors: A dictionary containing all attention processors used in the model with
-            indexed by its weight name.
-        """
-        # set recursively
-        processors = {}
-
-        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
-            if hasattr(module, "get_processor"):
-                processors[f"{name}.processor"] = module.get_processor()
-
-            for sub_name, child in module.named_children():
-                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
-
-            return processors
-
-        for name, module in self.named_children():
-            fn_recursive_add_processors(name, module, processors)
-
-        return processors
-
     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
     def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
         r"""

src/diffusers/models/controlnets/controlnet_union.py

Lines changed: 2 additions & 26 deletions

@@ -19,6 +19,7 @@
 from ...configuration_utils import ConfigMixin, register_to_config
 from ...loaders.single_file_model import FromOriginalModelMixin
 from ...utils import logging
+from ..attention import AttentionMixin
 from ..attention_processor import (
     ADDED_KV_ATTENTION_PROCESSORS,
     CROSS_ATTENTION_PROCESSORS,
@@ -81,7 +82,7 @@ def forward(self, x: torch.Tensor):
         return x


-class ControlNetUnionModel(ModelMixin, ConfigMixin, FromOriginalModelMixin):
+class ControlNetUnionModel(ModelMixin, AttentionMixin, ConfigMixin, FromOriginalModelMixin):
     """
     A ControlNetUnion model.

@@ -455,31 +456,6 @@ def from_unet(

         return controlnet

-    @property
-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
-    def attn_processors(self) -> Dict[str, AttentionProcessor]:
-        r"""
-        Returns:
-            `dict` of attention processors: A dictionary containing all attention processors used in the model with
-            indexed by its weight name.
-        """
-        # set recursively
-        processors = {}
-
-        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
-            if hasattr(module, "get_processor"):
-                processors[f"{name}.processor"] = module.get_processor()
-
-            for sub_name, child in module.named_children():
-                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
-
-            return processors
-
-        for name, module in self.named_children():
-            fn_recursive_add_processors(name, module, processors)
-
-        return processors
-
     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
     def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
         r"""
