
Commit 2c9b7c9

more
1 parent 06d4fb5 commit 2c9b7c9

27 files changed: +26 −1035 lines

src/diffusers/models/autoencoders/autoencoder_kl.py

Lines changed: 1 addition & 37 deletions

@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Dict, Optional, Tuple, Union
+from typing import Optional, Tuple, Union

 import torch
 import torch.nn as nn
@@ -26,7 +26,6 @@
     ADDED_KV_ATTENTION_PROCESSORS,
     CROSS_ATTENTION_PROCESSORS,
     Attention,
-    AttentionProcessor,
     AttnAddedKVProcessor,
     AttnProcessor,
     FusedAttnProcessor2_0,
@@ -168,41 +167,6 @@ def disable_slicing(self):
         """
         self.use_slicing = False

-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
-        r"""
-        Sets the attention processor to use to compute attention.
-
-        Parameters:
-            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
-                The instantiated processor class or a dictionary of processor classes that will be set as the processor
-                for **all** `Attention` layers.
-
-                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
-                processor. This is strongly recommended when setting trainable attention processors.
-
-        """
-        count = len(self.attn_processors.keys())
-
-        if isinstance(processor, dict) and len(processor) != count:
-            raise ValueError(
-                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
-                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
-            )
-
-        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
-            if hasattr(module, "set_processor"):
-                if not isinstance(processor, dict):
-                    module.set_processor(processor)
-                else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
-
-            for sub_name, child in module.named_children():
-                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
-
-        for name, module in self.named_children():
-            fn_recursive_attn_processor(name, module, processor)
-
     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
     def set_default_attn_processor(self):
         """

src/diffusers/models/autoencoders/autoencoder_kl_temporal_decoder.py

Lines changed: 2 additions & 37 deletions

@@ -12,15 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import itertools
-from typing import Dict, Optional, Tuple, Union
+from typing import Optional, Tuple, Union

 import torch
 import torch.nn as nn

 from ...configuration_utils import ConfigMixin, register_to_config
 from ...utils.accelerate_utils import apply_forward_hook
 from ..attention import AttentionMixin
-from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttentionProcessor, AttnProcessor
+from ..attention_processor import CROSS_ATTENTION_PROCESSORS, AttnProcessor
 from ..modeling_outputs import AutoencoderKLOutput
 from ..modeling_utils import ModelMixin
 from ..unets.unet_3d_blocks import MidBlockTemporalDecoder, UpBlockTemporalDecoder
@@ -203,41 +203,6 @@ def __init__(

         self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)

-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
-        r"""
-        Sets the attention processor to use to compute attention.
-
-        Parameters:
-            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
-                The instantiated processor class or a dictionary of processor classes that will be set as the processor
-                for **all** `Attention` layers.
-
-                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
-                processor. This is strongly recommended when setting trainable attention processors.
-
-        """
-        count = len(self.attn_processors.keys())
-
-        if isinstance(processor, dict) and len(processor) != count:
-            raise ValueError(
-                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
-                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
-            )
-
-        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
-            if hasattr(module, "set_processor"):
-                if not isinstance(processor, dict):
-                    module.set_processor(processor)
-                else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
-
-            for sub_name, child in module.named_children():
-                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
-
-        for name, module in self.named_children():
-            fn_recursive_attn_processor(name, module, processor)
-
     def set_default_attn_processor(self):
         """
         Disables custom attention processors and sets the default attention implementation.

src/diffusers/models/autoencoders/consistency_decoder_vae.py

Lines changed: 1 addition & 37 deletions

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from dataclasses import dataclass
-from typing import Dict, Optional, Tuple, Union
+from typing import Optional, Tuple, Union

 import torch
 import torch.nn.functional as F
@@ -27,7 +27,6 @@
 from ..attention_processor import (
     ADDED_KV_ATTENTION_PROCESSORS,
     CROSS_ATTENTION_PROCESSORS,
-    AttentionProcessor,
     AttnAddedKVProcessor,
     AttnProcessor,
 )
@@ -201,41 +200,6 @@ def disable_slicing(self):
         """
         self.use_slicing = False

-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
-        r"""
-        Sets the attention processor to use to compute attention.
-
-        Parameters:
-            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
-                The instantiated processor class or a dictionary of processor classes that will be set as the processor
-                for **all** `Attention` layers.
-
-                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
-                processor. This is strongly recommended when setting trainable attention processors.
-
-        """
-        count = len(self.attn_processors.keys())
-
-        if isinstance(processor, dict) and len(processor) != count:
-            raise ValueError(
-                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
-                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
-            )
-
-        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
-            if hasattr(module, "set_processor"):
-                if not isinstance(processor, dict):
-                    module.set_processor(processor)
-                else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
-
-            for sub_name, child in module.named_children():
-                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
-
-        for name, module in self.named_children():
-            fn_recursive_attn_processor(name, module, processor)
-
     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
     def set_default_attn_processor(self):
         """

src/diffusers/models/controlnets/controlnet.py

Lines changed: 0 additions & 36 deletions

@@ -25,7 +25,6 @@
 from ..attention_processor import (
     ADDED_KV_ATTENTION_PROCESSORS,
     CROSS_ATTENTION_PROCESSORS,
-    AttentionProcessor,
     AttnAddedKVProcessor,
     AttnProcessor,
 )
@@ -516,41 +515,6 @@ def from_unet(

         return controlnet

-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
-        r"""
-        Sets the attention processor to use to compute attention.
-
-        Parameters:
-            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
-                The instantiated processor class or a dictionary of processor classes that will be set as the processor
-                for **all** `Attention` layers.
-
-                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
-                processor. This is strongly recommended when setting trainable attention processors.
-
-        """
-        count = len(self.attn_processors.keys())
-
-        if isinstance(processor, dict) and len(processor) != count:
-            raise ValueError(
-                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
-                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
-            )
-
-        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
-            if hasattr(module, "set_processor"):
-                if not isinstance(processor, dict):
-                    module.set_processor(processor)
-                else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
-
-            for sub_name, child in module.named_children():
-                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
-
-        for name, module in self.named_children():
-            fn_recursive_attn_processor(name, module, processor)
-
     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
     def set_default_attn_processor(self):
         """

src/diffusers/models/controlnets/controlnet_sana.py

Lines changed: 0 additions & 36 deletions

@@ -22,7 +22,6 @@
 from ...loaders import PeftAdapterMixin
 from ...utils import USE_PEFT_BACKEND, BaseOutput, logging, scale_lora_layers, unscale_lora_layers
 from ..attention import AttentionMixin
-from ..attention_processor import AttentionProcessor
 from ..embeddings import PatchEmbed, PixArtAlphaTextProjection
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin
@@ -118,41 +117,6 @@ def __init__(

         self.gradient_checkpointing = False

-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
-        r"""
-        Sets the attention processor to use to compute attention.
-
-        Parameters:
-            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
-                The instantiated processor class or a dictionary of processor classes that will be set as the processor
-                for **all** `Attention` layers.
-
-                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
-                processor. This is strongly recommended when setting trainable attention processors.
-
-        """
-        count = len(self.attn_processors.keys())
-
-        if isinstance(processor, dict) and len(processor) != count:
-            raise ValueError(
-                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
-                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
-            )
-
-        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
-            if hasattr(module, "set_processor"):
-                if not isinstance(processor, dict):
-                    module.set_processor(processor)
-                else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
-
-            for sub_name, child in module.named_children():
-                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
-
-        for name, module in self.named_children():
-            fn_recursive_attn_processor(name, module, processor)
-
     def forward(
         self,
         hidden_states: torch.Tensor,

src/diffusers/models/controlnets/controlnet_sd3.py

Lines changed: 1 addition & 36 deletions

@@ -23,7 +23,7 @@
 from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
 from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
 from ..attention import AttentionMixin, JointTransformerBlock
-from ..attention_processor import Attention, AttentionProcessor, FusedJointAttnProcessor2_0
+from ..attention_processor import Attention, FusedJointAttnProcessor2_0
 from ..embeddings import CombinedTimestepTextProjEmbeddings, PatchEmbed
 from ..modeling_outputs import Transformer2DModelOutput
 from ..modeling_utils import ModelMixin
@@ -204,41 +204,6 @@ def fn_recursive_feed_forward(module: torch.nn.Module, chunk_size: int, dim: int
         for module in self.children():
             fn_recursive_feed_forward(module, chunk_size, dim)

-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
-        r"""
-        Sets the attention processor to use to compute attention.
-
-        Parameters:
-            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
-                The instantiated processor class or a dictionary of processor classes that will be set as the processor
-                for **all** `Attention` layers.
-
-                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
-                processor. This is strongly recommended when setting trainable attention processors.
-
-        """
-        count = len(self.attn_processors.keys())
-
-        if isinstance(processor, dict) and len(processor) != count:
-            raise ValueError(
-                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
-                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
-            )
-
-        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
-            if hasattr(module, "set_processor"):
-                if not isinstance(processor, dict):
-                    module.set_processor(processor)
-                else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
-
-            for sub_name, child in module.named_children():
-                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
-
-        for name, module in self.named_children():
-            fn_recursive_attn_processor(name, module, processor)
-
     # Copied from diffusers.models.transformers.transformer_sd3.SD3Transformer2DModel.fuse_qkv_projections
     def fuse_qkv_projections(self):
         """

src/diffusers/models/controlnets/controlnet_sparsectrl.py

Lines changed: 0 additions & 36 deletions

@@ -26,7 +26,6 @@
 from ..attention_processor import (
     ADDED_KV_ATTENTION_PROCESSORS,
     CROSS_ATTENTION_PROCESSORS,
-    AttentionProcessor,
     AttnAddedKVProcessor,
     AttnProcessor,
 )
@@ -449,41 +448,6 @@ def from_unet(

         return controlnet

-    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
-        r"""
-        Sets the attention processor to use to compute attention.
-
-        Parameters:
-            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
-                The instantiated processor class or a dictionary of processor classes that will be set as the processor
-                for **all** `Attention` layers.
-
-                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
-                processor. This is strongly recommended when setting trainable attention processors.
-
-        """
-        count = len(self.attn_processors.keys())
-
-        if isinstance(processor, dict) and len(processor) != count:
-            raise ValueError(
-                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
-                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
-            )
-
-        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
-            if hasattr(module, "set_processor"):
-                if not isinstance(processor, dict):
-                    module.set_processor(processor)
-                else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
-
-            for sub_name, child in module.named_children():
-                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
-
-        for name, module in self.named_children():
-            fn_recursive_attn_processor(name, module, processor)
-
     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
     def set_default_attn_processor(self):
         """
