
Commit 432e526

Revert merge changes
1 parent: 830740b

File tree: 2 files changed, 1 addition, 36 deletions

ldm/models/diffusion/cross_attention_control.py

Lines changed: 0 additions & 8 deletions
@@ -14,7 +14,6 @@
 
 from compel.cross_attention_control import Arguments
 from diffusers.models.unet_2d_condition import UNet2DConditionModel
-from diffusers.models.attention_processor import AttentionProcessor
 from ldm.invoke.devices import torch_dtype
 
 
@@ -288,13 +287,6 @@ def get_invokeai_attention_mem_efficient(self, q, k, v):
         return self.einsum_op_tensor_mem(q, k, v, 32)
 
 
-def restore_default_cross_attention(model, is_running_diffusers: bool, processors_to_restore: Optional[AttentionProcessor]=None):
-    if is_running_diffusers:
-        unet = model
-        unet.set_attn_processor(processors_to_restore or AttnProcessor())
-    else:
-        remove_attention_function(model)
-
 def setup_cross_attention_control_attention_processors(unet: UNet2DConditionModel, context: Context):
     """
     Inject attention parameters and functions into the passed in model to enable cross attention editing.
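For context on what the revert removes: the deleted `restore_default_cross_attention` leaned on the public diffusers API for swapping attention processors. The sketch below is not the project's code, only a minimal illustration of that API (`unet.attn_processors` to capture the current processors, `unet.set_attn_processor` to install a saved set or reset to the default `AttnProcessor`); the function name `reset_attention_processors` is invented for the example.

```python
# Minimal sketch of the diffusers attention-processor API used by the reverted helper.
# Not InvokeAI code; `reset_attention_processors` is an illustrative name only.
from typing import Dict, Optional

from diffusers.models.unet_2d_condition import UNet2DConditionModel
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor


def reset_attention_processors(
    unet: UNet2DConditionModel,
    saved: Optional[Dict[str, AttentionProcessor]] = None,
) -> None:
    # set_attn_processor accepts either a single processor (applied to every
    # attention module) or a dict keyed by module name, e.g. one captured
    # earlier from unet.attn_processors.
    unet.set_attn_processor(saved or AttnProcessor())
```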

ldm/models/diffusion/shared_invokeai_diffusion.py

Lines changed: 1 addition & 28 deletions
@@ -1,11 +1,10 @@
 from contextlib import contextmanager
 from dataclasses import dataclass
 from math import ceil
-from typing import Callable, Optional, Union, Any, Dict
+from typing import Callable, Optional, Union, Any
 
 import numpy as np
 import torch
-from diffusers.models.attention_processor import AttentionProcessor
 from diffusers import UNet2DConditionModel
 from typing_extensions import TypeAlias
 
@@ -123,32 +122,6 @@ def custom_attention_context(
             # TODO resuscitate attention map saving
             # self.remove_attention_map_saving()
 
-    def override_attention_processors(
-        self, conditioning: ExtraConditioningInfo, step_count: int
-    ) -> Dict[str, AttentionProcessor]:
-        """
-        setup cross attention .swap control. for diffusers this replaces the attention processor, so
-        the previous attention processor is returned so that the caller can restore it later.
-        """
-        old_attn_processors = self.model.attn_processors
-
-        # Load lora conditions into the model
-        if conditioning.has_lora_conditions:
-            for condition in conditioning.lora_conditions:
-                condition(self.model)
-
-        if conditioning.wants_cross_attention_control:
-            self.cross_attention_control_context = Context(
-                arguments=conditioning.cross_attention_control_args,
-                step_count=step_count,
-            )
-            override_cross_attention(
-                self.model,
-                self.cross_attention_control_context,
-                is_running_diffusers=self.is_running_diffusers,
-            )
-        return old_attn_processors
-
     def setup_attention_map_saving(self, saver: AttentionMapSaver):
         def callback(slice, dim, offset, slice_size, key):
             if dim is not None:
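The removed `override_attention_processors` captured `self.model.attn_processors` and returned the old processors so the caller could restore them after sampling; with this revert, that capture-and-restore responsibility stays inside `custom_attention_context`. As a hedged illustration of the general pattern only (not the InvokeAI implementation; `swapped_attention_processors` and `new_processors` are placeholder names), a context manager that temporarily installs a set of processors could look like this:

```python
# Hypothetical sketch of the save/override/restore pattern around a diffusers UNet.
# Not InvokeAI code; the function and argument names are placeholders.
from contextlib import contextmanager

from diffusers.models.unet_2d_condition import UNet2DConditionModel


@contextmanager
def swapped_attention_processors(unet: UNet2DConditionModel, new_processors):
    old_processors = unet.attn_processors  # dict: attention-module name -> processor
    unet.set_attn_processor(new_processors)
    try:
        yield unet
    finally:
        # Put the original processors back, even if denoising raised.
        unet.set_attn_processor(old_processors)
```

Used as `with swapped_attention_processors(unet, my_processors): ...`, this guarantees the original processors are reinstalled when the block exits, which is the behaviour the returned `old_attn_processors` dict was meant to enable for callers of the removed method.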
