@@ -1,11 +1,10 @@
 from contextlib import contextmanager
 from dataclasses import dataclass
 from math import ceil
-from typing import Callable, Optional, Union, Any, Dict
+from typing import Callable, Optional, Union, Any

 import numpy as np
 import torch
-from diffusers.models.attention_processor import AttentionProcessor
 from diffusers import UNet2DConditionModel
 from typing_extensions import TypeAlias

@@ -123,32 +122,6 @@ def custom_attention_context(
         # TODO resuscitate attention map saving
         # self.remove_attention_map_saving()

-    def override_attention_processors(
-        self, conditioning: ExtraConditioningInfo, step_count: int
-    ) -> Dict[str, AttentionProcessor]:
-        """
-        setup cross attention .swap control. for diffusers this replaces the attention processor, so
-        the previous attention processor is returned so that the caller can restore it later.
-        """
-        old_attn_processors = self.model.attn_processors
-
-        # Load lora conditions into the model
-        if conditioning.has_lora_conditions:
-            for condition in conditioning.lora_conditions:
-                condition(self.model)
-
-        if conditioning.wants_cross_attention_control:
-            self.cross_attention_control_context = Context(
-                arguments=conditioning.cross_attention_control_args,
-                step_count=step_count,
-            )
-            override_cross_attention(
-                self.model,
-                self.cross_attention_control_context,
-                is_running_diffusers=self.is_running_diffusers,
-            )
-        return old_attn_processors
-
     def setup_attention_map_saving(self, saver: AttentionMapSaver):
         def callback(slice, dim, offset, slice_size, key):
             if dim is not None:
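
For context, the removed method's docstring describes the pattern it relied on: diffusers cross-attention control is installed by replacing the UNet's attention processors, and the previous processors are returned so the caller can restore them later. Below is a minimal sketch of that save/override/restore pattern using the public diffusers API; the helper name overridden_attn_processors is hypothetical and not part of this codebase.

from contextlib import contextmanager

from diffusers import UNet2DConditionModel


@contextmanager
def overridden_attn_processors(unet: UNet2DConditionModel, new_processors):
    # `attn_processors` returns a dict of processors keyed by module path;
    # keep a reference so the originals can be restored afterwards.
    old_processors = unet.attn_processors
    try:
        # `set_attn_processor` accepts either a single processor or a dict
        # keyed the same way as `attn_processors`.
        unet.set_attn_processor(new_processors)
        yield unet
    finally:
        unet.set_attn_processor(old_processors)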