Commit b972c4b ("cleanup")
Parent: cfe3921

2 files changed: +2 additions, -20 deletions
src/diffusers/models/hooks.py

Lines changed: 1 addition & 19 deletions
@@ -115,18 +115,9 @@ def reset_state(self, module):
 
 
 class PyramidAttentionBroadcastHook(ModelHook):
-    def __init__(
-        self,
-        skip_callback: Callable[[torch.nn.Module], bool],
-        # skip_range: int,
-        # timestep_range: Tuple[int, int],
-        # timestep_callback: Callable[[], Union[torch.LongTensor, int]],
-    ) -> None:
+    def __init__(self, skip_callback: Callable[[torch.nn.Module], bool]) -> None:
         super().__init__()
 
-        # self.skip_range = skip_range
-        # self.timestep_range = timestep_range
-        # self.timestep_callback = timestep_callback
         self.skip_callback = skip_callback
 
         self.cache = None
@@ -135,15 +126,6 @@ def __init__(
     def new_forward(self, module: torch.nn.Module, *args, **kwargs) -> Any:
         args, kwargs = module._diffusers_hook.pre_forward(module, *args, **kwargs)
 
-        # current_timestep = self.timestep_callback()
-        # is_within_timestep_range = self.timestep_range[0] < current_timestep < self.timestep_range[1]
-        # should_compute_attention = self._iteration % self.skip_range == 0
-
-        # if not is_within_timestep_range or should_compute_attention:
-        #     output = module._old_forward(*args, **kwargs)
-        # else:
-        #     output = self.attention_cache
-
         if self.cache is not None and self.skip_callback(module):
             output = self.cache
         else:
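
For context, the simplified hook now takes only a skip_callback and, on each forward call, either reuses its cached output or recomputes and refreshes the cache. Below is a minimal, self-contained sketch of that skip/cache pattern, not the actual diffusers hook machinery; DummyAttention, state, and skip_range are illustrative names that do not appear in this commit.

import torch


class DummyAttention(torch.nn.Module):
    # Stand-in for an attention block whose output may be reused across steps.
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * 2


state = {"iteration": 0, "cache": None}
skip_range = 2  # recompute attention every `skip_range` calls


def skip_callback(module: torch.nn.Module) -> bool:
    # Return True when the cached output may be reused for this call.
    should_compute = state["iteration"] % skip_range == 0
    state["iteration"] += 1
    return not should_compute


def hooked_forward(module: torch.nn.Module, x: torch.Tensor) -> torch.Tensor:
    # Mirrors the hook's new_forward: reuse the cache when the callback allows it,
    # otherwise run the real forward and refresh the cache.
    if state["cache"] is not None and skip_callback(module):
        return state["cache"]
    output = module(x)
    state["cache"] = output
    return output


attn = DummyAttention()
x = torch.ones(2, 4)
out1 = hooked_forward(attn, x)  # cache empty: computes attention and caches it
out2 = hooked_forward(attn, x)  # iteration 0: callback says compute, cache refreshed
out3 = hooked_forward(attn, x)  # iteration 1: callback says skip, cached output reused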

src/diffusers/pipelines/pyramid_attention_broadcast_utils.py

Lines changed: 1 addition & 1 deletion
@@ -251,7 +251,7 @@ def skip_callback(module: nn.Module) -> bool:
         pab_state.iteration += 1
         return not should_compute_attention
 
-    # We are still not yet in the phase of inference where skipping attention is possible without minimal quality
+    # We are still not in the phase of inference where skipping attention is possible without minimal quality
    # loss, as described in the paper. So, the attention computation cannot be skipped
     return False
 
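
The touched comment appears to sit in the gating branch of the skip_callback that the pipeline hands to the hook: attention may only be skipped while the current timestep falls inside a configured range, and only on iterations where the broadcast counter says recomputation is not due. A hedged sketch of that gating pattern follows; make_skip_callback, timestep_skip_range, and iteration_skip_range are illustrative names assumed for this example, not taken from the diff.

from typing import Callable, Tuple

import torch


def make_skip_callback(
    timestep_skip_range: Tuple[int, int],
    iteration_skip_range: int,
    current_timestep: Callable[[], int],
) -> Callable[[torch.nn.Module], bool]:
    # Build a callback with the signature the hook expects:
    # it returns True when the cached attention output may be reused.
    state = {"iteration": 0}

    def skip_callback(module: torch.nn.Module) -> bool:
        low, high = timestep_skip_range
        if low < current_timestep() < high:
            should_compute_attention = state["iteration"] % iteration_skip_range == 0
            state["iteration"] += 1
            return not should_compute_attention
        # Outside the skippable phase of inference, attention is always computed.
        return False

    return skip_callback


# Example wiring (illustrative): allow skipping every other step while 100 < t < 900.
t = {"value": 500}
callback = make_skip_callback((100, 900), 2, lambda: t["value"])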
