Commit 896fb6d

Fix duplicate variable assignments in SD3's JointAttnProcessor (#8516)
* Fix duplicate variable assignments.
* Fix duplicate variable assignments.
1 parent 7f51f28 commit 896fb6d

File tree

2 files changed: +2 −4 lines changed


src/diffusers/models/attention_processor.py

Lines changed: 1 addition & 4 deletions
@@ -1132,9 +1132,7 @@ def __call__(
         key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
         value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

-        hidden_states = hidden_states = F.scaled_dot_product_attention(
-            query, key, value, dropout_p=0.0, is_causal=False
-        )
+        hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)
         hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
         hidden_states = hidden_states.to(query.dtype)

@@ -1406,7 +1404,6 @@ def __call__(


 class AttnProcessorNPU:
-
     r"""
     Processor for implementing flash attention using torch_npu. Torch_npu supports only fp16 and bf16 data types. If
     fp32 is used, F.scaled_dot_product_attention will be used for computation, but the acceleration effect on NPU is
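Note on the fix itself: in Python, `hidden_states = hidden_states = F.scaled_dot_product_attention(...)` is a chained assignment that binds the same result twice, so the old line was redundant rather than incorrect; the change simply collapses it to a single binding on one line. Below is a minimal standalone sketch of the same call pattern; the shapes and the batch_size/heads/head_dim values are illustrative assumptions, not taken from the diffusers source beyond what the diff's context lines show.

import torch
import torch.nn.functional as F

# Illustrative shapes only; not the values used by SD3 or diffusers.
batch_size, seq_len, heads, head_dim = 2, 16, 8, 64

query = torch.randn(batch_size, seq_len, heads * head_dim)
key = torch.randn(batch_size, seq_len, heads * head_dim)
value = torch.randn(batch_size, seq_len, heads * head_dim)

# Split heads: (batch, heads, seq, head_dim), matching the diff's context lines.
query = query.view(batch_size, -1, heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, heads, head_dim).transpose(1, 2)

# Single assignment; the old `hidden_states = hidden_states = ...` bound the
# same tensor twice and had no effect beyond the redundancy.
hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False)

# Merge heads back: (batch, seq, heads * head_dim), as in the surrounding processor code.
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, heads * head_dim)
hidden_states = hidden_states.to(query.dtype)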

utils/update_metadata.py

Lines changed: 1 addition & 0 deletions
@@ -24,6 +24,7 @@
 Script modified from:
 https://github.com/huggingface/transformers/blob/main/utils/update_metadata.py
 """
+
 import argparse
 import os
 import tempfile
