Commit c012faf

Pei Zhang authored and committed
format fix with ruff cmd
1 parent dbe4725

File tree

1 file changed: +1 −1 lines changed

src/diffusers/models/attention_processor.py

Lines changed: 1 addition & 1 deletion
@@ -2873,7 +2873,7 @@ def __call__(
             partition_spec = self.partition_spec if is_spmd() else None
             hidden_states = flash_attention(query, key, value, causal=False, partition_spec=partition_spec)
         else:
-            logger.warning(f"Unable to use the flash attention pallas kernel API call due to QKV sequence length < 4096.")
+            logger.warning("Unable to use the flash attention pallas kernel API call due to QKV sequence length < 4096.")
             hidden_states = F.scaled_dot_product_attention(
                 query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
             )
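For context, the change matches Ruff rule F541 (f-string without any placeholders): the `f` prefix on a literal with no `{}` fields does nothing, so Ruff flags it and `ruff check --fix` strips it. A minimal, self-contained sketch of the pattern below; the `seq_len` variable and its value are illustrative assumptions, not code from this file:

```python
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

seq_len = 2048  # hypothetical value; the flash-attention path in the diff requires >= 4096

if seq_len < 4096:
    # Before the fix: logger.warning(f"...") -- Ruff F541 flags the f prefix
    # because the literal contains no {placeholders}.
    # After `ruff check --fix`, the same message is a plain string:
    logger.warning(
        "Unable to use the flash attention pallas kernel API call due to QKV sequence length < 4096."
    )
```

Running something like `ruff check --fix src/diffusers/models/attention_processor.py` applies this rewrite automatically, which is consistent with the commit message "format fix with ruff cmd".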

0 commit comments
