Commit 90ce00f

Update layer_norm for norm_added_q and norm_added_k in Attention

Parent: c3eebb2

1 file changed, +4 −1 lines

src/diffusers/models/attention_processor.py

Mode changed: 100644 → 100755
Lines changed: 4 additions & 1 deletion
@@ -272,7 +272,10 @@ def __init__(
         self.to_add_out = None
 
         if qk_norm is not None and added_kv_proj_dim is not None:
-            if qk_norm == "fp32_layer_norm":
+            if qk_norm == "layer_norm":
+                self.norm_added_q = nn.LayerNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
+                self.norm_added_k = nn.LayerNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
+            elif qk_norm == "fp32_layer_norm":
                 self.norm_added_q = FP32LayerNorm(dim_head, elementwise_affine=False, bias=False, eps=eps)
                 self.norm_added_k = FP32LayerNorm(dim_head, elementwise_affine=False, bias=False, eps=eps)
             elif qk_norm == "rms_norm":
