
Commit 65decb6

begin transformer conversion
1 parent 4f3ec53 commit 65decb6

File tree

2 files changed: +392 −2 lines changed


src/diffusers/models/attention_processor.py

Lines changed: 2 additions & 2 deletions
@@ -203,8 +203,8 @@ def __init__(
             self.norm_q = nn.LayerNorm(dim_head * heads, eps=eps)
             self.norm_k = nn.LayerNorm(dim_head * kv_heads, eps=eps)
         elif qk_norm == "rms_norm":
-            self.norm_q = RMSNorm(dim_head, eps=eps)
-            self.norm_k = RMSNorm(dim_head, eps=eps)
+            self.norm_q = RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
+            self.norm_k = RMSNorm(dim_head, eps=eps, elementwise_affine=elementwise_affine)
         elif qk_norm == "rms_norm_across_heads":
             # LTX applies qk norm across all heads
             self.norm_q = RMSNorm(dim_head * heads, eps=eps)
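The diff threads the attention module's elementwise_affine setting through to the q/k RMS norms, so checkpoints whose qk norms were trained without a learnable scale can be loaded without spurious weight parameters. Below is a minimal, self-contained sketch (not the diffusers implementation; the class name SimpleRMSNorm is illustrative) of what an elementwise_affine toggle on an RMS norm means in practice.

import torch
from torch import nn


class SimpleRMSNorm(nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6, elementwise_affine: bool = True):
        super().__init__()
        self.eps = eps
        # Only allocate a learnable per-channel scale when elementwise_affine is requested.
        self.weight = nn.Parameter(torch.ones(dim)) if elementwise_affine else None

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Normalize by the root-mean-square over the last dimension.
        inv_rms = x.pow(2).mean(dim=-1, keepdim=True).add(self.eps).rsqrt()
        x = x * inv_rms
        if self.weight is not None:
            x = x * self.weight
        return x


# With elementwise_affine=False the module holds no parameters at all,
# so nothing extra appears in (or is missing from) a converted state dict.
norm = SimpleRMSNorm(dim=64, elementwise_affine=False)
out = norm(torch.randn(2, 8, 64))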
