native_sparse_attention_pytorch/native_sparse_attention.py (10 additions, 7 deletions)
```diff
@@ -115,7 +115,8 @@ def __init__(
             window_size = sliding_window_size,
             causal = True,
             exact_windowsize = True,
-            autopad = True
+            autopad = True,
+            use_rotary_pos_emb = False
         )
 
         self.sliding_window_size = sliding_window_size
```
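For context, `LocalAttention` here is from lucidrains' local-attention package, where `use_rotary_pos_emb` defaults to `True`; the diff switches it off so that rotary positions can instead be applied once, outside the module (see the comment added in the second hunk). Below is a minimal sketch of the resulting configuration, assuming a recent local-attention release that accepts `(batch, heads, seq, dim)` inputs; the values of `dim_head` and `sliding_window_size` are illustrative, not from the source:

```python
import torch
from local_attention import LocalAttention

# illustrative sizes, not taken from the repo
dim_head = 64
sliding_window_size = 32

attn = LocalAttention(
    dim = dim_head,
    window_size = sliding_window_size,
    causal = True,
    exact_windowsize = True,
    autopad = True,
    use_rotary_pos_emb = False  # the flag added by this diff: rotary is applied outside
)

# (batch, heads, seq, dim_head) - local-attention packs the leading dims internally
q, k, v = (torch.randn(1, 8, 256, dim_head) for _ in range(3))
out = attn(q, k, v)  # same shape as q
```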
```diff
@@ -234,6 +235,10 @@ def forward(
 
         compressed_attn_out = einsum(cattn, cv, 'b h i j, b h j d -> b h i d')
 
+        # for 2. and 3., will give them relative positions with rotary - compressed needs to be handled separately (even if they already have intra block absolute positions)
```
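The added comment refers to the three attention branches of native sparse attention (1. compressed, 2. fine selection, 3. sliding window). A hedged sketch of what it describes, assuming rotary embeddings from the rotary-embedding-torch package; the tensor names and shapes below are illustrative, not from the commit:

```python
import torch
from rotary_embedding_torch import RotaryEmbedding

dim_head = 64
rotary_emb = RotaryEmbedding(dim = dim_head // 2)

# queries / keys shared by the fine-selection (2.) and sliding-window (3.)
# branches get per-token relative positions via rotary
q = torch.randn(1, 8, 256, dim_head)
k = torch.randn(1, 8, 256, dim_head)

rotated_q = rotary_emb.rotate_queries_or_keys(q)
rotated_k = rotary_emb.rotate_queries_or_keys(k)

# the compressed branch (1.) keeps its unrotated q / k: its keys sit at
# coarse block-level positions, so per-token rotary offsets do not apply
# cleanly, even though the blocks carry intra-block absolute positions
```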