@@ -82,11 +82,12 @@ static __global__ void flash_attn_ext_f16(
     const int sequence = blockIdx.z / ne02;
     const int head = blockIdx.z - sequence*ne02;
     const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix.
-    const float * Q_f   = (const float *) (Q + nb03* sequence + nb02* head + nb01*ic0);
-    const half  * K_h   = (const half  *) (K + nb13* sequence + nb12*(head / gqa_ratio));
-    const half  * V_h   = (const half  *) (V + nb13* sequence + nb12*(head / gqa_ratio)); // K and V have same shape
-    const half  * maskh = (const half  *) (mask + nb33*(sequence % ne33) + nb31*ic0);
-    const half2 * mask2 = (const half2 *) maskh;
+    const float * Q_f    = (const float *) (Q + nb03* sequence + nb02* head + nb01*ic0);
+    const half  * K_h    = (const half  *) (K + nb13* sequence + nb12*(head / gqa_ratio));
+    const half  * V_h    = (const half  *) (V + nb13* sequence + nb12*(head / gqa_ratio)); // K and V have same shape
+    const half  * maskh  = (const half  *) (mask + nb33*(sequence % ne33) + nb31*ic0);
+    const half2 * mask2  = (const half2 *) maskh;
+    const float * sinksf = (const float *) sinks;

     const int stride_Q  = nb01 / sizeof(float);
     const int stride_KV = nb11 / sizeof(half);
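
For context on the `head / gqa_ratio` indexing above: with grouped-query attention, `ne02` query heads share `ne12` K/V heads, so consecutive query heads are mapped onto the same K/V matrix by integer division. A minimal host-side sketch of that mapping, using made-up head counts for illustration (only `gqa_ratio` and the division itself come from the kernel):

```cpp
#include <cstdio>

// Sketch: how gqa_ratio = n_q_heads / n_kv_heads groups query heads onto K/V heads.
// The head counts below are illustrative, not taken from the kernel's launch parameters.
int main() {
    const int n_q_heads  = 32;                      // plays the role of ne02
    const int n_kv_heads = 8;                       // plays the role of ne12
    const int gqa_ratio  = n_q_heads / n_kv_heads;  // > 1 Q matrices per K, V matrix

    for (int head = 0; head < n_q_heads; ++head) {
        // Same integer division the kernel uses to pick the K/V matrix for this Q head:
        printf("Q head %2d -> K/V head %d\n", head, head / gqa_ratio);
    }
    return 0;
}
```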
@@ -381,6 +382,53 @@ static __global__ void flash_attn_ext_f16(
         __syncthreads();
     }

+    // Apply attention sinks
+    if (sinksf && blockIdx.y == 0) {
+        const float sinkf = sinksf[head];
+        const half  sinkh = __float2half(sinkf);
+
+#pragma unroll
+        for (int j0 = 0; j0 < ncols; j0 += nwarps) {
+            const int j = j0 + threadIdx.y;
+
+            if (std::is_same<KQ_acc_t, float>::value) {
+                float kqmax_new = fmaxf(KQ_max_f[j0/nwarps], sinkf);
+
+                const float KQ_max_scale = expf(KQ_max_f[j0/nwarps] - kqmax_new);
+                KQ_max_f[j0/nwarps] = kqmax_new;
+
+                KQ_rowsum_f[j0/nwarps] = KQ_rowsum_f[j0/nwarps] * KQ_max_scale + expf(sinkf - KQ_max_f[j0/nwarps]);
+
+                const half2 scale_h2 = make_half2(KQ_max_scale, KQ_max_scale);
+#pragma unroll
+                for (int i0 = 0; i0 < D/2; i0 += warp_size) {
+                    const int i = i0 + threadIdx.x;
+                    if (i0 + warp_size > D/2 && i >= D/2) break;
+                    VKQ2[j*(D_padded/2) + i] *= scale_h2;
+                }
+            } else {
+                half kqmax_old = __low2half(KQ_max_h2[j0/nwarps]);
+                half kqmax_new = fmaxf(kqmax_old, sinkh);
+                KQ_max_h2[j0/nwarps] = __half2half2(kqmax_new);
+
+                const half  KQ_max_scale_h = hexp(kqmax_old - kqmax_new);
+                const half2 KQ_max_scale   = __half2half2(KQ_max_scale_h);
+
+                KQ_rowsum_h2[j0/nwarps] = KQ_rowsum_h2[j0/nwarps] * KQ_max_scale;
+                const half val = hexp(sinkh - kqmax_new);
+                KQ_rowsum_h2[j0/nwarps].x = __hadd(KQ_rowsum_h2[j0/nwarps].x, val);
+
+#pragma unroll
+                for (int i0 = 0; i0 < D/2; i0 += warp_size) {
+                    const int i = i0 + threadIdx.x;
+                    if (i0 + warp_size > D/2 && i >= D/2) break;
+                    VKQ2[j*(D_padded/2) + i] *= KQ_max_scale;
+                }
+            }
+        }
+
+        __syncthreads();
+    }
 #pragma unroll
     for (int j0 = 0; j0 < ncols; j0 += nwarps) {
         const int j_VKQ = j0 + threadIdx.y;
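
The added block is the usual online-softmax update with the sink treated as one extra logit per head: the running maximum is raised to cover the sink, the accumulated V·softmax(K·Q) values and the row sum are rescaled by exp(old_max - new_max), and exp(sink - new_max) is added to the row sum only, since the sink contributes no value vector. A scalar model of that update, with hypothetical names standing in for the per-row state the kernel keeps (this is a sketch of the math, not kernel code):

```cpp
#include <cmath>

// Scalar model of folding an attention-sink logit into one row's softmax state.
struct RowState {
    float  m;    // running maximum        (KQ_max_f[...] in the kernel)
    float  s;    // running denominator    (KQ_rowsum_f[...] in the kernel)
    float *acc;  // accumulated numerator  (the VKQ accumulator for this row)
    int    d;    // head dimension
};

static void apply_sink(RowState &r, float sink) {
    const float m_new = std::fmax(r.m, sink);   // raise the running max to cover the sink logit
    const float scale = std::exp(r.m - m_new);  // rescale everything accumulated so far
    r.m = m_new;
    r.s = r.s*scale + std::exp(sink - m_new);   // sink adds to the denominator only
    for (int i = 0; i < r.d; ++i) {
        r.acc[i] *= scale;                      // no value vector for the sink, so acc is only rescaled
    }
}
```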