
Commit d2c9fdd

old gpu fattn fix
1 parent b0fe1ac commit d2c9fdd

File tree

1 file changed (+5, -0 lines)


ggml/src/ggml-cuda/fattn.cu

Lines changed: 5 additions & 0 deletions
@@ -415,6 +415,11 @@ static best_fattn_kernel ggml_cuda_get_best_fattn_kernel(const int device, const
         return BEST_FATTN_KERNEL_WMMA_F16;
     }
 
+    //kcpp: always force WMMA for older gpus, fix issues like "FlashAttention without tensor cores only supports head sizes 64 and 128."
+    if (ggml_cuda_highest_compiled_arch(cc) <= GGML_CUDA_CC_TURING || cc == GGML_CUDA_CC_TURING) {
+        return BEST_FATTN_KERNEL_WMMA_F16;
+    }
+
     // If there is no suitable kernel for tensor cores or small batch sizes, use the generic kernel for large batch sizes:
     if (prec == GGML_PREC_DEFAULT && fast_fp16_available(cc)) {
         return BEST_FATTN_KERNEL_TILE_F16;
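
For context, below is a minimal standalone C++ sketch of the kernel-selection order after this commit. It is not the ggml API: the GGML_CUDA_CC_TURING_SKETCH value (750) is an assumption, and the *_sketch helpers are simplified stand-ins for ggml_cuda_highest_compiled_arch() and fast_fp16_available(), used purely to illustrate how older GPUs now short-circuit to the WMMA FP16 path before the generic large-batch fallback.

#include <cstdio>

// Illustrative enum; values and ordering are not taken from the real ggml header.
enum best_fattn_kernel {
    BEST_FATTN_KERNEL_NONE,
    BEST_FATTN_KERNEL_TILE_F16,
    BEST_FATTN_KERNEL_WMMA_F16,
};

static const int GGML_CUDA_CC_TURING_SKETCH = 750; // assumed Turing compute capability, for illustration only

// Hypothetical stand-ins: the real helpers consult the list of compiled arches and device properties.
static int  highest_compiled_arch_sketch(int cc) { return cc; }
static bool fast_fp16_sketch(int cc)             { return cc >= 600; }

static best_fattn_kernel pick_kernel_sketch(int cc) {
    // The added check: GPUs up to and including Turing always take the WMMA FP16 path,
    // avoiding "FlashAttention without tensor cores only supports head sizes 64 and 128."
    if (highest_compiled_arch_sketch(cc) <= GGML_CUDA_CC_TURING_SKETCH || cc == GGML_CUDA_CC_TURING_SKETCH) {
        return BEST_FATTN_KERNEL_WMMA_F16;
    }
    // Otherwise fall through to the generic large-batch kernel, as in the surrounding code
    // (the precision check on prec is omitted here for brevity).
    if (fast_fp16_sketch(cc)) {
        return BEST_FATTN_KERNEL_TILE_F16;
    }
    return BEST_FATTN_KERNEL_NONE;
}

int main() {
    printf("cc=610 -> %d\n", pick_kernel_sketch(610)); // pre-Turing: forced WMMA (2)
    printf("cc=750 -> %d\n", pick_kernel_sketch(750)); // Turing: forced WMMA (2)
    printf("cc=860 -> %d\n", pick_kernel_sketch(860)); // newer arch: falls through to TILE_F16 (1)
    return 0;
}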
