
Commit c17f8b5

Merge remote-tracking branch 'upstream' into cuda_graph_plan
2 parents 34b473c + ffa0590


53 files changed: +1715 −1108 lines

ggml/src/ggml-cann/aclnn_ops.cpp

Lines changed: 100 additions & 107 deletions
Large diffs are not rendered by default.

ggml/src/ggml-cpu/ggml-cpu-impl.h

Lines changed: 1 addition & 1 deletion
@@ -68,7 +68,7 @@ struct ggml_compute_params {
 #endif // __VXE2__
 #endif // __s390x__ && __VEC__
 
-#if defined(__ARM_FEATURE_SVE)
+#if defined(__ARM_FEATURE_SVE) && defined(__linux__)
 #include <sys/prctl.h>
 #endif

ggml/src/ggml-cpu/ggml-cpu.c

Lines changed: 6 additions & 1 deletion
@@ -689,8 +689,13 @@ bool ggml_is_numa(void) {
 #endif
 
 static void ggml_init_arm_arch_features(void) {
-#if defined(__linux__) && defined(__aarch64__) && defined(__ARM_FEATURE_SVE)
+#if defined(__aarch64__) && defined(__ARM_FEATURE_SVE)
+#if defined(__linux__)
     ggml_arm_arch_features.sve_cnt = PR_SVE_VL_LEN_MASK & prctl(PR_SVE_GET_VL);
+#else
+    // TODO: add support of SVE for non-linux systems
+#error "TODO: SVE is not supported on this platform. To use SVE, sve_cnt needs to be initialized here."
+#endif
 #endif
 }
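
Note (illustrative, not part of the commit): a minimal standalone sketch of the Linux-only query that this hunk keeps behind the new guards. PR_SVE_GET_VL and PR_SVE_VL_LEN_MASK come from <sys/prctl.h>; on other platforms the hunk above intentionally stops at #error until an equivalent query is added.

// Sketch only: mirrors the guarded prctl() call above, outside of ggml.
#include <cstdio>
#if defined(__aarch64__) && defined(__ARM_FEATURE_SVE) && defined(__linux__)
#include <sys/prctl.h>
#endif

int main() {
#if defined(__aarch64__) && defined(__ARM_FEATURE_SVE) && defined(__linux__)
    // prctl(PR_SVE_GET_VL) returns the SVE vector length (in bytes) plus flag bits;
    // masking with PR_SVE_VL_LEN_MASK keeps only the length, as in ggml_init_arm_arch_features().
    const int sve_cnt = PR_SVE_VL_LEN_MASK & prctl(PR_SVE_GET_VL);
    std::printf("SVE vector length: %d bytes\n", sve_cnt);
#else
    std::printf("SVE vector length query not available on this build/platform\n");
#endif
    return 0;
}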

ggml/src/ggml-cpu/vec.cpp

Lines changed: 1 addition & 1 deletion
@@ -463,9 +463,9 @@ ggml_float ggml_vec_cvar_f32(const int n, float * y, const float * x, const floa
 #endif
     for (; i < n; ++i) {
         float val = x[i] - mean;
+        y[i] = val;
         val *= val;
         sum += (ggml_float)val;
-        y[i] = val;
     }
     return sum/n;
 }
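
Note (illustrative, not part of the commit): the reordering above makes the scalar tail store the centered value x[i] - mean into y before it is squared for the variance accumulator, so y ends up holding deviations rather than squared deviations. A standalone sketch of that behavior:

#include <cstddef>

// Sketch of the scalar loop after the change: y[i] gets the deviation,
// the square only feeds the variance sum.
static double centered_variance(std::size_t n, float * y, const float * x, float mean) {
    double sum = 0.0;
    for (std::size_t i = 0; i < n; ++i) {
        float val = x[i] - mean;
        y[i] = val;          // store before squaring
        val *= val;
        sum += (double) val;
    }
    return n > 0 ? sum / (double) n : 0.0;
}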

ggml/src/ggml-cuda/common.cuh

Lines changed: 0 additions & 7 deletions
@@ -941,13 +941,6 @@ struct ggml_cuda_graph {
     std::vector<cudaGraphNode_t> nodes;
     std::vector<cudaKernelNodeParams> params;
     std::vector<ggml_graph_node_properties> ggml_graph_properties;
-    bool use_cpy_indirection = false;
-    std::vector<char *> cpy_dest_ptrs;
-    char ** dest_ptrs_d;
-    int dest_ptrs_size = 0;
-    // Index to allow each cpy kernel to be aware of it's position within the graph
-    // relative to other cpy nodes.
-    int graph_cpynode_index = -1;
 #endif
 };

ggml/src/ggml-cuda/cpy.cu

Lines changed: 56 additions & 164 deletions
Large diffs are not rendered by default.

ggml/src/ggml-cuda/cpy.cuh

Lines changed: 2 additions & 6 deletions
@@ -2,10 +2,6 @@
 
 #define CUDA_CPY_BLOCK_SIZE 64
 
-void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, ggml_cuda_graph * cuda_graph, const ggml_tensor * src0, ggml_tensor * src1, bool disable_indirection = false);
+void ggml_cuda_cpy(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, ggml_tensor * src1);
 
-void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_cuda_graph * cuda_graph, ggml_tensor * dst);
-
-void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1);
-
-void ggml_cuda_cpy_dest_ptrs_copy(ggml_cuda_graph * cuda_graph, char ** host_dest_ptrs, const int host_dest_ptrs_size, cudaStream_t stream);
+void ggml_cuda_dup(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

ggml/src/ggml-cuda/fattn-tile.cuh

Lines changed: 17 additions & 27 deletions
@@ -540,10 +540,12 @@ static __device__ __forceinline__ void flash_attn_tile_iter(
                 KQ_acc[(i_KQ_0/(np*warp_size))*cpw + jc0] = logit_softcap * tanhf(KQ_acc[(i_KQ_0/(np*warp_size))*cpw + jc0]);
             }
 
-            KQ_acc[(i_KQ_0/(np*warp_size))*cpw + jc0] += (ncols2 > 1 || mask) && (!oob_check || i_KQ < k_VKQ_sup) ?
-                slope*__half2float(mask[j*stride_mask + k_VKQ_0 + i_KQ]) : 0.0f;
+            if (!oob_check || i_KQ < k_VKQ_sup) {
+                KQ_acc[(i_KQ_0/(np*warp_size))*cpw + jc0] += (ncols2 > 1 || mask) ?
+                    slope*__half2float(mask[j*stride_mask + k_VKQ_0 + i_KQ]) : 0.0f;
 
-            KQ_max_new[jc0] = fmaxf(KQ_max_new[jc0], KQ_acc[(i_KQ_0/(np*warp_size))*cpw + jc0]);
+                KQ_max_new[jc0] = fmaxf(KQ_max_new[jc0], KQ_acc[(i_KQ_0/(np*warp_size))*cpw + jc0]);
+            }
         }
 
         KQ_max_new[jc0] = warp_reduce_max<warp_size>(KQ_max_new[jc0]);
@@ -581,10 +583,9 @@ static __device__ __forceinline__ void flash_attn_tile_iter(
         float KQ_sum_add = 0.0f;
 #pragma unroll
         for (int i0 = 0; i0 < nbatch_fa; i0 += np*warp_size) {
-            const float val = expf(KQ_acc[(i0/(np*warp_size))*cpw + jc] - KQ_max[jc]);
-            if (!oob_check || i0 + (threadIdx.y % np)*warp_size + threadIdx.x < k_VKQ_sup) {
-                KQ_sum_add += val;
-            }
+            const float val = !oob_check || i0 + (threadIdx.y % np)*warp_size + threadIdx.x < k_VKQ_sup ?
+                expf(KQ_acc[(i0/(np*warp_size))*cpw + jc] - KQ_max[jc]) : 0.0f;
+            KQ_sum_add += val;
             tmp[i0/(np*warp_size)][jc1] = val;
         }
         KQ_sum[jc] = KQ_sum[jc]*KQ_max_scale + KQ_sum_add;
@@ -975,26 +976,6 @@ static __global__ void flash_attn_tile(
         }
     }
 
-    if (gridDim.y == 1) {
-#pragma unroll
-        for (int jc0 = 0; jc0 < cpw; ++jc0) {
-#ifdef FAST_FP16_AVAILABLE
-            const half2 KQ_sum_jc_inv = make_half2(1.0f/KQ_sum[jc0], 1.0f/KQ_sum[jc0]);
-#pragma unroll
-            for (int i = 0; i < (DVp/2)/warp_size; ++i) {
-                VKQ[jc0*((DVp/2)/warp_size) + i] *= KQ_sum_jc_inv;
-            }
-#else
-            const float KQ_sum_jc_inv = 1.0f/KQ_sum[jc0];
-#pragma unroll
-            for (int i = 0; i < (DVp/2)/warp_size; ++i) {
-                VKQ[jc0*((DVp/2)/warp_size) + i].x *= KQ_sum_jc_inv;
-                VKQ[jc0*((DVp/2)/warp_size) + i].y *= KQ_sum_jc_inv;
-            }
-#endif // FAST_FP16_AVAILABLE
-        }
-    }
-
     // Write back results:
 #pragma unroll
     for (int jc0 = 0; jc0 < cpw; ++jc0) {
@@ -1007,6 +988,8 @@ static __global__ void flash_attn_tile(
             return;
         }
 
+        const float scale = gridDim.y == 1 ? 1.0f/KQ_sum[jc0] : 1.0f;
+
         const int j_dst_unrolled = ((sequence*ne01 + col_Q_0 + j)*ne02 + head0 + c)*gridDim.y + blockIdx.y;
 
 #ifdef FAST_FP16_AVAILABLE
@@ -1017,6 +1000,8 @@ static __global__ void flash_attn_tile(
 #pragma unroll
             for (int i1 = 0; i1 < cpy_ne_D; ++i1) {
                 tmp[i1] = __half22float2(VKQ[jc0*((DVp/2)/warp_size) + i0/warp_size + i1]);
+                tmp[i1].x *= scale;
+                tmp[i1].y *= scale;
             }
             if (i0 + warp_size*cpy_ne_D <= DV/2 || i0 + threadIdx.x*cpy_ne_D < DV/2) {
                 ggml_cuda_memcpy_1<sizeof(tmp)>(&dst[j_dst_unrolled*DV + 2*i0 + threadIdx.x*(2*cpy_ne_D)], tmp);
@@ -1027,6 +1012,11 @@ static __global__ void flash_attn_tile(
 #pragma unroll
         for (int i0 = 0; i0 < DVp; i0 += warp_size*cpy_ne_D) {
             if (i0 + warp_size*cpy_ne_D <= DV || i0 + threadIdx.x*cpy_ne_D < DV) {
+#pragma unroll
+                for (int i1 = 0; i1 < cpy_ne_D/2; ++i1) {
+                    VKQ[jc0*((DVp/2)/warp_size) + i0/(2*warp_size) + i1].x *= scale;
+                    VKQ[jc0*((DVp/2)/warp_size) + i0/(2*warp_size) + i1].y *= scale;
+                }
                 ggml_cuda_memcpy_1<cpy_ne_D*4>(
                     &dst[j_dst_unrolled*DV + i0 + threadIdx.x*cpy_ne_D],
                     &VKQ[jc0*((DVp/2)/warp_size) + i0/(2*warp_size)]);
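
Note (illustrative, not from the commit): the hunks above drop the separate normalization pass over VKQ and instead fold a per-column scale (1/KQ_sum when gridDim.y == 1, i.e. no later combine pass; 1.0 otherwise) into the write-back loops. A simplified host-side analogue of that pattern, with hypothetical names:

#include <cstddef>

// Simplified analogue: normalization fused into the store instead of a prior pass.
static void write_back(float * dst, const float * VKQ, std::size_t n,
                       float KQ_sum, bool single_pass /* plays the role of gridDim.y == 1 */) {
    const float scale = single_pass ? 1.0f/KQ_sum : 1.0f;
    for (std::size_t i = 0; i < n; ++i) {
        dst[i] = VKQ[i]*scale; // scaled once, at write-back
    }
}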

ggml/src/ggml-cuda/fattn-vec.cuh

Lines changed: 2 additions & 7 deletions
@@ -516,8 +516,8 @@ void ggml_cuda_flash_attn_ext_vec_case_impl(ggml_backend_cuda_context & ctx, ggm
     const int nthreads = ggml_cuda_fattn_vec_get_nthreads_host(cc);
     const int nwarps = nthreads / WARP_SIZE;
     fattn_kernel_t fattn_kernel = flash_attn_ext_vec<D, cols_per_block, type_K, type_V, use_logit_softcap>;
-    constexpr bool need_f16_K = false;
-    constexpr bool need_f16_V = false;
+    const bool need_f16_K = type_K == GGML_TYPE_F16;
+    const bool need_f16_V = type_V == GGML_TYPE_F16;
     constexpr size_t nbytes_shared = 0;
     launch_fattn<D, cols_per_block, 1>(ctx, dst, fattn_kernel, nwarps, nbytes_shared, D, need_f16_K, need_f16_V, false);
 }
@@ -526,11 +526,6 @@ template <int D, ggml_type type_K, ggml_type type_V>
 void ggml_cuda_flash_attn_ext_vec_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * KQV = dst;
     const ggml_tensor * Q = dst->src[0];
-    const ggml_tensor * K = dst->src[1];
-    const ggml_tensor * V = dst->src[2];
-
-    GGML_ASSERT(K->type == type_K);
-    GGML_ASSERT(V->type == type_V);
 
     float logit_softcap;
     memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float));

ggml/src/ggml-cuda/fattn.cu

Lines changed: 12 additions & 7 deletions
@@ -116,11 +116,15 @@ static void ggml_cuda_flash_attn_ext_mma_f16(ggml_backend_cuda_context & ctx, gg
     }
 }
 
-#define FATTN_VEC_CASE(D, type_K, type_V) \
-    if (Q->ne[0] == (D) && K->type == (type_K) && V->type == (type_V)) { \
-        ggml_cuda_flash_attn_ext_vec_case<D, type_K, type_V>(ctx, dst); \
-        return; \
-    } \
+#define FATTN_VEC_CASE(D, type_K, type_V) \
+    { \
+        const bool type_K_okay = K->type == (type_K) || (K->type == GGML_TYPE_F32 && (type_K) == GGML_TYPE_F16); \
+        const bool type_V_okay = V->type == (type_V) || (V->type == GGML_TYPE_F32 && (type_V) == GGML_TYPE_F16); \
+        if (Q->ne[0] == (D) && type_K_okay && type_V_okay) { \
+            ggml_cuda_flash_attn_ext_vec_case<D, type_K, type_V>(ctx, dst); \
+            return; \
+        } \
+    } \
 
 #define FATTN_VEC_CASES_ALL_D(type_K, type_V) \
     FATTN_VEC_CASE( 64, type_K, type_V) \
@@ -247,6 +251,7 @@ static best_fattn_kernel ggml_cuda_get_best_fattn_kernel(const int device, const
 #endif // GGML_CUDA_FA_ALL_QUANTS
 
     switch (K->type) {
+        case GGML_TYPE_F32:
         case GGML_TYPE_F16:
             break;
         case GGML_TYPE_Q4_1:
@@ -272,7 +277,7 @@ static best_fattn_kernel ggml_cuda_get_best_fattn_kernel(const int device, const
     // If Turing tensor cores available, use them:
     if (turing_mma_available(cc) && K->ne[1] % FATTN_KQ_STRIDE == 0 && Q->ne[0] != 40) {
         if (can_use_vector_kernel) {
-            if (K->type == GGML_TYPE_F16 && V->type == GGML_TYPE_F16) {
+            if (!ggml_is_quantized(K->type) && !ggml_is_quantized(V->type)) {
                 if (cc >= GGML_CUDA_CC_ADA_LOVELACE && Q->ne[1] == 1 && Q->ne[3] == 1 && !(gqa_ratio > 4 && K->ne[1] >= 8192)) {
                     return BEST_FATTN_KERNEL_VEC;
                 }
@@ -305,7 +310,7 @@ static best_fattn_kernel ggml_cuda_get_best_fattn_kernel(const int device, const
 
     // If there are no tensor cores available, use the generic tile kernel:
     if (can_use_vector_kernel) {
-        if (K->type == GGML_TYPE_F16 && V->type == GGML_TYPE_F16) {
+        if (!ggml_is_quantized(K->type) && !ggml_is_quantized(V->type)) {
            if (Q->ne[1] == 1) {
                if (!gqa_opt_applies) {
                    return BEST_FATTN_KERNEL_VEC;
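
Note (illustrative, not from the commit): the new FATTN_VEC_CASE macro accepts an F32 K/V tensor for an F16 kernel instantiation (the fattn-vec.cuh hunk above derives need_f16_K/V from the instantiation type accordingly). The matching rule can be summarized as a small predicate; the helper name below is hypothetical:

#include "ggml.h"

// Hypothetical helper mirroring type_K_okay / type_V_okay in the macro above:
// a tensor type matches a kernel case either exactly, or when the tensor is F32
// and the case was compiled for F16.
static bool fattn_vec_type_okay(ggml_type tensor_type, ggml_type case_type) {
    return tensor_type == case_type ||
           (tensor_type == GGML_TYPE_F32 && case_type == GGML_TYPE_F16);
}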
