|
| 1 | +#include "multiadd.cuh" |
| 2 | + |
// Sums nused sub-rows of src0 into one row of dst:
//   dst[i1][i0] = sum_{j=0..nused-1} src0_row(i1)[j*ne0 + i0]
// Launch layout: 1D grid, 1D blocks, one thread per output element
// (ne0*ne1 elements total); the launcher rounds the grid up, so the
// tail is bounds-checked here.
//   ne0, ne1 : dst row length / number of dst rows
//   nb1      : dst row stride in bytes
//   nb01     : src0 row stride in bytes
// NOTE(review): each src0 row is assumed to hold at least nused*ne0
// contiguous floats (addend j of element i0 at offset j*ne0 + i0) —
// confirm against the op's construction site.
static __global__ void multi_add_f32(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, const char * src0, char * dst) {
    // Cast before multiplying: blockDim.x*blockIdx.x is a 32-bit unsigned
    // product and silently overflows for grids past 2^32 elements.
    const int64_t i = (int64_t)blockDim.x*blockIdx.x + threadIdx.x;
    const int64_t k = ne0*ne1;
    if (i >= k) {
        return;
    }
    // Keep the decomposed indices 64-bit: i1 can exceed INT_MAX when ne1 is large.
    const int64_t i1 = i / ne0; // output row
    const int64_t i0 = i % ne0; // column within the row
    float * result = (float *)(dst + i1*nb1);
    const float * s = (const float *)(src0 + i1*nb01) + i0;
    if (nused == 1) {
        // Single addend: plain copy, no summation needed.
        result[i0] = s[0];
    } else {
        float sum = s[0] + s[ne0];
        for (int j = 2; j < nused; ++j) sum += s[j*ne0];
        result[i0] = sum;
    }
}
| 21 | + |
// Host-side launcher for multi_add_f32: one thread per output element,
// CUDA_MULTI_ADD_BLOCK_SIZE threads per block, grid rounded up to cover
// all ne0*ne1 elements. Asynchronous on `stream`.
static void multi_add_f32_cuda(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, const char * src0, char * dst, cudaStream_t stream) {
    const int64_t nelements = ne0 * ne1;
    const int num_blocks = (nelements + CUDA_MULTI_ADD_BLOCK_SIZE - 1) / CUDA_MULTI_ADD_BLOCK_SIZE; // ceil-div
    multi_add_f32<<<num_blocks, CUDA_MULTI_ADD_BLOCK_SIZE, 0, stream>>>(nused, ne0, ne1, nb1, nb01, src0, dst);
}
| 27 | + |
// ggml backend entry point for the MULTI_ADD op.
// Preconditions (asserted): F32 dst with contiguous elements in dim 0 and
// trivial dims 2/3; op_params[0] holds the number of addends (>= 1).
// Launches asynchronously on the context's stream.
void ggml_cuda_op_multi_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    GGML_ASSERT(dst->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->ne[2] == 1 && dst->ne[3] == 1);
    GGML_ASSERT(dst->nb[0] == sizeof(float));

    const int nused = dst->op_params[0]; // number of sub-rows summed per output row
    GGML_ASSERT(nused >= 1);

    const ggml_tensor * src = dst->src[0];
    multi_add_f32_cuda(nused, dst->ne[0], dst->ne[1], dst->nb[1], src->nb[1],
            (const char *)src->data, (char *)dst->data, ctx.stream());
}
| 38 | + |
| 39 | + |
// Fused multiply + multi-add:
//   dst[i1][i0] = sum_{j=0..nused-1} src0[i1][j][i0] * src1[i1][j][0]
// i.e. a weighted sum of nused rows of src0, with one scalar weight per
// row taken from src1 (src1->ne[0] == 1 is asserted by the caller).
// Launch layout: 1D grid, 1D blocks, one thread per output element
// (ne0*ne1 total); tail threads exit at the bounds check.
//   ne0, ne1   : dst row length / number of dst rows
//   nb1        : dst row stride in bytes
//   nb01, nb02 : src0 strides in bytes over dims 1 and 2
//   nb11, nb12 : src1 strides in bytes over dims 1 and 2
static __global__ void mul_multi_add_f32(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, int64_t nb02, int64_t nb11, int64_t nb12, const char * src0, const char * src1, char * dst) {
    // Cast before multiplying: blockDim.x*blockIdx.x is a 32-bit unsigned
    // product and silently overflows for grids past 2^32 elements.
    const int64_t i = (int64_t)blockDim.x*blockIdx.x + threadIdx.x;
    const int64_t k = ne0*ne1;
    if (i >= k) {
        return;
    }
    // Keep the decomposed indices 64-bit: i1 can exceed INT_MAX when ne1 is large.
    const int64_t i1 = i / ne0; // output row
    const int64_t i0 = i % ne0; // column within the row
    float * result = (float *)(dst + i1*nb1);

    // Byte cursors over the nused rows of src0/src1 belonging to output row i1.
    const char * c0 = src0 + i1*nb02;
    const char * c1 = src1 + i1*nb12;

    float sum = 0.0f; // float literal: avoid accidental double promotion
    for (int j = 0; j < nused; ++j) {
        const float * x0 = (const float *)c0;
        const float * x1 = (const float *)c1;
        sum += x0[i0] * x1[0];
        c0 += nb01;
        c1 += nb11;
    }
    result[i0] = sum;
}
| 63 | + |
// Host-side launcher for mul_multi_add_f32: one thread per output element,
// CUDA_MULTI_ADD_BLOCK_SIZE threads per block, grid rounded up to cover
// all ne0*ne1 elements. Asynchronous on `stream`.
static void mul_multi_add_f32_cuda(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, int64_t nb02, int64_t nb11, int64_t nb12,
        const char * src0, const char * src1, char * dst, cudaStream_t stream) {
    const int64_t nelements = ne0 * ne1;
    const int num_blocks = (nelements + CUDA_MULTI_ADD_BLOCK_SIZE - 1) / CUDA_MULTI_ADD_BLOCK_SIZE; // ceil-div
    mul_multi_add_f32<<<num_blocks, CUDA_MULTI_ADD_BLOCK_SIZE, 0, stream>>>(nused, ne0, ne1, nb1, nb01, nb02, nb11, nb12, src0, src1, dst);
}
| 70 | + |
// ggml backend entry point for the fused MUL + MULTI_ADD op.
// dst[i1][i0] = sum over src0's dim-1 of src0[i1][j][i0] * src1[i1][j][0].
// Preconditions (asserted): all three tensors F32; shapes line up so that
// src0's dim 2 maps to dst's dim 1, src1 broadcasts a scalar per src0 row
// (src1->ne[0] == 1), and dim 3 is trivial. Asynchronous on the context's
// stream; the number of addends is src0->ne[1].
void ggml_cuda_op_mul_multi_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_F32);
    GGML_ASSERT( dst->type == GGML_TYPE_F32);
    GGML_ASSERT(src0->ne[0] == dst->ne[0]);
    GGML_ASSERT(src0->ne[2] == dst->ne[1]);
    GGML_ASSERT(src0->ne[1] == src1->ne[1]);
    GGML_ASSERT(src0->ne[2] == src1->ne[2]);
    GGML_ASSERT(src0->ne[3] == src1->ne[3]);
    GGML_ASSERT(src0->ne[3] == 1);
    GGML_ASSERT(src1->ne[0] == 1);

    mul_multi_add_f32_cuda(src0->ne[1], dst->ne[0], dst->ne[1], dst->nb[1], src0->nb[1], src0->nb[2], src1->nb[1], src1->nb[2],
            (const char *)src0->data, (const char *)src1->data, (char *)dst->data, ctx.stream());
}
0 commit comments