
Commit 7a461f1

Merge branch 'ikawrakow:main' into main
2 parents 9350952 + 2522c97 commit 7a461f1

15 files changed: +270 additions, −53 deletions

common/common.cpp

Lines changed: 7 additions & 0 deletions

@@ -1020,6 +1020,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.fused_up_gate = false;
         return true;
     }
+    if (arg == "-no-mmad" || arg == "--no-fused-mul-multiadd") {
+        params.fused_mmad = false;
+        return true;
+    }
     if (arg == "-ser" || arg == "--smart-expert-reduction") {
         CHECK_ARG
         auto values = string_split_pairs<int,float>(argv[i], ',');
@@ -1806,6 +1810,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     options.push_back({ "*", "-fmoe, --fused-moe", "enable fused MoE (default: %s)", params.fused_moe_up_gate ? "enabled" : "disabled" });
     options.push_back({ "*", "-ger, --grouped-expert-routing", "enable grouped expert routing (default: %s)", params.grouped_expert_routing ? "enabled" : "disabled" });
     options.push_back({ "*", "-no-fug, --no-fused-up-gate", "disable fused up-gate (default: %s)", params.fused_up_gate ? "enabled" : "disabled" });
+    options.push_back({ "*", "-no-mmad, --no-fused-mul-multiadd", "disable fused mul-multi_add (default: %s)", params.fused_mmad ? "enabled" : "disabled" });
     options.push_back({ "*", "-ser, --smart-expert-reduction,", "experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts});
     options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
                                                     "in conversation mode, this will be used as system prompt\n"
@@ -2762,6 +2767,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.fused_moe_up_gate = params.fused_moe_up_gate;
     cparams.grouped_expert_routing = params.grouped_expert_routing;
     cparams.fused_up_gate = params.fused_up_gate;
+    cparams.fused_mmad = params.fused_mmad;
     cparams.min_experts = params.min_experts;
     cparams.thresh_experts = params.thresh_experts;
     cparams.only_active_experts = params.only_active_exps;
@@ -3879,6 +3885,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "fused_moe: %s # default: false\n", params.fused_moe_up_gate ? "true" : "false");
     fprintf(stream, "grouped_expert_routing: %s # default: false\n", params.grouped_expert_routing ? "true" : "false");
     fprintf(stream, "fused_up_gate: %s # default: true\n", params.fused_up_gate ? "true" : "false");
+    fprintf(stream, "fused_mmad: %s # default: true\n", params.fused_mmad ? "true" : "false");
     fprintf(stream, "ser: %d,%g # default: -1,0\n", params.min_experts, params.thresh_experts);
     fprintf(stream, "temp: %f # default: 0.8\n", sparams.temp);
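
The new flag only flips a boolean that is carried through to the context parameters. Below is a minimal sketch (not part of the commit) of the programmatic equivalent, assuming the usual flow in this codebase from gpt_params into llama_context_params_from_gpt_params; the comment about the fallback path is an assumption rather than something shown in this diff.

    gpt_params params;
    params.fused_mmad = false;   // same effect as -no-mmad / --no-fused-mul-multiadd
    llama_context_params cparams = llama_context_params_from_gpt_params(params);
    // cparams.fused_mmad is now false; presumably the graph builder then keeps the
    // separate mul and multi_add ops instead of emitting GGML_OP_MUL_MULTI_ADD.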

common/common.h

Lines changed: 1 addition & 0 deletions

@@ -235,6 +235,7 @@ struct gpt_params {
     int attn_max_batch = 0; // Max batch size to use when computing attention (only applicable if flash_attn = false)
     bool fused_moe_up_gate = false; // fused up*unary(gate) op for MoE models
     bool fused_up_gate = true; // fused up*unary(gate) op
+    bool fused_mmad = true; // fused mul+multi_add op
     bool grouped_expert_routing = false; // if to use grouped expert routing (BailingMoeV2 arch)
     int min_experts = -1;
     float thresh_experts = 0;

ggml/include/ggml.h

Lines changed: 6 additions & 0 deletions

@@ -619,6 +619,7 @@ extern "C" {
         GGML_OP_OUT_PROD,
         GGML_OP_FUSED_UP_GATE,
         GGML_OP_MOE_FUSED_UP_GATE,
+        GGML_OP_MUL_MULTI_ADD,

         GGML_OP_SCALE,
         GGML_OP_SET,

@@ -1083,6 +1084,11 @@ extern "C" {
             struct ggml_tensor  * a,
             int                   n_experts);

+    GGML_API struct ggml_tensor * ggml_mul_multi_add(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            struct ggml_tensor  * b);
+
     // dst = a
     // view(dst, nb1, nb2, nb3, offset) += b
     // return dst
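
A brief usage sketch of the new API, not taken from this commit: it assumes ggml_mul_multi_add follows the usual ggml graph-building pattern and the shape constraints enforced by the CUDA kernel further down (src0 of shape (ne0, n_used, ne1), src1 of shape (1, n_used, ne1), result of shape (ne0, ne1)); the tensor sizes are purely illustrative.

    // Illustrative only: combine n_used = 8 rows per output slot, weighted by per-row scalars.
    struct ggml_init_params ip = { /*mem_size =*/ 16*1024*1024, /*mem_buffer =*/ NULL, /*no_alloc =*/ false };
    struct ggml_context * ctx = ggml_init(ip);

    struct ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 4096, 8, 32); // (ne0, n_used, ne1)
    struct ggml_tensor * w = ggml_new_tensor_3d(ctx, GGML_TYPE_F32,    1, 8, 32); // one scalar weight per row

    struct ggml_tensor * out = ggml_mul_multi_add(ctx, a, w); // expected shape (4096, 32)

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, out);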

ggml/src/ggml-cuda.cu

Lines changed: 5 additions & 0 deletions

@@ -46,6 +46,7 @@
 #include "ggml-cuda/conv2d-dw.cuh"
 #include "ggml-cuda/set-rows.cuh"
 #include "ggml-cuda/argmax.cuh"
+#include "ggml-cuda/multiadd.cuh"

 #include <algorithm>
 #include <array>

@@ -3178,6 +3179,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
         case GGML_OP_MULTI_ADD:
             ggml_cuda_op_multi_add(ctx, dst);
             break;
+        case GGML_OP_MUL_MULTI_ADD:
+            ggml_cuda_op_mul_multi_add(ctx, dst);
+            break;
         case GGML_OP_ACC:
             ggml_cuda_op_acc(ctx, dst);
             break;

@@ -4408,6 +4412,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
         case GGML_OP_ADD:
         case GGML_OP_ADD_ID:
         case GGML_OP_MULTI_ADD:
+        case GGML_OP_MUL_MULTI_ADD:
         case GGML_OP_MUL:
         case GGML_OP_DIV:
         case GGML_OP_FUSED_RMS_NORM:

ggml/src/ggml-cuda/multiadd.cu

Lines changed: 87 additions & 0 deletions

@@ -0,0 +1,87 @@
+#include "multiadd.cuh"
+
+static __global__ void multi_add_f32(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, const char * src0, char * dst) {
+    const int64_t i = blockDim.x*blockIdx.x + threadIdx.x;
+    int64_t k = ne0*ne1;
+    if (i >= k) {
+        return;
+    }
+    int i1 = i / ne0;
+    int i0 = i % ne0;
+    float * result = (float *)(dst + i1*nb1);
+    const float * s = (const float *)(src0 + i1*nb01) + i0;
+    if (nused == 1) {
+        result[i0] = s[0];
+    } else {
+        float sum = s[0] + s[ne0];
+        for (int j = 2; j < nused; ++j) sum += s[j*ne0];
+        result[i0] = sum;
+    }
+}
+
+static void multi_add_f32_cuda(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, const char * src0, char * dst, cudaStream_t stream) {
+    int64_t k = ne0 * ne1;
+    const int num_blocks = (k + CUDA_MULTI_ADD_BLOCK_SIZE - 1) / CUDA_MULTI_ADD_BLOCK_SIZE;
+    multi_add_f32<<<num_blocks, CUDA_MULTI_ADD_BLOCK_SIZE, 0, stream>>>(nused, ne0, ne1, nb1, nb01, src0, dst);
+}
+
+void ggml_cuda_op_multi_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    GGML_ASSERT(dst->type == GGML_TYPE_F32);
+    GGML_ASSERT(dst->ne[2] == 1 && dst->ne[3] == 1);
+    GGML_ASSERT(dst->nb[0] == sizeof(float));
+    int nused = dst->op_params[0];
+    GGML_ASSERT(nused >= 1);
+    const char * src0 = (const char *)dst->src[0]->data;
+    cudaStream_t stream = ctx.stream();
+    multi_add_f32_cuda(nused, dst->ne[0], dst->ne[1], dst->nb[1], dst->src[0]->nb[1], src0, (char *)dst->data, stream);
+}
+
+
+static __global__ void mul_multi_add_f32(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, int64_t nb02, int64_t nb11, int64_t nb12, const char * src0, const char * src1, char * dst) {
+    const int64_t i = blockDim.x*blockIdx.x + threadIdx.x;
+    int64_t k = ne0*ne1;
+    if (i >= k) {
+        return;
+    }
+    int i1 = i / ne0;
+    int i0 = i % ne0;
+    float * result = (float *)(dst + i1*nb1);
+
+    auto c0 = src0 + i1*nb02;
+    auto c1 = src1 + i1*nb12;
+
+    float sum = 0;
+    for (int j = 0; j < nused; ++j) {
+        auto x0 = (const float *)c0;
+        auto x1 = (const float *)c1;
+        sum += x0[i0] * x1[0];
+        c0 += nb01;
+        c1 += nb11;
+    }
+    result[i0] = sum;
+}
+
+static void mul_multi_add_f32_cuda(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, int64_t nb02, int64_t nb11, int64_t nb12,
+        const char * src0, const char * src1, char * dst, cudaStream_t stream) {
+    int64_t k = ne0 * ne1;
+    const int num_blocks = (k + CUDA_MULTI_ADD_BLOCK_SIZE - 1) / CUDA_MULTI_ADD_BLOCK_SIZE;
+    mul_multi_add_f32<<<num_blocks, CUDA_MULTI_ADD_BLOCK_SIZE, 0, stream>>>(nused, ne0, ne1, nb1, nb01, nb02, nb11, nb12, src0, src1, dst);
+}
+
+void ggml_cuda_op_mul_multi_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    auto src0 = dst->src[0];
+    auto src1 = dst->src[1];
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F32);
+    GGML_ASSERT(src0->ne[0] == dst->ne[0]);
+    GGML_ASSERT(src0->ne[2] == dst->ne[1]);
+    GGML_ASSERT(src0->ne[1] == src1->ne[1]);
+    GGML_ASSERT(src0->ne[2] == src1->ne[2]);
+    GGML_ASSERT(src0->ne[3] == src1->ne[3]);
+    GGML_ASSERT(src0->ne[3] == 1);
+    GGML_ASSERT(src1->ne[0] == 1);
+
+    mul_multi_add_f32_cuda(src0->ne[1], dst->ne[0], dst->ne[1], dst->nb[1], src0->nb[1], src0->nb[2], src1->nb[1], src1->nb[2],
+            (const char *)src0->data, (const char *)src1->data, (char *)dst->data, ctx.stream());
+}
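
For reference, a hypothetical CPU version (not part of the commit) of what mul_multi_add_f32 computes on contiguous tensors: each output row is a weighted sum of the n_used sub-rows of src0, with the scalar weights taken from src1, i.e. dst[i1][i0] = sum over j of src0[i1][j][i0] * src1[i1][j][0].

    // Hypothetical reference, assuming contiguous float tensors:
    //   src0 has ggml shape (ne0, n_used, ne1), src1 has shape (1, n_used, ne1),
    //   dst  has shape (ne0, ne1).
    static void mul_multi_add_ref(int n_used, int64_t ne0, int64_t ne1,
                                  const float * src0, const float * src1, float * dst) {
        for (int64_t i1 = 0; i1 < ne1; ++i1) {
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                float sum = 0.0f;
                for (int j = 0; j < n_used; ++j) {
                    // row j of slice i1 of src0, scaled by the j-th weight of slice i1 of src1
                    sum += src0[(i1*n_used + j)*ne0 + i0] * src1[i1*n_used + j];
                }
                dst[i1*ne0 + i0] = sum;
            }
        }
    }

On the GPU side the kernel produces the same sum with one thread per (i0, i1) output element, using nb01/nb02 and nb11/nb12 as byte strides so non-contiguous layouts are also handled.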

ggml/src/ggml-cuda/multiadd.cuh

Lines changed: 14 additions & 0 deletions

@@ -0,0 +1,14 @@
+//
+// Copyright (C) 2023-2024 The ggml authors
+// Copyright (C) 2024 Iwan Kawrakow
+// MIT license
+// SPDX-License-Identifier: MIT
+//
+
+#include "common.cuh"
+
+#define CUDA_MULTI_ADD_BLOCK_SIZE 256
+
+void ggml_cuda_op_multi_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
+void ggml_cuda_op_mul_multi_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

ggml/src/ggml-cuda/unary.cu

Lines changed: 0 additions & 36 deletions

@@ -59,25 +59,6 @@ static __global__ void fused_mul_silu_f32(const float * x, const float * y, floa
     dst[i] = x[i] * y[i] / (1.0f + expf(-x[i]));
 }

-static __global__ void multi_add_f32(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, const char * src0, char * dst) {
-    const int64_t i = blockDim.x*blockIdx.x + threadIdx.x;
-    int64_t k = ne0*ne1;
-    if (i >= k) {
-        return;
-    }
-    int i1 = i / ne0;
-    int i0 = i % ne0;
-    float * result = (float *)(dst + i1*nb1);
-    const float * s = (const float *)(src0 + i1*nb01) + i0;
-    if (nused == 1) {
-        result[i0] = s[0];
-    } else {
-        float sum = s[0] + s[ne0];
-        for (int j = 2; j < nused; ++j) sum += s[j*ne0];
-        result[i0] = sum;
-    }
-}
-
 static __global__ void fused_mul_relu_f32(const float * x, const float * y, float * dst, const int k) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;

@@ -261,23 +242,6 @@ static void sqrt_f32_cuda(const float * x, float * dst, const int k, cudaStream_
     sqrt_f32<<<num_blocks, CUDA_SQRT_BLOCK_SIZE, 0, stream>>>(x, dst, k);
 }

-static void multi_add_f32_cuda(int nused, int64_t ne0, int64_t ne1, int64_t nb1, int64_t nb01, const char * src0, char * dst, cudaStream_t stream) {
-    int64_t k = ne0 * ne1;
-    const int num_blocks = (k + CUDA_MULTI_ADD_BLOCK_SIZE - 1) / CUDA_MULTI_ADD_BLOCK_SIZE;
-    multi_add_f32<<<num_blocks, CUDA_MULTI_ADD_BLOCK_SIZE, 0, stream>>>(nused, ne0, ne1, nb1, nb01, src0, dst);
-}
-
-void ggml_cuda_op_multi_add(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    GGML_ASSERT(dst->type == GGML_TYPE_F32);
-    GGML_ASSERT(dst->ne[2] == 1 && dst->ne[3] == 1);
-    GGML_ASSERT(dst->nb[0] == sizeof(float));
-    int nused = dst->op_params[0];
-    GGML_ASSERT(nused >= 1);
-    const char * src0 = (const char *)dst->src[0]->data;
-    cudaStream_t stream = ctx.stream();
-    multi_add_f32_cuda(nused, dst->ne[0], dst->ne[1], dst->nb[1], dst->src[0]->nb[1], src0, (char *)dst->data, stream);
-}
-
 void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const float * src0_d = (const float *)src0->data;
