
Commit c7b8ab7

vulkan: Optimize soft_max
Large soft_max could already saturate memory, but small/medium sizes were pretty slow. The bulk of the gains for them comes from using a smaller workgroup size, and making the workgroup size match the subgroup size also makes the barriers much cheaper. Cache some values in locals to avoid refetching/recomputing. And stamp out a few "template instantiations" so smaller cases will fully unroll. Add a missing early return for OOB rows. This happens when there are more than 512 rows and the dispatch is 512 x H.
1 parent ae8de6d commit c7b8ab7
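
To make the "more than 512 rows" case concrete, here is a small host-side C++ sketch. It is not part of the commit; the 600-row example and the 512 x H dispatch layout are assumptions taken from the message above. It counts how many workgroups of such a dispatch land past the last row and therefore need the new early return:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Example values, chosen only for illustration: 600 rows needs a
        // dispatch of 512 x 2 workgroups, so some workgroups index past the end.
        const uint32_t nrows_x = 600;
        const uint32_t H = (nrows_x + 511) / 512;   // number of workgroups in y

        uint32_t oob = 0;
        for (uint32_t y = 0; y < H; ++y) {
            for (uint32_t x = 0; x < 512; ++x) {
                // Mirrors the shader's rowx = wg.z*262144 + wg.y*512 + wg.x (wg.z == 0 here).
                const uint32_t rowx = y * 512 + x;
                if (rowx >= nrows_x) {
                    ++oob;   // these workgroups must return early instead of touching OOB rows
                }
            }
        }
        std::printf("%u of %u workgroups are out of bounds\n", oob, 512 * H);
        return 0;
    }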

3 files changed: +110 −25 lines


ggml/src/ggml-vulkan/ggml-vulkan.cpp

Lines changed: 4 additions & 2 deletions
@@ -388,6 +388,7 @@ struct vk_op_soft_max_push_constants {
     float m0;
     float m1;
     uint32_t n_head_log2;
+    uint32_t nrows_x;
 };

 struct vk_op_argsort_push_constants {
@@ -1496,8 +1497,8 @@ static void ggml_vk_load_shaders(vk_device& device) {

     ggml_vk_create_pipeline(device, device->pipeline_diag_mask_inf_f32, "diag_mask_inf_f32", diag_mask_inf_f32_len, diag_mask_inf_f32_data, "main", 2, sizeof(vk_op_diag_mask_push_constants), {512, 1, 1}, {}, 1);

-    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32, "soft_max_f32", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, {}, 1);
-    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16, "soft_max_f32_f16", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, {}, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32, "soft_max_f32", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
+    ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16, "soft_max_f32_f16", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);

     ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f32, "rope_norm_f32", rope_norm_f32_len, rope_norm_f32_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
     ggml_vk_create_pipeline(device, device->pipeline_rope_norm_f16, "rope_norm_f16", rope_norm_f16_len, rope_norm_f16_data, "main", 4, sizeof(vk_op_rope_push_constants), {1, 512, 1}, {}, 1);
@@ -4581,6 +4582,7 @@ static void ggml_vk_soft_max(ggml_backend_vk_context * ctx, vk_context& subctx,
         scale, max_bias,
         m0, m1,
         n_head_log2,
+        nrows_x,
     }, dryrun);
 }

ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp

Lines changed: 98 additions & 23 deletions
@@ -1,6 +1,7 @@
 #version 450

-#extension GL_EXT_shader_16bit_storage : require
+#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
+#extension GL_EXT_control_flow_attributes : enable

 layout (push_constant) uniform parameter
 {
@@ -11,26 +12,32 @@ layout (push_constant) uniform parameter
     float m0;
     float m1;
     uint n_head_log2;
+    uint nrows_x;
 } p;

 #include "types.comp"

-#extension GL_EXT_control_flow_attributes : enable
-#define BLOCK_SIZE 512
-
-layout(local_size_x = BLOCK_SIZE, local_size_y = 1, local_size_z = 1) in;
+layout(constant_id = 0) const uint BLOCK_SIZE = 32;
+layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

 layout (binding = 0) readonly buffer X {A_TYPE data_a[];};
 layout (binding = 1) readonly buffer Y {B_TYPE data_b[];};
 layout (binding = 2) buffer D {D_TYPE data_d[];};

 shared FLOAT_TYPE vals[BLOCK_SIZE];

-void main() {
+// num_iters is the number of BLOCK_SIZE loop iterations we need to iterate
+// over all the columns. The main function tries to pass a constant here,
+// as if it were a template function, to allow unrolling.
+void soft_max(uint num_iters) {
     const uint tid = gl_LocalInvocationID.x;
     const uint rowx = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x;
     const uint rowy = rowx % p.KY;

+    if (rowx >= p.nrows_x) {
+        return;
+    }
+
     float slope = 1.0f;

     // ALiBi
@@ -46,19 +53,37 @@ void main() {
     // Find max
     FLOAT_TYPE max_val = uintBitsToFloat(0xFF800000);

-    [[unroll]] for (uint col0 = 0; col0 < p.KX; col0 += BLOCK_SIZE) {
+    // Cache values while we compute the max, so we don't need to read them
+    // again when we're ready to compute exp(x-max).
+    const uint DATA_CACHE_SIZE = 16;
+    FLOAT_TYPE data_cache[DATA_CACHE_SIZE];
+
+    [[unroll]] for (uint col0 = 0, idx = 0; idx < num_iters; col0 += BLOCK_SIZE, ++idx) {
         const uint col = col0 + tid;

-        if (col >= p.KX) {
-            break;
+        FLOAT_TYPE a = FLOAT_TYPE(0);
+        if (col < p.KX) {
+            a = data_a[rowx * p.KX + col];
         }

-        max_val = max(max_val, FLOAT_TYPE(data_a[rowx * p.KX + col]) * p.scale + (p.KY > 0 ? slope * FLOAT_TYPE(data_b[rowy * p.KX + col]) : FLOAT_TYPE(0.0f)));
+        FLOAT_TYPE b = FLOAT_TYPE(0);
+        if (p.KY > 0 && col < p.KX) {
+            b = data_b[rowy * p.KX + col];
+        }
+
+        FLOAT_TYPE v = a * p.scale + slope * b;
+
+        max_val = max(max_val, v);
+
+        if (idx < DATA_CACHE_SIZE) {
+            data_cache[idx] = v;
+        }
     }
-    vals[tid] = max_val;

+    // reduce across the workgroup
+    vals[tid] = max_val;
     barrier();
-    [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
+    [[unroll]] for (uint s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
         if (tid < s) {
             vals[tid] = max(vals[tid], vals[tid + s]);
         }
@@ -68,39 +93,89 @@ void main() {
     max_val = vals[0];
     barrier();

-    // Sum up values
-    vals[tid] = FLOAT_TYPE(0.0f);
+    FLOAT_TYPE sum = FLOAT_TYPE(0.0f);

-    [[unroll]] for (uint col0 = 0; col0 < p.KX; col0 += BLOCK_SIZE) {
+    // Compute sum{exp(x - max)}
+    [[unroll]] for (uint col0 = 0, idx = 0; idx < num_iters; col0 += BLOCK_SIZE, ++idx) {
         const uint col = col0 + tid;

         if (col >= p.KX) {
             break;
         }

+        // compute exp(a*scale+b*slope), add it to sum, and cache the new value
+        // in data_cache if possible.
         const uint i = rowx * p.KX + col;
-        const FLOAT_TYPE val = exp(FLOAT_TYPE(data_a[i]) * p.scale + (p.KY > 0 ? slope * FLOAT_TYPE(data_b[rowy * p.KX + col]) : FLOAT_TYPE(0.0f)) - max_val);
-        vals[tid] += val;
-        data_d[i] = D_TYPE(val);
+        FLOAT_TYPE val;
+        if (idx < DATA_CACHE_SIZE) {
+            val = exp(data_cache[idx] - max_val);
+        } else {
+            val = exp(FLOAT_TYPE(data_a[i]) * p.scale + (p.KY > 0 ? slope * FLOAT_TYPE(data_b[rowy * p.KX + col]) : FLOAT_TYPE(0.0f)) - max_val);
+        }
+        sum += val;
+        if (idx < DATA_CACHE_SIZE) {
+            data_cache[idx] = val;
+        } else {
+            data_d[i] = D_TYPE(val);
+        }
     }

+    // reduce across the workgroup
+    vals[tid] = sum;
     barrier();
-    [[unroll]] for (int s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
+    [[unroll]] for (uint s = BLOCK_SIZE / 2; s > 0; s >>= 1) {
         if (tid < s) {
             vals[tid] += vals[tid + s];
         }
         barrier();
     }
+    sum = vals[0];

-    const D_TYPE divisor = D_TYPE(vals[0]);
+    FLOAT_TYPE rcpdivisor = 1.0/sum;

-    [[unroll]] for (uint col0 = 0; col0 < p.KX; col0 += BLOCK_SIZE) {
+    [[unroll]] for (uint col0 = 0, idx = 0; idx < num_iters; col0 += BLOCK_SIZE, ++idx) {
         const uint col = col0 + tid;

         if (col >= p.KX) {
-            break;
+            continue;
+        }
+
+        if (idx < DATA_CACHE_SIZE) {
+            data_d[rowx*p.KX + col] = D_TYPE(data_cache[idx] * rcpdivisor);
+        } else {
+            data_d[rowx*p.KX + col] *= D_TYPE(rcpdivisor);
         }
+    }
+}

-        data_d[rowx*p.KX + col] /= divisor;
+void main() {
+    // instantiate the soft_max function for several different
+    // dimensions, to allow loop unrolling
+    uint num_blocks = (p.KX + BLOCK_SIZE - 1) / BLOCK_SIZE;
+    switch (num_blocks) {
+        case 1:
+            soft_max(1);
+            break;
+        case 2:
+            soft_max(2);
+            break;
+        case 3:
+            soft_max(3);
+            break;
+        case 4:
+            soft_max(4);
+            break;
+        case 5:
+        case 6:
+        case 7:
+        case 8:
+            soft_max(8);
+            break;
+        case 16:
+            soft_max(16);
+            break;
+        default:
+            soft_max(num_blocks);
+            break;
     }
 }
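
As background for the connection between the `{ device->subgroup_size }` argument in ggml-vulkan.cpp and the shader's `layout(constant_id = 0)` above: the value appears to be delivered as a Vulkan specialization constant, which also drives `local_size_x_id = 0`, i.e. the workgroup size. A minimal standalone C++ sketch of that mechanism (raw Vulkan, not ggml's ggml_vk_create_pipeline helper; function and parameter names are illustrative) might look like:

    #include <vulkan/vulkan.h>
    #include <cstdint>

    // Sketch: create a compute pipeline with one uint specialization constant
    // (the subgroup size) mapped to constant_id 0, which the soft_max shader
    // also uses as local_size_x_id = 0, i.e. the workgroup size.
    VkPipeline create_pipeline_with_block_size(VkDevice device, VkPipelineLayout layout,
                                               VkShaderModule module, uint32_t subgroup_size) {
        const VkSpecializationMapEntry entry = { /*constantID*/ 0, /*offset*/ 0, /*size*/ sizeof(uint32_t) };
        const VkSpecializationInfo spec = { 1, &entry, sizeof(uint32_t), &subgroup_size };

        VkPipelineShaderStageCreateInfo stage = {};
        stage.sType               = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
        stage.stage               = VK_SHADER_STAGE_COMPUTE_BIT;
        stage.module              = module;
        stage.pName               = "main";
        stage.pSpecializationInfo = &spec;

        VkComputePipelineCreateInfo info = {};
        info.sType  = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
        info.stage  = stage;
        info.layout = layout;

        VkPipeline pipeline = VK_NULL_HANDLE;
        vkCreateComputePipelines(device, VK_NULL_HANDLE, 1, &info, nullptr, &pipeline);
        return pipeline;
    }

Picking the subgroup size here means each barrier in the shader synchronizes only a single subgroup, which is what the commit message refers to when it says the barriers become much cheaper.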

tests/test-backend-ops.cpp

Lines changed: 8 additions & 0 deletions
@@ -3790,6 +3790,14 @@ static std::vector<std::unique_ptr<test_case>> make_test_cases_perf() {

     test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F16, {512, 3072, 1, 1}));

+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {4096, 4096, 5, 1}, false, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 4096, 5, 1}, false, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {1024, 1024, 10, 1}, false, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 1024, 10, 1}, false, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {256, 256, 20, 1}, false, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {64, 64, 20, 1}, false, 1.0f, 0.0f));
+    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 64, 20, 1}, false, 1.0f, 0.0f));
+
     for (int bs : {1, 512}) {
         for (ggml_type type_a : all_types) {
             for (ggml_type type_b : {GGML_TYPE_F32}) {
