Commit 25c5316 (1 parent: 0aef6c8)

vulkan: Add initial cross entropy loss backward shader

Signed-off-by: vineet <[email protected]>
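For context, the new kernel implements the standard softmax cross-entropy backward pass. With logits $z_r$ and one-hot labels $y_r$ for each of the $n_{\mathrm{rows}}$ rows, and the incoming scalar loss gradient $g$, each output element is

$$\frac{\partial L}{\partial z_{ri}} = \big(\mathrm{softmax}(z_r)_i - y_{ri}\big) \cdot \frac{g}{n_{\mathrm{rows}}}$$

which is exactly the `(softmax_val - label) * d_by_nrows` expression in the shader below.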

3 files changed: 128 additions, 0 deletions

ggml/src/ggml-vulkan/ggml-vulkan.cpp

Lines changed: 34 additions & 0 deletions
@@ -464,6 +464,7 @@ struct vk_device_struct {
     vk_pipeline pipeline_leaky_relu_f32;
     vk_pipeline pipeline_silu_back_f32;
     vk_pipeline pipeline_diag_mask_inf_f32;
+    vk_pipeline pipeline_cross_entropy_loss_back_f32;
     vk_pipeline pipeline_soft_max_f32, pipeline_soft_max_f32_f16;
     vk_pipeline pipeline_soft_max_f32_wg512, pipeline_soft_max_f32_f16_wg512;
     vk_pipeline pipeline_soft_max_back_f32;
@@ -2915,6 +2916,8 @@ static void ggml_vk_load_shaders(vk_device& device) {

     ggml_vk_create_pipeline(device, device->pipeline_diag_mask_inf_f32, "diag_mask_inf_f32", diag_mask_inf_f32_len, diag_mask_inf_f32_data, "main", 2, sizeof(vk_op_diag_mask_push_constants), {1, 512, 1}, {}, 1, true);

+    ggml_vk_create_pipeline(device, device->pipeline_cross_entropy_loss_back_f32, "cross_entropy_loss_back_f32", cross_entropy_loss_back_f32_len, cross_entropy_loss_back_f32_data, "main", 4, sizeof(vk_op_push_constants), {256, 1, 1}, { device->subgroup_size }, 1);
+
     ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32, "soft_max_f32", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
     ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_wg512, "soft_max_f32_wg512", soft_max_f32_len, soft_max_f32_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { 512 }, 1);
     ggml_vk_create_pipeline(device, device->pipeline_soft_max_f32_f16, "soft_max_f32_f16", soft_max_f32_f16_len, soft_max_f32_f16_data, "main", 3, sizeof(vk_op_soft_max_push_constants), {1, 1, 1}, { device->subgroup_size }, 1);
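Reading the new registration against the shader (argument roles inferred from the neighboring soft_max registrations, not restated from the ggml_vk_create_pipeline signature): the parameter count 4 matches the shader's four storage-buffer bindings (gradient, logits, labels, output), sizeof(vk_op_push_constants) sizes the generic push-constant block, and { device->subgroup_size } fills specialization constant 0, which the shader exposes as BLOCK_SIZE via local_size_x_id.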
@@ -6703,6 +6706,11 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
            return ctx->device->pipeline_diag_mask_inf_f32;
        }
        return nullptr;
+    case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
+        if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && src2->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+            return ctx->device->pipeline_cross_entropy_loss_back_f32;
+        }
+        return nullptr;
    case GGML_OP_SOFT_MAX:
        GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16);

@@ -7084,6 +7092,7 @@ static void ggml_vk_op_f32(ggml_backend_vk_context * ctx, vk_context& subctx, co
    case GGML_OP_L2_NORM:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_SOFT_MAX_BACK:
+    case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
    case GGML_OP_SUM_ROWS:
    case GGML_OP_ARGMAX:
        {
@@ -7811,6 +7820,19 @@ static void ggml_vk_diag_mask_inf(ggml_backend_vk_context * ctx, vk_context& sub
    ggml_vk_op_f32<vk_op_diag_mask_push_constants>(ctx, subctx, src0, nullptr, nullptr, dst, GGML_OP_DIAG_MASK_INF, { (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], op_params[0] }, dryrun);
}

+static void ggml_vk_cross_entropy_loss_back(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * src2, ggml_tensor * dst, bool dryrun = false) {
+    const int64_t nclasses = src1->ne[0];
+    const int64_t nrows = ggml_nrows(src1);
+
+    ggml_vk_op_f32<vk_op_push_constants>(ctx, subctx, src0, src1, src2, dst, GGML_OP_CROSS_ENTROPY_LOSS_BACK, {
+        (uint32_t)nclasses,
+        (uint32_t)nrows,
+        0.0f,
+        0.0f
+    }, dryrun);
+}
+
static void ggml_vk_soft_max(ggml_backend_vk_context * ctx, vk_context& subctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, bool dryrun = false) {
    float * op_params = (float *)dst->op_params;

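The four initializer values land in the generic push-constant block that generic_head.comp declares and the shader reads back as p.KX and p.KY. On the host side this is the vk_op_push_constants layout used throughout this file, annotated here for this op:

struct vk_op_push_constants {
    uint32_t KX;     // nclasses: elements per row
    uint32_t KY;     // nrows: number of rows (batch size)
    float    param1; // unused by this op, pushed as 0.0f
    float    param2; // unused by this op, pushed as 0.0f
};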
@@ -9086,6 +9108,7 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
    case GGML_OP_RMS_NORM_BACK:
    case GGML_OP_L2_NORM:
    case GGML_OP_DIAG_MASK_INF:
+    case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_SOFT_MAX_BACK:
    case GGML_OP_ROPE:
@@ -9328,6 +9351,14 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
    case GGML_OP_DIAG_MASK_INF:
        ggml_vk_diag_mask_inf(ctx, compute_ctx, src0, node, dryrun);

+        break;
+
+    case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
+        // std::cerr << "*** GGML_VK_BUILD_GRAPH: CROSS_ENTROPY_LOSS_BACK case hit, calling ggml_vk_cross_entropy_loss_back" << std::endl;
+        // std::cout << "*** GGML_VK_BUILD_GRAPH: CROSS_ENTROPY_LOSS_BACK case hit, calling ggml_vk_cross_entropy_loss_back" << std::endl;
+        // fflush(stdout); fflush(stderr);
+        ggml_vk_cross_entropy_loss_back(ctx, compute_ctx, src0, src1, src2, node, dryrun);
+
        break;
    case GGML_OP_SOFT_MAX:
        ggml_vk_soft_max(ctx, compute_ctx, src0, src1, node, dryrun);
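Unlike the neighboring soft_max dispatch, this case forwards three sources: src0 carries the scalar loss gradient, src1 the logits, and src2 the one-hot labels, lining up with the shader's bindings 0 through 2.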
@@ -9492,6 +9523,7 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph *
    case GGML_OP_RMS_NORM_BACK:
    case GGML_OP_L2_NORM:
    case GGML_OP_DIAG_MASK_INF:
+    case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
    case GGML_OP_SOFT_MAX:
    case GGML_OP_SOFT_MAX_BACK:
    case GGML_OP_ROPE:
@@ -10685,6 +10717,8 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
            return true;
        case GGML_OP_CONV_TRANSPOSE_1D:
            return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32;
+        case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
+            return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && op->src[2]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32;
        default:
            return false;
    }
ggml/src/ggml-vulkan/vulkan-shaders/cross_entropy_loss_back.comp (new file)

Lines changed: 92 additions & 0 deletions

@@ -0,0 +1,92 @@
#version 450

#extension GL_EXT_control_flow_attributes : enable

#include "generic_head.comp"
#include "types.comp"

#define FLOAT_TYPE float

layout(constant_id = 0) const uint BLOCK_SIZE = 256;
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

layout (binding = 0) readonly buffer A {A_TYPE data_a[];};  // gradient of the loss (scalar)
layout (binding = 1) readonly buffer B {B_TYPE data_b[];};  // logits: raw model outputs (unnormalized scores)
layout (binding = 2) readonly buffer C {C_TYPE data_c[];};  // true labels (one-hot encoded)
layout (binding = 3) writeonly buffer D {D_TYPE data_d[];}; // output gradients

shared FLOAT_TYPE vals[BLOCK_SIZE];

void main() {
    const uint nclasses = p.KX;
    const uint nrows = p.KY;

    // Flatten the 3D workgroup ID into a row index (512 x 512 grid per z slice)
    const uint row = gl_WorkGroupID.z * 262144 + gl_WorkGroupID.y * 512 + gl_WorkGroupID.x;
    if (row >= nrows) {
        return;
    }

    const uint tid = gl_LocalInvocationID.x;
    const uint warp_size = gl_WorkGroupSize.x;

    const uint logits_offset = row * nclasses;
    const uint labels_offset = row * nclasses;
    const uint dst_offset = row * nclasses;

    // Gradient scaling (grad / batch_size)
    const FLOAT_TYPE d_by_nrows = FLOAT_TYPE(data_a[0]) / FLOAT_TYPE(nrows);

    // Per-thread maximum over this thread's slice of the row
    FLOAT_TYPE thread_max = FLOAT_TYPE(uintBitsToFloat(0xFF800000)); // -INFINITY
    for (uint i = tid; i < nclasses; i += warp_size) {
        FLOAT_TYPE val = FLOAT_TYPE(data_b[logits_offset + i]);
        thread_max = max(thread_max, val);
    }

    vals[tid] = thread_max;
    barrier();

    // Tree-reduce in shared memory to the global maximum for the row
    [[unroll]]
    for (uint s = warp_size / 2; s > 0; s >>= 1) {
        if (tid < s) {
            vals[tid] = max(vals[tid], vals[tid + s]);
        }
        barrier();
    }

    const FLOAT_TYPE row_max = vals[0];
    barrier();

    // Compute sum of exp(logits - max) for softmax normalization
    FLOAT_TYPE thread_sum = FLOAT_TYPE(0.0);
    for (uint i = tid; i < nclasses; i += warp_size) {
        FLOAT_TYPE val = FLOAT_TYPE(data_b[logits_offset + i]);
        thread_sum += exp(val - row_max);
    }

    vals[tid] = thread_sum;
    barrier();

    [[unroll]]
    for (uint s = warp_size / 2; s > 0; s >>= 1) {
        if (tid < s) {
            vals[tid] += vals[tid + s];
        }
        barrier();
    }

    const FLOAT_TYPE row_sum = vals[0];
    const FLOAT_TYPE sm_scale = FLOAT_TYPE(1.0) / row_sum;
    barrier();

    // Compute final gradients: (softmax - labels) * d_by_nrows
    for (uint i = tid; i < nclasses; i += warp_size) {
        FLOAT_TYPE logit = FLOAT_TYPE(data_b[logits_offset + i]);
        FLOAT_TYPE softmax_val = exp(logit - row_max) * sm_scale;

        FLOAT_TYPE label = FLOAT_TYPE(data_c[labels_offset + i]);

        data_d[dst_offset + i] = D_TYPE((softmax_val - label) * d_by_nrows);
    }
}
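
To sanity-check the shader, a scalar CPU reference is handy. The sketch below is illustrative only (the function name and the row-major layout assumption are mine, not part of the commit), but it mirrors the shader's math step for step:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Hypothetical CPU reference: dst = (softmax(logits) - labels) * grad / nrows.
// logits, labels, and dst are nrows x nclasses, row-major.
static void cross_entropy_loss_back_ref(float grad, const float * logits, const float * labels,
                                        float * dst, int64_t nrows, int64_t nclasses) {
    const float d_by_nrows = grad / (float) nrows;
    for (int64_t r = 0; r < nrows; ++r) {
        const float * z = logits + r*nclasses;
        const float * y = labels + r*nclasses;
        float       * g = dst    + r*nclasses;
        // numerically stable softmax: subtract the row max before exponentiating
        float zmax = -INFINITY;
        for (int64_t i = 0; i < nclasses; ++i) { zmax = std::max(zmax, z[i]); }
        float sum = 0.0f;
        for (int64_t i = 0; i < nclasses; ++i) { sum += std::exp(z[i] - zmax); }
        for (int64_t i = 0; i < nclasses; ++i) {
            g[i] = (std::exp(z[i] - zmax) / sum - y[i]) * d_by_nrows;
        }
    }
}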

ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp

Lines changed: 2 additions & 0 deletions
@@ -613,6 +613,8 @@ void process_shaders() {

    string_to_spv("diag_mask_inf_f32", "diag_mask_inf.comp", {{"A_TYPE", "float"}, {"D_TYPE", "float"}});

+    string_to_spv("cross_entropy_loss_back_f32", "cross_entropy_loss_back.comp", {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"C_TYPE", "float"}, {"D_TYPE", "float"}});
+
    string_to_spv("soft_max_f32", "soft_max.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}));
    string_to_spv("soft_max_f32_f16", "soft_max.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float16_t"}, {"D_TYPE", "float"}}));
    string_to_spv("soft_max_back_f32", "soft_max_back.comp", merge_maps(base_dict, {{"A_TYPE", "float"}, {"B_TYPE", "float"}, {"D_TYPE", "float"}}));
