@@ -1949,6 +1949,7 @@ inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) {
 inline static void ggml_vec_elu_f32  (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
 inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
 inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); }
+inline static void ggml_vec_sigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = 1.f / (1.f + expf(-x[i])); }
 // TODO: optimize performance
 inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
 inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
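
The new ggml_vec_sigmoid_f32 kernel applies the logistic function 1/(1 + e^-x) element-wise. A minimal standalone check of the per-element math (hypothetical test harness, not part of the patch):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        const float x[4] = {-2.0f, -0.5f, 0.5f, 2.0f};
        float y[4];
        for (int i = 0; i < 4; ++i) {
            // same expression ggml_vec_sigmoid_f32 uses per element
            y[i] = 1.f / (1.f + expf(-x[i]));
            printf("sigmoid(% .1f) = %f\n", x[i], y[i]);
        }
        return 0;
    }

Note that for large negative inputs expf(-x[i]) overflows to +inf, so the result saturates cleanly to 0.f rather than producing NaNs.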
@@ -2329,14 +2330,15 @@ static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = {
23292330 "TANH",
23302331 "ELU",
23312332 "RELU",
2333+ "SIGMOID",
23322334 "GELU",
23332335 "GELU_QUICK",
23342336 "SILU",
23352337 "HARDSWISH",
23362338 "HARDSIGMOID",
23372339};
23382340
2339- static_assert(GGML_UNARY_OP_COUNT == 12 , "GGML_UNARY_OP_COUNT != 12 ");
2341+ static_assert(GGML_UNARY_OP_COUNT == 13 , "GGML_UNARY_OP_COUNT != 13 ");
23402342
23412343
23422344static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
@@ -4561,6 +4563,20 @@ struct ggml_tensor * ggml_leaky_relu(
     return result;
 }
 
+// ggml_sigmoid
+
+struct ggml_tensor * ggml_sigmoid(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a) {
+    return ggml_unary(ctx, a, GGML_UNARY_OP_SIGMOID);
+}
+
+struct ggml_tensor * ggml_sigmoid_inplace(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a) {
+    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SIGMOID);
+}
+
 // ggml_gelu
 
 struct ggml_tensor * ggml_gelu(
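
The two wrappers route through the generic unary mechanism, so shape/type propagation and graph bookkeeping come for free. A hypothetical usage sketch, assuming an existing ggml context (ctx and inp are placeholder names, not from the patch):

    // build a graph node that applies sigmoid to an F32 tensor
    struct ggml_tensor * inp  = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    struct ggml_tensor * out  = ggml_sigmoid(ctx, inp);
    // or write the result back into the input's buffer
    struct ggml_tensor * out2 = ggml_sigmoid_inplace(ctx, inp);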
@@ -10852,6 +10868,52 @@ static void ggml_compute_forward_relu(
     }
 }
 
+// ggml_compute_forward_sigmoid
+
+static void ggml_compute_forward_sigmoid_f32(
+        const struct ggml_compute_params * params,
+        struct ggml_tensor * dst) {
+
+    const struct ggml_tensor * src0 = dst->src[0];
+
+    assert(params->ith == 0);
+    assert(ggml_are_same_shape(src0, dst));
+
+    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
+        return;
+    }
+
+    const int n  = ggml_nrows(src0);
+    const int nc = src0->ne[0];
+
+    assert( dst->nb[0] == sizeof(float));
+    assert(src0->nb[0] == sizeof(float));
+
+    for (int i = 0; i < n; i++) {
+        ggml_vec_sigmoid_f32(nc,
+                (float *) ((char *)  dst->data + i*( dst->nb[1])),
+                (float *) ((char *) src0->data + i*(src0->nb[1])));
+    }
+}
+
+static void ggml_compute_forward_sigmoid(
+        const struct ggml_compute_params * params,
+        struct ggml_tensor * dst) {
+
+    const struct ggml_tensor * src0 = dst->src[0];
+
+    switch (src0->type) {
+        case GGML_TYPE_F32:
+            {
+                ggml_compute_forward_sigmoid_f32(params, dst);
+            } break;
+        default:
+            {
+                GGML_ASSERT(false);
+            } break;
+    }
+}
+
 // ggml_compute_forward_gelu
 
 static void ggml_compute_forward_gelu_f32(
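
A note on the forward kernel: it processes src0 one row at a time, and the asserts on nb[0] == sizeof(float) guarantee that elements within a row are contiguous, which ggml_vec_sigmoid_f32 requires. Row pointers are computed from byte strides, hence the char * casts; restated outside the diff:

    // row i of dst/src0, addressed via the byte stride nb[1]
    float * dst_row = (float *) ((char *)  dst->data + i* dst->nb[1]);
    float * src_row = (float *) ((char *) src0->data + i*src0->nb[1]);

The assert(params->ith == 0) matches the scheduling change further down, where SIGMOID is grouped with the single-task unary ops.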
@@ -16617,6 +16679,10 @@ static void ggml_compute_forward_unary(
             {
                 ggml_compute_forward_relu(params, dst);
             } break;
+        case GGML_UNARY_OP_SIGMOID:
+            {
+                ggml_compute_forward_sigmoid(params, dst);
+            } break;
         case GGML_UNARY_OP_GELU:
             {
                 ggml_compute_forward_gelu(params, dst);
@@ -18601,6 +18667,10 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                         zero_table);
                 }
             } break;
+        case GGML_UNARY_OP_SIGMOID:
+            {
+                GGML_ASSERT(false); // TODO: not implemented
+            } break;
         case GGML_UNARY_OP_GELU:
             {
                 GGML_ASSERT(false); // TODO: not implemented
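
The backward case is stubbed with GGML_ASSERT(false), matching the neighboring GELU case. If it were implemented, the sigmoid gradient is cheap to compute from the forward output: with y = sigmoid(x), dy/dx = y*(1 - y). A minimal element-wise sketch (ggml_vec_sigmoid_grad_f32 is a hypothetical helper, not in this patch):

    inline static void ggml_vec_sigmoid_grad_f32(const int n, float * dx, const float * y, const float * dy) {
        // y  = sigmoid(x) saved from the forward pass
        // dy = upstream gradient; dx = resulting gradient w.r.t. x
        for (int i = 0; i < n; ++i) dx[i] = dy[i] * y[i] * (1.f - y[i]);
    }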
@@ -19130,6 +19200,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads, int n_cur_
         case GGML_UNARY_OP_TANH:
         case GGML_UNARY_OP_ELU:
         case GGML_UNARY_OP_RELU:
+        case GGML_UNARY_OP_SIGMOID:
         case GGML_UNARY_OP_HARDSWISH: // to opt for multiple threads
         case GGML_UNARY_OP_HARDSIGMOID: // to opt for multiple threads
             {
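
Scheduling note: SIGMOID joins the group of unary ops that run as a single task (the body below these cases sets n_tasks = 1), which is consistent with the assert(params->ith == 0) in the forward kernel; the trailing comments flag HARDSWISH/HARDSIGMOID as candidates for future multi-threading.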