Changes from all commits (29 commits):
- 245f391 graph : reuse hybrid graphs (ggerganov, Oct 9, 2025)
- 638e2c2 graph : reuse recurrent graphs (ggerganov, Oct 9, 2025)
- 0b9c1ae metal : fix mul-mm condition + fix mul-mv permuted kernels (ggerganov, Oct 9, 2025)
- 1f02d93 graph : fix reuse check for recurrent inputs (ggerganov, Oct 10, 2025)
- 00f115f memory : move the recurrent state into the memory context (ggerganov, Oct 10, 2025)
- 2744d61 Revert "memory : move the recurrent state into the memory context" (ggerganov, Oct 10, 2025)
- ab3f3fe Merge branch 'gg/metal-mul-mat-fixes' into gg/graph-mamba-reuse (gabe-l-hart, Oct 10, 2025)
- 8c23c43 Added: tri, cumsum. Still a mess. (gabe-l-hart, Oct 10, 2025)
- 2a2e79c feat(tests): Add --verbose | -v flag to test-backend-ops to print ten… (gabe-l-hart, Oct 10, 2025)
- 092f740 test: Add cumsum tests to test-backend-ops (gabe-l-hart, Oct 10, 2025)
- 6949ce7 feat(ggml-cpu): Add cumsum support for f16 and bf16 (gabe-l-hart, Oct 10, 2025)
- f8fba60 feat(ggml-cpu): Add F16 and BF16 support for tri (gabe-l-hart, Oct 13, 2025)
- 058160a test: Add test cases for tri (gabe-l-hart, Oct 13, 2025)
- 86ce3da chore: TODOs to loosen assertions in tri for ggml_is_contiguous (gabe-l-hart, Oct 13, 2025)
- 3a8958f feat(ggml-metal): Initial (slow) implementation of cumsum for metal (gabe-l-hart, Oct 13, 2025)
- cbaed86 feat(ggml-metal): Add stubs for metal tri (gabe-l-hart, Oct 13, 2025)
- e596469 test: Use looser nmse for lower-precision types for cumsum (gabe-l-hart, Oct 13, 2025)
- 3011a6e Merge remote-tracking branch 'origin/master' into Mamba2SSD (gabe-l-hart, Oct 13, 2025)
- 112d339 test: Allow multiple verbose flags to fully print tensors (gabe-l-hart, Oct 15, 2025)
- 78e137f feat(llama-gguf): Print out the tensor type in llama-gguf r (gabe-l-hart, Sep 26, 2025)
- e5587cb feat(ggml-metal): Efficient implementation of cumsum for metal (gabe-l-hart, Oct 15, 2025)
- 0468b99 test: More verbose printing and better cumsum tests (gabe-l-hart, Oct 15, 2025)
- c71e35e fix(ggml-metal): better granularity for support bool for CUMSUM and TRI (gabe-l-hart, Oct 15, 2025)
- 5f0d2a1 feat(ggml-metal): Metal impl of tri (gabe-l-hart, Oct 15, 2025)
- 426580d Merge remote-tracking branch 'origin/master' into Mamba2SSD (gabe-l-hart, Oct 15, 2025)
- ba3b8db fix(ggml-cpu): Fix warnings from build with gcc (gabe-l-hart, Oct 15, 2025)
- dfae909 feat(ggml-cuda): common implementation of prefix sum (gabe-l-hart, Oct 16, 2025)
- d1f8658 feat(ggml-cuda): CUDA implementation of CUMSUM (gabe-l-hart, Oct 16, 2025)
- 5071fbd feat(ggml-cuda): CUDA implementation of TRI (gabe-l-hart, Oct 16, 2025)
3 changes: 2 additions & 1 deletion examples/gguf/gguf.cpp
@@ -184,8 +184,9 @@ static bool gguf_ex_read_1(const std::string & fname, bool check_data) {
const char * name = gguf_get_tensor_name (ctx, i);
const size_t size = gguf_get_tensor_size (ctx, i);
const size_t offset = gguf_get_tensor_offset(ctx, i);
const char * type = ggml_type_name(gguf_get_tensor_type(ctx, i));

printf("%s: tensor[%d]: name = %s, size = %zu, offset = %zu\n", __func__, i, name, size, offset);
printf("%s: tensor[%d]: name = %s, size = %zu, offset = %zu, type = %s\n", __func__, i, name, size, offset, type);
}
}

24 changes: 24 additions & 0 deletions ggml/include/ggml.h
@@ -474,6 +474,7 @@ extern "C" {
GGML_OP_COS,
GGML_OP_SUM,
GGML_OP_SUM_ROWS,
GGML_OP_CUMSUM,
GGML_OP_MEAN,
GGML_OP_ARGMAX,
GGML_OP_COUNT_EQUAL,
@@ -529,6 +530,7 @@ extern "C" {
GGML_OP_TIMESTEP_EMBEDDING,
GGML_OP_ARGSORT,
GGML_OP_LEAKY_RELU,
GGML_OP_TRI,

GGML_OP_FLASH_ATTN_EXT,
GGML_OP_FLASH_ATTN_BACK,
@@ -615,6 +617,13 @@ extern "C" {
GGML_TENSOR_FLAG_LOSS = 8, // ...defines loss for numerical optimization (multiple loss tensors add up)
};

enum ggml_tri_type {
GGML_TRI_TYPE_UPPER_DIAG = 0,
GGML_TRI_TYPE_UPPER = 1,
GGML_TRI_TYPE_LOWER_DIAG = 2,
GGML_TRI_TYPE_LOWER = 3
};

struct ggml_init_params {
// memory pool
size_t mem_size; // bytes
@@ -978,6 +987,10 @@ extern "C" {
struct ggml_context * ctx,
struct ggml_tensor * a);

GGML_API struct ggml_tensor * ggml_cumsum(
struct ggml_context * ctx,
struct ggml_tensor * a);

// mean along rows
GGML_API struct ggml_tensor * ggml_mean(
struct ggml_context * ctx,
@@ -2141,6 +2154,17 @@ extern "C" {
int shift2,
int shift3);

// Make matrix into a triangular one (upper, upper + diagonal, lower or lower + diagonal) with constant value
GGML_API struct ggml_tensor * ggml_tri(
struct ggml_context * ctx,
struct ggml_tensor * a,
float constant,
enum ggml_tri_type tritype);

GGML_API struct ggml_tensor * ggml_tri_keep(
struct ggml_context * ctx,
struct ggml_tensor * a,
enum ggml_tri_type tritype);

// Ref: https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/util.py#L151
// timesteps: [N,]
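For orientation, here is a minimal usage sketch (not part of this PR) of how the new entry points might be called from client code. The function name and the 4x4 shape are illustrative assumptions; the shape is square because the CPU tri kernels below assert ne[0] == ne[1], and the graph plumbing follows the usual ggml pattern.

```cpp
// Hypothetical usage sketch of the new ops; shapes and values are illustrative.
#include "ggml.h"

static struct ggml_cgraph * build_cumsum_tri_example(struct ggml_context * ctx) {
    // square f32 matrix, since the tri kernels assert ne[0] == ne[1]
    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 4);

    // cumulative sum along the innermost dimension (ne00), i.e. per row
    struct ggml_tensor * cs = ggml_cumsum(ctx, a);

    // lower triangle + diagonal variant of a, with 0.0f as the constant op parameter
    struct ggml_tensor * tl = ggml_tri(ctx, a, 0.0f, GGML_TRI_TYPE_LOWER_DIAG);

    // "keep" variant: no constant argument; the CPU kernels treat a NaN constant
    // as "keep the original values", so this presumably passes NaN internally
    struct ggml_tensor * tk = ggml_tri_keep(ctx, cs, GGML_TRI_TYPE_LOWER_DIAG);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, tl);
    ggml_build_forward_expand(gf, tk);
    return gf;
}
```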
10 changes: 10 additions & 0 deletions ggml/src/ggml-cpu/ggml-cpu.c
@@ -1736,6 +1736,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
{
ggml_compute_forward_sum_rows(params, tensor);
} break;
case GGML_OP_CUMSUM:
{
ggml_compute_forward_cumsum(params, tensor);
} break;
case GGML_OP_MEAN:
{
ggml_compute_forward_mean(params, tensor);
@@ -1948,6 +1952,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
{
ggml_compute_forward_leaky_relu(params, tensor);
} break;
case GGML_OP_TRI:
{
ggml_compute_forward_tri(params, tensor);
} break;
case GGML_OP_FLASH_ATTN_EXT:
{
ggml_compute_forward_flash_attn_ext(params, tensor);
@@ -2158,6 +2166,8 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
case GGML_OP_SUM_ROWS:
case GGML_OP_MEAN:
case GGML_OP_ARGMAX:
case GGML_OP_CUMSUM:
case GGML_OP_TRI:
{
n_tasks = 1;
} break;
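The dispatch above routes GGML_OP_CUMSUM and GGML_OP_TRI to the new forward functions and pins both to a single task (n_tasks = 1). The row kernels in the next file delegate to ggml_vec_cumsum_* helpers that are not shown in this diff; as a reading aid, the per-row behavior they are assumed to implement for f32 is sketched below.

```cpp
#include <stdint.h>

// Assumed per-row contract of ggml_vec_cumsum_f32 (reference sketch, not the real body):
// dst[i] = src[0] + src[1] + ... + src[i] along the innermost (ne00) dimension.
static void cumsum_row_reference(int64_t n, float * dst, const float * src) {
    float acc = 0.0f;
    for (int64_t i = 0; i < n; ++i) {
        acc += src[i];
        dst[i] = acc;   // e.g. {1, 2, 3, 4} -> {1, 3, 6, 10}
    }
}
```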
228 changes: 228 additions & 0 deletions ggml/src/ggml-cpu/ops.cpp
@@ -9,6 +9,7 @@

#include <float.h>
#include <algorithm>
#include <cmath>

// ggml_compute_forward_dup

@@ -1394,6 +1395,127 @@ void ggml_compute_forward_sum(
}
}

// ggml_compute_forward_cumsum

static void ggml_compute_forward_cumsum_f32(
const ggml_compute_params * params,
ggml_tensor * dst) {

const ggml_tensor * src0 = dst->src[0];

if (params->ith != 0) {
return;
}

GGML_ASSERT(src0->nb[0] == sizeof(float));
GGML_ASSERT(dst->nb[0] == sizeof(float));

GGML_TENSOR_UNARY_OP_LOCALS

GGML_ASSERT(ne0 == ne00);
GGML_ASSERT(ne1 == ne01);
GGML_ASSERT(ne2 == ne02);
GGML_ASSERT(ne3 == ne03);

for (int64_t i3 = 0; i3 < ne03; i3++) {
for (int64_t i2 = 0; i2 < ne02; i2++) {
for (int64_t i1 = 0; i1 < ne01; i1++) {
float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
float * dst_row = (float *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3);
ggml_vec_cumsum_f32(ne00, dst_row, src_row);
}
}
}
}

static void ggml_compute_forward_cumsum_f16(
const ggml_compute_params * params,
ggml_tensor * dst) {

const ggml_tensor * src0 = dst->src[0];

if (params->ith != 0) {
return;
}

GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));
GGML_ASSERT(dst->nb[0] == sizeof(ggml_fp16_t));

GGML_TENSOR_UNARY_OP_LOCALS

GGML_ASSERT(ne0 == ne00);
GGML_ASSERT(ne1 == ne01);
GGML_ASSERT(ne2 == ne02);
GGML_ASSERT(ne3 == ne03);

for (int64_t i3 = 0; i3 < ne03; i3++) {
for (int64_t i2 = 0; i2 < ne02; i2++) {
for (int64_t i1 = 0; i1 < ne01; i1++) {
ggml_fp16_t * src_row = (ggml_fp16_t *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
ggml_fp16_t * dst_row = (ggml_fp16_t *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3);
ggml_vec_cumsum_f16(ne00, dst_row, src_row);
}
}
}
}

static void ggml_compute_forward_cumsum_bf16(
const ggml_compute_params * params,
ggml_tensor * dst) {

const ggml_tensor * src0 = dst->src[0];

if (params->ith != 0) {
return;
}

GGML_ASSERT(src0->nb[0] == sizeof(ggml_bf16_t));
GGML_ASSERT(dst->nb[0] == sizeof(ggml_bf16_t));

GGML_TENSOR_UNARY_OP_LOCALS

GGML_ASSERT(ne0 == ne00);
GGML_ASSERT(ne1 == ne01);
GGML_ASSERT(ne2 == ne02);
GGML_ASSERT(ne3 == ne03);

for (int64_t i3 = 0; i3 < ne03; i3++) {
for (int64_t i2 = 0; i2 < ne02; i2++) {
for (int64_t i1 = 0; i1 < ne01; i1++) {
ggml_bf16_t * src_row = (ggml_bf16_t *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
ggml_bf16_t * dst_row = (ggml_bf16_t *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3);
ggml_vec_cumsum_bf16(ne00, dst_row, src_row);
}
}
}
}

void ggml_compute_forward_cumsum(
const ggml_compute_params * params,
ggml_tensor * dst) {

const ggml_tensor * src0 = dst->src[0];

switch (src0->type) {
case GGML_TYPE_F32:
{
ggml_compute_forward_cumsum_f32(params, dst);
} break;
case GGML_TYPE_F16:
{
ggml_compute_forward_cumsum_f16(params, dst);
} break;
case GGML_TYPE_BF16:
{
ggml_compute_forward_cumsum_bf16(params, dst);
} break;
default:
{
GGML_ABORT("fatal error");
}
}
}

// ggml_compute_forward_sum_rows

static void ggml_compute_forward_sum_rows_f32(
@@ -2140,6 +2262,112 @@ static void ggml_compute_forward_gelu(
}
}

// ggml_compute_forward_tri

static void ggml_compute_forward_tri_f32(const ggml_compute_params * params, ggml_tensor * dst) {
const ggml_tensor * src0 = dst->src[0];

const ggml_tri_type ttype = (ggml_tri_type) dst->op_params[0];
const float c = ggml_get_op_params_f32(dst, 1);
const bool keep_org_val = isnan(c);

// TODO: Is ggml_is_contiguous_rows safe and sufficient?
GGML_ASSERT(ggml_is_contiguous(src0));
GGML_ASSERT(src0->ne[0] == src0->ne[1]);

GGML_TENSOR_UNARY_OP_LOCALS

const auto [ir0, ir1] = get_thread_range(params, src0);

for (int64_t ir = ir0; ir < ir1; ++ir) {
const int64_t i03 = ir/(ne02*ne01);
const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

float * dst_ptr = (float *)((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
float * src = (float *)((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 );
ggml_vec_tri_f32(ne0, i01, dst_ptr, src, keep_org_val, c, ttype);
}

}

static void ggml_compute_forward_tri_f16(const ggml_compute_params * params, ggml_tensor * dst) {
const ggml_tensor * src0 = dst->src[0];

const ggml_tri_type ttype = (ggml_tri_type) dst->op_params[0];
const float c = ggml_get_op_params_f32(dst, 1);
const bool keep_org_val = isnan(c);

// TODO: Is ggml_is_contiguous_rows safe and sufficient?
GGML_ASSERT(ggml_is_contiguous(src0));
GGML_ASSERT(src0->ne[0] == src0->ne[1]);

GGML_TENSOR_UNARY_OP_LOCALS

const auto [ir0, ir1] = get_thread_range(params, src0);

for (int64_t ir = ir0; ir < ir1; ++ir) {
const int64_t i03 = ir/(ne02*ne01);
const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

ggml_fp16_t * dst_ptr = (ggml_fp16_t *)((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
ggml_fp16_t * src = (ggml_fp16_t *)((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 );
ggml_vec_tri_f16(ne0, i01, dst_ptr, src, keep_org_val, GGML_FP32_TO_FP16(c), ttype);
}

}

static void ggml_compute_forward_tri_bf16(const ggml_compute_params * params, ggml_tensor * dst) {
const ggml_tensor * src0 = dst->src[0];

const ggml_tri_type ttype = (ggml_tri_type) dst->op_params[0];
const float c = ggml_get_op_params_f32(dst, 1);
const bool keep_org_val = isnan(c);

// TODO: Is ggml_is_contiguous_rows safe and sufficient?
GGML_ASSERT(ggml_is_contiguous(src0));
GGML_ASSERT(src0->ne[0] == src0->ne[1]);

GGML_TENSOR_UNARY_OP_LOCALS

const auto [ir0, ir1] = get_thread_range(params, src0);

for (int64_t ir = ir0; ir < ir1; ++ir) {
const int64_t i03 = ir/(ne02*ne01);
const int64_t i02 = (ir - i03*ne02*ne01)/ne01;
const int64_t i01 = (ir - i03*ne02*ne01 - i02*ne01);

ggml_bf16_t * dst_ptr = (ggml_bf16_t *)((char *) dst->data + i03*nb3 + i02*nb2 + i01*nb1 );
ggml_bf16_t * src = (ggml_bf16_t *)((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 );
ggml_vec_tri_bf16(ne0, i01, dst_ptr, src, keep_org_val, GGML_FP32_TO_BF16(c), ttype);
}

}

void ggml_compute_forward_tri(const ggml_compute_params * params, ggml_tensor * dst) {
const ggml_tensor * src0 = dst->src[0];

switch (src0->type) {
case GGML_TYPE_F32:
{
ggml_compute_forward_tri_f32(params, dst);
} break;
case GGML_TYPE_F16:
{
ggml_compute_forward_tri_f16(params, dst);
} break;
case GGML_TYPE_BF16:
{
ggml_compute_forward_tri_bf16(params, dst);
} break;
default:
{
GGML_ABORT("fatal error");
}
}
}

// ggml_compute_forward_gelu_erf

static void ggml_compute_forward_gelu_erf_f32(
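Similarly, the ggml_vec_tri_* helpers are not part of this hunk. Based on the arguments passed above (the row index i01, the NaN "keep" sentinel, and the header comment on ggml_tri), one plausible reading of the per-row contract is sketched below; the fill direction is an assumption until the vector helper itself is reviewed.

```cpp
#include <stdint.h>
#include <stdbool.h>
#include "ggml.h"

// Plausible per-row contract of ggml_vec_tri_f32 (an assumption, not the real body):
// columns inside the selected triangle receive the constant c, or keep their original
// value when keep_org_val is set (i.e. c was NaN); columns outside the triangle are
// zeroed so the result is triangular.
static void tri_row_reference(int64_t n, int64_t i, float * dst, const float * src,
                              bool keep_org_val, float c, enum ggml_tri_type ttype) {
    for (int64_t j = 0; j < n; ++j) {
        bool inside = false;
        switch (ttype) {
            case GGML_TRI_TYPE_UPPER:      inside = j >  i; break;
            case GGML_TRI_TYPE_UPPER_DIAG: inside = j >= i; break;
            case GGML_TRI_TYPE_LOWER:      inside = j <  i; break;
            case GGML_TRI_TYPE_LOWER_DIAG: inside = j <= i; break;
        }
        dst[j] = inside ? (keep_org_val ? src[j] : c) : 0.0f;
    }
}
```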
2 changes: 2 additions & 0 deletions ggml/src/ggml-cpu/ops.h
@@ -34,6 +34,7 @@ void ggml_compute_forward_add1(const struct ggml_compute_params * params, struct
void ggml_compute_forward_acc(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_sum(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_sum_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_cumsum(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_mean(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_argmax(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_count_equal(const struct ggml_compute_params * params, struct ggml_tensor * dst);
@@ -85,6 +86,7 @@ void ggml_compute_forward_arange(const struct ggml_compute_params * params, stru
void ggml_compute_forward_timestep_embedding(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_argsort(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_leaky_relu(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_flash_attn_ext(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_flash_attn_back(
const struct ggml_compute_params * params,