Skip to content

Commit e1b2ace

Browse files
hjc4869 authored and ggerganov committed
Add --no-op-offload to improve -ot pp perf in MoE models like llama4 400B (llama/13386)
1 parent 6db0e01 commit e1b2ace

File tree

2 files changed

+8
-4
lines changed

2 files changed

+8
-4
lines changed

ggml/include/ggml-backend.h

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -248,7 +248,7 @@ extern "C" {
248248
// preferrably to run on the same backend as the buffer
249249
ggml_backend_buffer_set_usage(buf_weights, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
250250
251-
sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false);
251+
sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, NULL, num_backends, GGML_DEFAULT_GRAPH_SIZE, false, true);
252252
253253
// initialize buffers from a max size graph (optional)
254254
reserve_graph = build_graph(sched, max_batch_size);
@@ -289,7 +289,7 @@ extern "C" {
289289
typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);
290290

291291
// Initialize a backend scheduler, backends with low index are given priority over backends with high index
292-
GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
292+
GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel, bool op_offload);
293293
GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
294294

295295
// Initialize backend buffers from a measure graph

ggml/src/ggml-backend.cpp

Lines changed: 6 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -674,6 +674,8 @@ struct ggml_backend_sched {
674674
char * context_buffer;
675675
size_t context_buffer_size;
676676

677+
bool op_offload;
678+
677679
int debug;
678680
};
679681

@@ -766,7 +768,7 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
766768
if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
767769
int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
768770
// check if a backend with higher prio wants to offload the op
769-
if (src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
771+
if (sched->op_offload && src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
770772
for (int b = 0; b < src_backend_id; b++) {
771773
if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
772774
SET_CAUSE(tensor, "1.off");
@@ -1452,7 +1454,8 @@ ggml_backend_sched_t ggml_backend_sched_new(
14521454
ggml_backend_buffer_type_t * bufts,
14531455
int n_backends,
14541456
size_t graph_size,
1455-
bool parallel) {
1457+
bool parallel,
1458+
bool op_offload) {
14561459
GGML_ASSERT(n_backends > 0);
14571460
GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS);
14581461
GGML_ASSERT(ggml_backend_dev_type(ggml_backend_get_device(backends[n_backends - 1])) == GGML_BACKEND_DEVICE_TYPE_CPU);
@@ -1497,6 +1500,7 @@ ggml_backend_sched_t ggml_backend_sched_new(
14971500
}
14981501

14991502
sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends);
1503+
sched->op_offload = op_offload;
15001504

15011505
ggml_backend_sched_reset(sched);
15021506

0 commit comments

Comments
 (0)