Commit 5682a37

sched : copy only the used experts when offloading prompt processing (ggml-org#15346)
1 parent 1bc664a commit 5682a37

File tree

1 file changed: +87 −9 lines

ggml/src/ggml-backend.cpp

Lines changed: 87 additions & 9 deletions
```diff
@@ -19,9 +19,8 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <string>
-#include <vector>
 #include <algorithm>
+#include <vector>
 
 #ifdef __APPLE__
 #include <sys/types.h>
@@ -1352,6 +1351,10 @@ static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) {
 static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) {
     struct ggml_backend_sched_split * splits = sched->splits;
 
+    ggml_tensor * prev_ids_tensor = nullptr;
+    std::vector<int32_t> ids;
+    std::vector<ggml_bitset_t> used_ids;
+
     for (int i = 0; i < sched->n_splits; i++) {
         struct ggml_backend_sched_split * split = &splits[i];
         int split_backend_id = split->backend_id;
@@ -1378,16 +1381,91 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) {
                 } else {
                     ggml_backend_synchronize(split_backend);
                 }
-                // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
-                // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
-                if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
+
+                // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used
+                ggml_tensor * node = split->graph.nodes[0];
+                if (split->graph.n_nodes > 0 &&
+                    ggml_backend_buffer_get_usage(input->buffer) == GGML_BACKEND_BUFFER_USAGE_WEIGHTS &&
+                    ggml_backend_buffer_is_host(input->buffer) && (
+                        (node->src[0] == input_cpy && node->op == GGML_OP_MUL_MAT_ID)
+                        //|| (node->src[1] == input_cpy && node->op == GGML_OP_ADD_ID) /* GGML_OP_ADD_ID weights are small and not worth splitting */
+                    )) {
+
+                    const int64_t n_expert   = node->op == GGML_OP_MUL_MAT_ID ? input->ne[2] : input->ne[1];
+                    const size_t expert_size = node->op == GGML_OP_MUL_MAT_ID ? input->nb[2] : input->nb[1];
+
                     ggml_backend_synchronize(input_backend);
-                    if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
-                        ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
-                    } else {
+
+                    // get the ids
+                    ggml_tensor * ids_tensor = node->src[2];
+                    if (ids_tensor != prev_ids_tensor) {
+                        ids.resize(ggml_nbytes(ids_tensor) / sizeof(int32_t));
+                        ggml_backend_tensor_get_async(split_backend, ids_tensor, ids.data(), 0, ggml_nbytes(ids_tensor));
                         ggml_backend_synchronize(split_backend);
+
+                        // find the used experts
+                        used_ids.clear();
+                        used_ids.resize(ggml_bitset_size(n_expert));
+                        for (int64_t i1 = 0; i1 < ids_tensor->ne[1]; i1++) {
+                            for (int64_t i0 = 0; i0 < ids_tensor->ne[0]; i0++) {
+                                int32_t id = ids[i1 * ids_tensor->nb[1]/sizeof(int32_t) + i0 * ids_tensor->nb[0]/sizeof(int32_t)];
+                                ggml_bitset_set(used_ids.data(), id);
+                            }
+                        }
+
+                        prev_ids_tensor = ids_tensor;
+                    }
+
+                    // group consecutive experts and copy them together
+                    auto copy_experts = [&](int32_t first_id, int32_t last_id) {
+                        const size_t expert_offset = first_id * expert_size;
+                        const size_t expert_size_copy = (last_id - first_id + 1) * expert_size;
+                        const size_t padding = std::min<size_t>(expert_size, 512);
+                        const size_t padding_end = last_id < n_expert - 1 ? padding : 0;
+
+                        ggml_backend_tensor_set_async(split_backend,
+                            input_cpy,
+                            (const uint8_t *)input->data + expert_offset, expert_offset,
+                            // copy a bit extra at the end to ensure there are no NaNs in the padding of the last expert
+                            // this is necessary for MMQ in the CUDA backend
+                            expert_size_copy + padding_end);
+                    };
+
+                    int id = 0;
+                    while (!ggml_bitset_get(used_ids.data(), id)) {
+                        id++;
+                    }
+                    int32_t first_id = id;
+                    int32_t last_id = first_id;
+
+                    for (++id; id < n_expert; ++id) {
+                        if (!ggml_bitset_get(used_ids.data(), id)) {
+                            continue;
+                        }
+
+                        if (id == last_id + 1) {
+                            last_id = id;
+                            continue;
+                        }
+
+                        copy_experts(first_id, last_id);
+
+                        first_id = id;
+                        last_id = id;
+                    }
+                    copy_experts(first_id, last_id);
+                } else {
+                    // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
+                    // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
+                    if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
+                        ggml_backend_synchronize(input_backend);
+                        if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
+                            ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
+                        } else {
+                            ggml_backend_synchronize(split_backend);
+                        }
+                        ggml_backend_tensor_copy(input, input_cpy);
                     }
-                    ggml_backend_tensor_copy(input, input_cpy);
                 }
             }
         }
```
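
The core of the change is the `copy_experts` loop: instead of copying the whole expert weight tensor to the split backend, the scheduler reads the routing ids, marks the used experts in a bitset, and issues one contiguous copy per run of consecutive used ids. Below is a minimal standalone sketch of that grouping logic, not part of the commit: `n_expert`, `expert_size`, `copy_range`, and the sample router ids are illustrative placeholders, and `std::vector<bool>` stands in for `ggml_bitset_t`.

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    // total number of experts in the (hypothetical) weight tensor
    const int n_expert = 16;
    // expert ids selected by the router for this batch (illustrative values)
    const std::vector<int32_t> router_ids = {3, 4, 5, 9, 10, 12};

    // mark the used experts; std::vector<bool> stands in for ggml_bitset_t
    std::vector<bool> used(n_expert, false);
    for (int32_t id : router_ids) {
        used[id] = true;
    }

    // pretend each expert occupies expert_size bytes in the host buffer
    const size_t expert_size = 4096;

    // one contiguous copy per run of consecutive used experts;
    // a real implementation would issue an async device copy here
    auto copy_range = [&](int32_t first_id, int32_t last_id) {
        const size_t offset = (size_t) first_id * expert_size;
        const size_t size   = (size_t) (last_id - first_id + 1) * expert_size;
        printf("copy experts [%d, %d]: offset=%zu size=%zu\n", (int) first_id, (int) last_id, offset, size);
    };

    // find the first used expert
    int id = 0;
    while (id < n_expert && !used[id]) {
        id++;
    }
    if (id == n_expert) {
        return 0; // no experts used, nothing to copy
    }

    int32_t first_id = id;
    int32_t last_id  = id;
    for (++id; id < n_expert; ++id) {
        if (!used[id]) {
            continue;                  // unused expert, skip
        }
        if (id == last_id + 1) {
            last_id = id;              // extend the current run
            continue;
        }
        copy_range(first_id, last_id); // flush the finished run
        first_id = id;
        last_id  = id;
    }
    copy_range(first_id, last_id);     // flush the final run

    return 0;
}
```

Unlike the commit, this sketch only prints each range; the real code calls `ggml_backend_tensor_set_async` for every range and copies a little extra past the last expert of a run so the quantization padding stays free of NaNs for CUDA MMQ.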
