Commit 6e99fb2

minor improvements and code cleanup
ggml-ci
1 parent 497474b commit 6e99fb2

File tree

1 file changed: +51 -37 lines changed


ggml/src/ggml-backend.cpp

Lines changed: 51 additions & 37 deletions
@@ -21,7 +21,6 @@
 #include <string.h>
 #include <algorithm>
 #include <vector>
-#include <set>
 
 #ifdef __APPLE__
 #include <sys/types.h>
@@ -1352,6 +1351,10 @@ static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) {
 static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) {
     struct ggml_backend_sched_split * splits = sched->splits;
 
+    ggml_tensor * prev_ids_tensor = nullptr;
+    std::vector<int32_t> ids;
+    std::vector<ggml_bitset_t> used_ids;
+
     for (int i = 0; i < sched->n_splits; i++) {
         struct ggml_backend_sched_split * split = &splits[i];
         int split_backend_id = split->backend_id;
@@ -1379,54 +1382,66 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
                 ggml_backend_synchronize(split_backend);
             }
 
-#if 1
+            // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used
             ggml_tensor * node = split->graph.nodes[0];
             if (split->graph.n_nodes > 0 &&
                 ggml_backend_buffer_get_usage(input->buffer) == GGML_BACKEND_BUFFER_USAGE_WEIGHTS &&
                 ggml_backend_buffer_is_host(input->buffer) && (
                     (node->src[0] == input_cpy && node->op == GGML_OP_MUL_MAT_ID)
-                    /*|| (node->src[1] == input_cpy && node->op == GGML_OP_ADD_ID) */)) {
+                    //|| (node->src[1] == input_cpy && node->op == GGML_OP_ADD_ID) /* GGML_OP_ADD_ID weights are small and not worth splitting */
+                )) {
+
+                const int64_t n_expert = node->op == GGML_OP_MUL_MAT_ID ? input->ne[2] : input->ne[1];
+                const size_t expert_size = node->op == GGML_OP_MUL_MAT_ID ? input->nb[2] : input->nb[1];
 
                 ggml_backend_synchronize(input_backend);
 
-                // find the ids
+                // get the ids
                 ggml_tensor * ids_tensor = node->src[2];
-                std::vector<int32_t> ids(ggml_nbytes(ids_tensor) / sizeof(int32_t));
-                ggml_backend_tensor_get_async(split_backend, ids_tensor, ids.data(), 0, ggml_nbytes(ids_tensor));
-
-                ggml_backend_synchronize(split_backend);
+                if (ids_tensor != prev_ids_tensor) {
+                    ids.resize(ggml_nbytes(ids_tensor) / sizeof(int32_t));
+                    ggml_backend_tensor_get_async(split_backend, ids_tensor, ids.data(), 0, ggml_nbytes(ids_tensor));
+                    ggml_backend_synchronize(split_backend);
 
-                std::set<int32_t> unique_ids;
-                for (int64_t i1 = 0; i1 < ids_tensor->ne[1]; i1++) {
-                    for (int64_t i0 = 0; i0 < ids_tensor->ne[0]; i0++) {
-                        int32_t id = ids[i1 * ids_tensor->nb[1]/sizeof(int32_t) + i0 * ids_tensor->nb[0]/sizeof(int32_t)];
-                        unique_ids.insert(id);
+                    // find the used experts
+                    used_ids.clear();
+                    used_ids.resize(ggml_bitset_size(n_expert));
+                    for (int64_t i1 = 0; i1 < ids_tensor->ne[1]; i1++) {
+                        for (int64_t i0 = 0; i0 < ids_tensor->ne[0]; i0++) {
+                            int32_t id = ids[i1 * ids_tensor->nb[1]/sizeof(int32_t) + i0 * ids_tensor->nb[0]/sizeof(int32_t)];
+                            ggml_bitset_set(used_ids.data(), id);
+                        }
                     }
+
+                    prev_ids_tensor = ids_tensor;
                 }
 
                 // group consecutive experts and copy them together
-                GGML_ASSERT(!unique_ids.empty());
-
-                auto it = unique_ids.begin();
-                int32_t first_id = *it;
-                int32_t last_id = first_id;
-
                 auto copy_experts = [&](int32_t first_id, int32_t last_id) {
-                    const size_t expert_size = node->op == GGML_OP_MUL_MAT_ID ? input->nb[2] : input->nb[1];
                     const size_t expert_offset = first_id * expert_size;
                     const size_t expert_size_copy = (last_id - first_id + 1) * expert_size;
-                    const size_t padding = 512;
-                    const size_t padding_end = last_id < input->ne[2] - 1 ? std::min<size_t>(expert_size, padding) : 0;
+                    const size_t padding = std::min<size_t>(expert_size, 512);
+                    const size_t padding_end = last_id < n_expert - 1 ? padding : 0;
 
                     ggml_backend_tensor_set_async(split_backend,
                         input_cpy,
                         (const uint8_t *)input->data + expert_offset, expert_offset,
-                        // copy a bit extra to ensure there are no NaNs in the padding
+                        // copy a bit extra at the end to ensure there are no NaNs in the padding of the last expert
+                        // this is necessary for MMQ in the CUDA backend
                        expert_size_copy + padding_end);
                 };
 
-                for (++it; it != unique_ids.end(); ++it) {
-                    const int32_t id = *it;
+                int id = 0;
+                while (!ggml_bitset_get(used_ids.data(), id)) {
+                    id++;
+                }
+                int32_t first_id = id;
+                int32_t last_id = first_id;
+
+                for (++id; id < n_expert; ++id) {
+                    if (!ggml_bitset_get(used_ids.data(), id)) {
+                        continue;
+                    }
 
                     if (id == last_id + 1) {
                         last_id = id;
@@ -1439,19 +1454,18 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
                     last_id = id;
                 }
                 copy_experts(first_id, last_id);
-            } else
-#endif
-
-            // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
-            // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
-            if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
-                ggml_backend_synchronize(input_backend);
-                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
-                    ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
-                } else {
-                    ggml_backend_synchronize(split_backend);
+            } else {
+                // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
+                // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
+                if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
+                    ggml_backend_synchronize(input_backend);
+                    if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
+                        ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
+                    } else {
+                        ggml_backend_synchronize(split_backend);
+                    }
+                    ggml_backend_tensor_copy(input, input_cpy);
                 }
-                ggml_backend_tensor_copy(input, input_cpy);
             }
         }
     }
