Commit 497474b

sched : copy only the used experts when offloading prompt processing
1 parent 7aeee88 · commit 497474b

File tree

1 file changed: +66 -2

ggml/src/ggml-backend.cpp

Lines changed: 66 additions & 2 deletions
@@ -19,9 +19,9 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <string>
-#include <vector>
 #include <algorithm>
+#include <vector>
+#include <set>
 
 #ifdef __APPLE__
 #include <sys/types.h>
@@ -1378,6 +1378,70 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
                 } else {
                     ggml_backend_synchronize(split_backend);
                 }
+
+#if 1
+                ggml_tensor * node = split->graph.nodes[0];
+                if (split->graph.n_nodes > 0 &&
+                    ggml_backend_buffer_get_usage(input->buffer) == GGML_BACKEND_BUFFER_USAGE_WEIGHTS &&
+                    ggml_backend_buffer_is_host(input->buffer) && (
+                        (node->src[0] == input_cpy && node->op == GGML_OP_MUL_MAT_ID)
+                        /*|| (node->src[1] == input_cpy && node->op == GGML_OP_ADD_ID) */)) {
+
+                    ggml_backend_synchronize(input_backend);
+
+                    // find the ids
+                    ggml_tensor * ids_tensor = node->src[2];
+                    std::vector<int32_t> ids(ggml_nbytes(ids_tensor) / sizeof(int32_t));
+                    ggml_backend_tensor_get_async(split_backend, ids_tensor, ids.data(), 0, ggml_nbytes(ids_tensor));
+
+                    ggml_backend_synchronize(split_backend);
+
+                    std::set<int32_t> unique_ids;
+                    for (int64_t i1 = 0; i1 < ids_tensor->ne[1]; i1++) {
+                        for (int64_t i0 = 0; i0 < ids_tensor->ne[0]; i0++) {
+                            int32_t id = ids[i1 * ids_tensor->nb[1]/sizeof(int32_t) + i0 * ids_tensor->nb[0]/sizeof(int32_t)];
+                            unique_ids.insert(id);
+                        }
+                    }
+
+                    // group consecutive experts and copy them together
+                    GGML_ASSERT(!unique_ids.empty());
+
+                    auto it = unique_ids.begin();
+                    int32_t first_id = *it;
+                    int32_t last_id = first_id;
+
+                    auto copy_experts = [&](int32_t first_id, int32_t last_id) {
+                        const size_t expert_size = node->op == GGML_OP_MUL_MAT_ID ? input->nb[2] : input->nb[1];
+                        const size_t expert_offset = first_id * expert_size;
+                        const size_t expert_size_copy = (last_id - first_id + 1) * expert_size;
+                        const size_t padding = 512;
+                        const size_t padding_end = last_id < input->ne[2] - 1 ? std::min<size_t>(expert_size, padding) : 0;
+
+                        ggml_backend_tensor_set_async(split_backend,
+                            input_cpy,
+                            (const uint8_t *)input->data + expert_offset, expert_offset,
+                            // copy a bit extra to ensure there are no NaNs in the padding
+                            expert_size_copy + padding_end);
+                    };
+
+                    for (++it; it != unique_ids.end(); ++it) {
+                        const int32_t id = *it;
+
+                        if (id == last_id + 1) {
+                            last_id = id;
+                            continue;
+                        }
+
+                        copy_experts(first_id, last_id);
+
+                        first_id = id;
+                        last_id = id;
+                    }
+                    copy_experts(first_id, last_id);
+                } else
+#endif
+
                 // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
                 // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
                 if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
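
For context: the new path is taken only when the input being copied is an expert-weights tensor in a host (CPU) buffer whose consumer in the split is a GGML_OP_MUL_MAT_ID node, i.e. MoE expert weights during prompt processing. Instead of uploading the whole tensor, it reads back the router's ids tensor, collects the set of experts actually selected, and uploads only those slices. The sketch below is not part of the commit; it isolates the id-collection step with hypothetical shape and stride values, and shows why the byte strides nb[] are divided by sizeof(int32_t) before indexing the downloaded buffer.

// Standalone sketch: collect unique expert ids from an int32 "ids" tensor,
// addressing it through byte strides the way the commit does.
// The shape/stride values here are hypothetical, not taken from ggml.
#include <cstdint>
#include <cstdio>
#include <set>
#include <vector>

int main() {
    // hypothetical ids tensor: ne[0] = 4 experts per token, ne[1] = 3 tokens
    const int64_t ne0 = 4, ne1 = 3;
    // byte strides as ggml stores them: nb[0] between elements, nb[1] between rows
    const size_t nb0 = sizeof(int32_t), nb1 = ne0 * sizeof(int32_t);

    std::vector<int32_t> ids = {
        2, 7, 7, 3,
        1, 2, 3, 8,
        7, 8, 2, 2,
    };

    std::set<int32_t> unique_ids;
    for (int64_t i1 = 0; i1 < ne1; i1++) {
        for (int64_t i0 = 0; i0 < ne0; i0++) {
            // nb values are in bytes, but `ids` is an int32 array, so divide
            // by the element size to turn byte offsets into element indices
            unique_ids.insert(ids[i1 * nb1 / sizeof(int32_t) + i0 * nb0 / sizeof(int32_t)]);
        }
    }

    for (int32_t id : unique_ids) {
        printf("%d ", id); // prints: 1 2 3 7 8
    }
    printf("\n");
}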
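The copy loop then walks the sorted std::set and coalesces runs of consecutive ids, so each contiguous range of experts costs a single transfer. Below is a self-contained sketch of that coalescing, again not from the commit, with a printf stub named copy_range standing in for the ggml_backend_tensor_set_async call:

// Standalone sketch of the run-coalescing loop: consecutive expert ids are
// merged into one [first_id, last_id] range so each range needs one copy.
// copy_range is a stand-in for ggml_backend_tensor_set_async in the commit.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <set>

int main() {
    std::set<int32_t> unique_ids = {1, 2, 3, 7, 8, 12}; // hypothetical selection
    assert(!unique_ids.empty());

    auto copy_range = [](int32_t first_id, int32_t last_id) {
        printf("copy experts [%d..%d]\n", first_id, last_id);
    };

    auto it = unique_ids.begin();
    int32_t first_id = *it;
    int32_t last_id  = first_id;

    for (++it; it != unique_ids.end(); ++it) {
        if (*it == last_id + 1) {
            last_id = *it;              // extend the current run
            continue;
        }
        copy_range(first_id, last_id);  // flush the finished run
        first_id = last_id = *it;       // start a new run
    }
    copy_range(first_id, last_id);      // flush the final run
    // output: copy experts [1..3], [7..8], [12..12]
}

With the sample ids {1, 2, 3, 7, 8, 12} this issues three transfers instead of six. Note that keeping the ids in a std::set (the include added in the first hunk) is what guarantees sorted iteration order, which the run detection relies on.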
