
Commit 3df2244

Gadflyii and slaren authored
llama : add --no-host to disable host buffers (ggml-org#16310)
* implement --no-host to disable host buffer
* fix equal_mparams
* move no-host enumeration order together with other model params

---------

Co-authored-by: slaren <[email protected]>
1 parent c08002a commit 3df2244

File tree

6 files changed, +56 -10 lines changed

common/arg.cpp

Lines changed: 7 additions & 0 deletions
@@ -2584,6 +2584,13 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.no_extra_bufts = true;
         }
     ).set_env("LLAMA_ARG_NO_REPACK"));
+    add_opt(common_arg(
+        {"--no-host"},
+        "bypass host buffer allowing extra buffers to be used",
+        [](common_params & params) {
+            params.no_host = true;
+        }
+    ).set_env("LLAMA_ARG_NO_HOST"));
     add_opt(common_arg(
         {"-ctk", "--cache-type-k"}, "TYPE",
         string_format(

common/common.cpp

Lines changed: 1 addition & 0 deletions
@@ -1133,6 +1133,7 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {
     mparams.use_mlock       = params.use_mlock;
     mparams.check_tensors   = params.check_tensors;
     mparams.use_extra_bufts = !params.no_extra_bufts;
+    mparams.no_host         = params.no_host;
 
     if (params.kv_overrides.empty()) {
         mparams.kv_overrides = NULL;

common/common.h

Lines changed: 1 addition & 0 deletions
@@ -392,6 +392,7 @@ struct common_params {
     bool check_tensors  = false; // validate tensor data
     bool no_op_offload  = false; // globally disable offload host tensor operations to device
     bool no_extra_bufts = false; // disable extra buffer types (used for weight repacking)
+    bool no_host        = false; // bypass host buffer allowing extra buffers to be used
 
     bool single_turn = false; // single turn chat conversation
include/llama.h

Lines changed: 1 addition & 0 deletions
@@ -296,6 +296,7 @@ extern "C" {
         bool use_mlock;       // force system to keep model in RAM
         bool check_tensors;   // validate model tensor data
         bool use_extra_bufts; // use extra buffer types (used for weight repacking)
+        bool no_host;         // bypass host buffer allowing extra buffers to be used
     };
 
     // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
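For callers of the C API, the new field is opt-in and defaults to false (see llama_model_default_params below). A minimal sketch of how an application might disable host buffers when loading a model; the "model.gguf" path is a placeholder, and llama_model_load_from_file / llama_model_free are assumed to match the current public API:

#include "llama.h"

int main() {
    llama_model_params mparams = llama_model_default_params();
    mparams.no_host      = true; // skip pinned host buffers; CPU weights use extra/plain CPU buffer types
    mparams.n_gpu_layers = 99;   // illustrative value: offload as many layers as possible

    // "model.gguf" is a placeholder path
    llama_model * model = llama_model_load_from_file("model.gguf", mparams);
    if (model == NULL) {
        return 1;
    }

    // ... create a context and run inference as usual ...

    llama_model_free(model);
    return 0;
}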

src/llama-model.cpp

Lines changed: 10 additions & 7 deletions
@@ -310,7 +310,7 @@ static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hpara
 }
 
 // CPU: ACCEL -> GPU host -> CPU extra -> CPU
-static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices, bool use_extra_bufts) {
+static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices, bool use_extra_bufts, bool no_host) {
     buft_list_t buft_list;
 
     // add ACCEL buffer types
@@ -331,11 +331,13 @@ static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & de
     // generally, this will be done using the first device in the list
     // a better approach would be to handle this on a weight-by-weight basis using the offload_op
     // function of the device to determine if it would benefit from being stored in a host buffer
-    for (auto * dev : devices) {
-        ggml_backend_buffer_type_t buft = ggml_backend_dev_host_buffer_type(dev);
-        if (buft) {
-            buft_list.emplace_back(dev, buft);
-            break;
+    if (!no_host) {
+        for (auto * dev : devices) {
+            ggml_backend_buffer_type_t buft = ggml_backend_dev_host_buffer_type(dev);
+            if (buft) {
+                buft_list.emplace_back(dev, buft);
+                break;
+            }
         }
     }
 
@@ -2083,7 +2085,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
     LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, ml.use_mmap ? "true" : "false");
 
     // build a list of buffer types for the CPU and GPU devices
-    pimpl->cpu_buft_list = make_cpu_buft_list(devices, params.use_extra_bufts);
+    pimpl->cpu_buft_list = make_cpu_buft_list(devices, params.use_extra_bufts, params.no_host);
     for (auto * dev : devices) {
         buft_list_t buft_list = make_gpu_buft_list(dev, split_mode, tensor_split);
         // add CPU buffer types as a fallback
@@ -19865,6 +19867,7 @@ llama_model_params llama_model_default_params() {
         /*.use_mlock       =*/ false,
         /*.check_tensors   =*/ false,
         /*.use_extra_bufts =*/ true,
+        /*.no_host         =*/ false,
     };
 
     return result;
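The effect of the parameter is easiest to read off the ordering comment above make_cpu_buft_list ("CPU: ACCEL -> GPU host -> CPU extra -> CPU"): with no_host set, the GPU host (pinned) entry is never appended, so CPU-resident weights fall through to the extra (e.g. repacked) or plain CPU buffer types. A toy illustration of that priority list, using plain strings instead of real ggml_backend buffer-type handles:

#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for make_cpu_buft_list: builds the CPU buffer-type priority
// list as strings; the real code collects ggml_backend_buffer_type_t handles.
static std::vector<std::string> cpu_priority(bool use_extra_bufts, bool no_host) {
    std::vector<std::string> order;
    order.push_back("ACCEL");
    if (!no_host) {
        order.push_back("GPU host (pinned)"); // skipped entirely when --no-host is set
    }
    if (use_extra_bufts) {
        order.push_back("CPU extra (repacked)");
    }
    order.push_back("CPU");
    return order;
}

int main() {
    for (bool no_host : { false, true }) {
        std::cout << "no_host=" << no_host << ":";
        for (const auto & s : cpu_priority(/*use_extra_bufts=*/true, no_host)) {
            std::cout << " -> " << s;
        }
        std::cout << "\n";
    }
    return 0;
}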

tools/llama-bench/llama-bench.cpp

Lines changed: 36 additions & 3 deletions
@@ -336,6 +336,7 @@ struct cmd_params {
     std::vector<bool> use_mmap;
     std::vector<bool> embeddings;
     std::vector<bool> no_op_offload;
+    std::vector<bool> no_host;
     ggml_numa_strategy numa;
     int reps;
     ggml_sched_priority prio;
@@ -373,6 +374,7 @@ static const cmd_params cmd_params_defaults = {
     /* use_mmap      */ { true },
     /* embeddings    */ { false },
     /* no_op_offload */ { false },
+    /* no_host       */ { false },
     /* numa          */ GGML_NUMA_STRATEGY_DISABLED,
     /* reps          */ 5,
     /* prio          */ GGML_SCHED_PRIO_NORMAL,
@@ -453,6 +455,8 @@ static void print_usage(int /* argc */, char ** argv) {
     printf("  -ot --override-tensor <tensor name pattern>=<buffer type>;...\n");
     printf("                                            (default: disabled)\n");
     printf("  -nopo, --no-op-offload <0|1>              (default: 0)\n");
+    printf("  --no-host <0|1>                           (default: %s)\n",
+           join(cmd_params_defaults.no_host, ",").c_str());
     printf("\n");
     printf(
         "Multiple values can be given for each parameter by separating them with ','\n"
@@ -782,6 +786,13 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
             }
             auto p = string_split<bool>(argv[i], split_delim);
             params.no_op_offload.insert(params.no_op_offload.end(), p.begin(), p.end());
+        } else if (arg == "--no-host") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            auto p = string_split<bool>(argv[i], split_delim);
+            params.no_host.insert(params.no_host.end(), p.begin(), p.end());
         } else if (arg == "-ts" || arg == "--tensor-split") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -1003,6 +1014,9 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
     if (params.no_op_offload.empty()) {
         params.no_op_offload = cmd_params_defaults.no_op_offload;
     }
+    if (params.no_host.empty()) {
+        params.no_host = cmd_params_defaults.no_host;
+    }
     if (params.n_threads.empty()) {
         params.n_threads = cmd_params_defaults.n_threads;
     }
@@ -1044,6 +1058,7 @@ struct cmd_params_instance {
     bool use_mmap;
     bool embeddings;
     bool no_op_offload;
+    bool no_host;
 
     llama_model_params to_llama_mparams() const {
         llama_model_params mparams = llama_model_default_params();
@@ -1056,6 +1071,7 @@ struct cmd_params_instance {
         mparams.main_gpu     = main_gpu;
         mparams.tensor_split = tensor_split.data();
         mparams.use_mmap     = use_mmap;
+        mparams.no_host      = no_host;
 
         if (n_cpu_moe <= 0) {
             if (tensor_buft_overrides.empty()) {
@@ -1101,6 +1117,7 @@ struct cmd_params_instance {
                split_mode == other.split_mode &&
                main_gpu == other.main_gpu && use_mmap == other.use_mmap && tensor_split == other.tensor_split &&
                devices == other.devices &&
+               no_host == other.no_host &&
                vec_tensor_buft_override_equal(tensor_buft_overrides, other.tensor_buft_overrides);
     }
 
@@ -1136,6 +1153,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
     for (const auto & ts : params.tensor_split)
     for (const auto & ot : params.tensor_buft_overrides)
     for (const auto & mmp : params.use_mmap)
+    for (const auto & noh : params.no_host)
     for (const auto & embd : params.embeddings)
     for (const auto & nopo : params.no_op_offload)
     for (const auto & nb : params.n_batch)
@@ -1178,6 +1196,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
                 /* .use_mmap     = */ mmp,
                 /* .embeddings   = */ embd,
                 /* .no_op_offload= */ nopo,
+                /* .no_host      = */ noh,
             };
             instances.push_back(instance);
         }
@@ -1211,6 +1230,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
                 /* .use_mmap     = */ mmp,
                 /* .embeddings   = */ embd,
                 /* .no_op_offload= */ nopo,
+                /* .no_host      = */ noh,
             };
             instances.push_back(instance);
         }
@@ -1244,6 +1264,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
                 /* .use_mmap     = */ mmp,
                 /* .embeddings   = */ embd,
                 /* .no_op_offload= */ nopo,
+                /* .no_host      = */ noh,
             };
             instances.push_back(instance);
         }
@@ -1282,6 +1303,7 @@ struct test {
     bool use_mmap;
     bool embeddings;
     bool no_op_offload;
+    bool no_host;
     int n_prompt;
     int n_gen;
     int n_depth;
@@ -1318,6 +1340,7 @@ struct test {
         use_mmap = inst.use_mmap;
         embeddings = inst.embeddings;
         no_op_offload = inst.no_op_offload;
+        no_host = inst.no_host;
         n_prompt = inst.n_prompt;
         n_gen = inst.n_gen;
         n_depth = inst.n_depth;
@@ -1375,8 +1398,8 @@ struct test {
             "type_k", "type_v", "n_gpu_layers", "n_cpu_moe", "split_mode",
             "main_gpu", "no_kv_offload", "flash_attn", "devices", "tensor_split",
             "tensor_buft_overrides", "use_mmap", "embeddings", "no_op_offload",
-            "n_prompt", "n_gen", "n_depth", "test_time", "avg_ns",
-            "stddev_ns", "avg_ts", "stddev_ts"
+            "no_host", "n_prompt", "n_gen", "n_depth", "test_time",
+            "avg_ns", "stddev_ns", "avg_ts", "stddev_ts"
         };
         return fields;
     }
@@ -1391,7 +1414,7 @@ struct test {
             return INT;
         }
         if (field == "f16_kv" || field == "no_kv_offload" || field == "cpu_strict" || field == "flash_attn" ||
-            field == "use_mmap" || field == "embeddings") {
+            field == "use_mmap" || field == "embeddings" || field == "no_host") {
             return BOOL;
         }
         if (field == "avg_ts" || field == "stddev_ts") {
@@ -1466,6 +1489,7 @@ struct test {
             std::to_string(use_mmap),
             std::to_string(embeddings),
             std::to_string(no_op_offload),
+            std::to_string(no_host),
             std::to_string(n_prompt),
             std::to_string(n_gen),
             std::to_string(n_depth),
@@ -1654,6 +1678,9 @@ struct markdown_printer : public printer {
         if (field == "no_op_offload") {
             return 4;
         }
+        if (field == "no_host") {
+            return 4;
+        }
 
         int width = std::max((int) field.length(), 10);
 
@@ -1688,6 +1715,9 @@ struct markdown_printer : public printer {
         if (field == "no_op_offload") {
             return "nopo";
         }
+        if (field == "no_host") {
+            return "noh";
+        }
         if (field == "devices") {
             return "dev";
         }
@@ -1768,6 +1798,9 @@ struct markdown_printer : public printer {
         if (params.no_op_offload.size() > 1 || params.no_op_offload != cmd_params_defaults.no_op_offload) {
            fields.emplace_back("no_op_offload");
        }
+        if (params.no_host.size() > 1 || params.no_host != cmd_params_defaults.no_host) {
+            fields.emplace_back("no_host");
+        }
        fields.emplace_back("test");
        fields.emplace_back("t/s");
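Like the other llama-bench parameters, --no-host accepts comma-separated values ("Multiple values can be given for each parameter by separating them with ','"), and the nested for-loops in get_cmd_params_instances take the cross product, so e.g. --no-host 0,1 benchmarks both configurations in a single run. A small standalone sketch of that expansion, with the matrix reduced to a hypothetical mmap axis plus the new no_host axis for brevity:

#include <cstdio>
#include <vector>

int main() {
    // Hypothetical values, as if parsed from "--mmap 1" and "--no-host 0,1"
    std::vector<bool> use_mmap = { true };
    std::vector<bool> no_host  = { false, true };

    // Mirrors the nested loops in get_cmd_params_instances: one instance per combination
    int n = 0;
    for (bool mmp : use_mmap) {
        for (bool noh : no_host) {
            std::printf("instance %d: use_mmap=%d no_host=%d\n", n++, (int) mmp, (int) noh);
        }
    }
    return 0;
}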
