Commit e81b8e4

llama: use FA + max. GPU layers by default (ggml-org#15434)

* llama: use max. GPU layers by default, auto -fa
* ggml-backend: abort instead of segfault

1 parent: 38ad381

19 files changed: +235 -72 lines

common/arg.cpp

Lines changed: 12 additions & 18 deletions
@@ -1545,10 +1545,18 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_RETRIEVAL}));
     add_opt(common_arg(
-        {"-fa", "--flash-attn"},
-        string_format("enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled"),
-        [](common_params & params) {
-            params.flash_attn = true;
+        {"-fa", "--flash-attn"}, "FA",
+        string_format("set Flash Attention use ('on', 'off', or 'auto', default: '%s')", llama_flash_attn_type_name(params.flash_attn_type)),
+        [](common_params & params, const std::string & value) {
+            if (value == "on" || value == "enabled") {
+                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED;
+            } else if (value == "off" || value == "disabled") {
+                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
+            } else if (value == "auto") {
+                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO;
+            } else {
+                throw std::runtime_error(string_format("error: unknown value for --flash-attn: '%s'\n", value.c_str()));
+            }
         }
     ).set_env("LLAMA_ARG_FLASH_ATTN"));
     add_opt(common_arg(
@@ -3459,8 +3467,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         params.model.hf_repo = "ggml-org/Qwen2.5-Coder-1.5B-Q8_0-GGUF";
         params.model.hf_file = "qwen2.5-coder-1.5b-q8_0.gguf";
         params.port = 8012;
-        params.n_gpu_layers = 99;
-        params.flash_attn = true;
         params.n_ubatch = 1024;
         params.n_batch = 1024;
         params.n_ctx = 0;
@@ -3475,8 +3481,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         params.model.hf_repo = "ggml-org/Qwen2.5-Coder-3B-Q8_0-GGUF";
         params.model.hf_file = "qwen2.5-coder-3b-q8_0.gguf";
         params.port = 8012;
-        params.n_gpu_layers = 99;
-        params.flash_attn = true;
         params.n_ubatch = 1024;
         params.n_batch = 1024;
         params.n_ctx = 0;
@@ -3491,8 +3495,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         params.model.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF";
         params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
         params.port = 8012;
-        params.n_gpu_layers = 99;
-        params.flash_attn = true;
         params.n_ubatch = 1024;
         params.n_batch = 1024;
         params.n_ctx = 0;
@@ -3508,10 +3510,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
         params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
         params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
-        params.speculative.n_gpu_layers = 99;
         params.port = 8012;
-        params.n_gpu_layers = 99;
-        params.flash_attn = true;
         params.n_ubatch = 1024;
         params.n_batch = 1024;
         params.n_ctx = 0;
@@ -3527,10 +3526,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         params.model.hf_file = "qwen2.5-coder-14b-q8_0.gguf";
         params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
         params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
-        params.speculative.n_gpu_layers = 99;
         params.port = 8012;
-        params.n_gpu_layers = 99;
-        params.flash_attn = true;
         params.n_ubatch = 1024;
         params.n_batch = 1024;
         params.n_ctx = 0;
@@ -3545,8 +3541,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         params.model.hf_repo = "ggml-org/Qwen3-Coder-30B-A3B-Instruct-Q8_0-GGUF";
         params.model.hf_file = "qwen3-coder-30b-a3b-instruct-q8_0.gguf";
         params.port = 8012;
-        params.n_gpu_layers = 99;
-        params.flash_attn = true;
         params.n_ubatch = 1024;
         params.n_batch = 1024;
         params.n_ctx = 0;
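
Taken on its own, the new handler is a simple tri-state parser. Below is a minimal standalone sketch of the same mapping, assuming a local stand-in enum and a hypothetical parse_flash_attn helper in place of llama.cpp's common_arg machinery (the 'on'/'off'/'auto' strings and the ENABLED/DISABLED/AUTO states come from the hunk above; the concrete enum values are illustrative):

#include <cstdio>
#include <stdexcept>
#include <string>

// Stand-in for llama_flash_attn_type; the numeric values are illustrative.
enum flash_attn_mode {
    FLASH_ATTN_AUTO     = -1,
    FLASH_ATTN_DISABLED =  0,
    FLASH_ATTN_ENABLED  =  1,
};

// Hypothetical helper mirroring the --flash-attn handler in the diff above.
static flash_attn_mode parse_flash_attn(const std::string & value) {
    if (value == "on"  || value == "enabled")  { return FLASH_ATTN_ENABLED;  }
    if (value == "off" || value == "disabled") { return FLASH_ATTN_DISABLED; }
    if (value == "auto")                       { return FLASH_ATTN_AUTO;     }
    throw std::runtime_error("unknown value for --flash-attn: '" + value + "'");
}

int main() {
    for (const char * v : { "on", "off", "auto" }) {
        std::printf("--flash-attn %-4s -> %d\n", v, (int) parse_flash_attn(v));
    }
    return 0;
}

Note that the same handler is also reachable through the LLAMA_ARG_FLASH_ATTN environment variable, per the .set_env() call retained in the hunk.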

common/common.cpp

Lines changed: 5 additions & 3 deletions
@@ -901,7 +901,8 @@ struct common_init_result common_init_from_params(common_params & params) {
 
     llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
     if (model == NULL) {
-        LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str());
+        LOG_ERR("%s: failed to load model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
+            __func__, params.model.path.c_str());
         return iparams;
     }
 
@@ -911,7 +912,8 @@ struct common_init_result common_init_from_params(common_params & params) {
 
     llama_context * lctx = llama_init_from_model(model, cparams);
     if (lctx == NULL) {
-        LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.path.c_str());
+        LOG_ERR("%s: failed to create context with model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
+            __func__, params.model.path.c_str());
         llama_model_free(model);
         return iparams;
     }
@@ -1157,10 +1159,10 @@ struct llama_context_params common_context_params_to_llama(const common_params &
     cparams.yarn_orig_ctx     = params.yarn_orig_ctx;
     cparams.pooling_type      = params.pooling_type;
     cparams.attention_type    = params.attention_type;
+    cparams.flash_attn_type   = params.flash_attn_type;
     cparams.cb_eval           = params.cb_eval;
     cparams.cb_eval_user_data = params.cb_eval_user_data;
     cparams.offload_kqv       = !params.no_kv_offload;
-    cparams.flash_attn        = params.flash_attn;
     cparams.no_perf           = params.no_perf;
     cparams.op_offload        = !params.no_op_offload;
     cparams.swa_full          = params.swa_full;
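
Together, these hunks replace the boolean cparams.flash_attn with the new flash_attn_type enum and attach a VRAM hint to both failure paths. As a hedged sketch of what a caller built against this commit might look like (the model path is a placeholder and the explicit assignments are for illustration only; the API names are taken from the hunks above and the public llama.h):

#include <cstdio>
#include "llama.h"

int main() {
    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 999; // offload as many layers as fit, matching the new default behavior

    llama_model * model = llama_model_load_from_file("model.gguf", mparams); // placeholder path
    if (model == NULL) {
        std::fprintf(stderr, "failed to load model, try reducing n_gpu_layers if you're running out of VRAM\n");
        return 1;
    }

    llama_context_params cparams = llama_context_default_params();
    cparams.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO; // the new tri-state field

    llama_context * ctx = llama_init_from_model(model, cparams);
    if (ctx == NULL) {
        std::fprintf(stderr, "failed to create context, try reducing n_gpu_layers if you're running out of VRAM\n");
        llama_model_free(model);
        return 1;
    }

    llama_free(ctx);
    llama_model_free(model);
    llama_backend_free();
    return 0;
}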

common/common.h

Lines changed: 1 addition & 1 deletion
@@ -312,6 +312,7 @@ struct common_params {
     enum llama_rope_scaling_type rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
     enum llama_pooling_type      pooling_type      = LLAMA_POOLING_TYPE_UNSPECIFIED; // pooling type for embeddings
     enum llama_attention_type    attention_type    = LLAMA_ATTENTION_TYPE_UNSPECIFIED; // attention type for embeddings
+    enum llama_flash_attn_type   flash_attn_type   = LLAMA_FLASH_ATTN_TYPE_AUTO;      // whether to use Flash Attention
 
     struct common_params_sampling    sampling;
     struct common_params_speculative speculative;
@@ -375,7 +376,6 @@ struct common_params {
     bool multiline_input = false; // reverse the usage of `\`
     bool simple_io       = false; // improves compatibility with subprocesses and limited consoles
     bool cont_batching   = true;  // insert new sequences for decoding on-the-fly
-    bool flash_attn      = false; // flash attention
     bool no_perf         = false; // disable performance metrics
     bool ctx_shift       = false; // context shift on infinite text generation
     bool swa_full        = false; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)

examples/diffusion/diffusion-cli.cpp

Lines changed: 1 addition & 1 deletion
@@ -564,7 +564,7 @@ int main(int argc, char ** argv) {
     ctx_params.n_ctx           = params.n_ctx;
     ctx_params.n_batch         = params.n_batch;
     ctx_params.n_ubatch        = params.n_ubatch;
-    ctx_params.flash_attn      = params.flash_attn;
+    ctx_params.flash_attn_type = params.flash_attn_type;
     ctx_params.no_perf         = params.no_perf;
     ctx_params.type_k          = params.cache_type_k;
     ctx_params.type_v          = params.cache_type_v;
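
For downstream code that, like this CLI, previously carried the boolean, the migration is mechanical. A hedged sketch (set_flash_attn is a hypothetical helper, not part of llama.cpp; the bool-to-enum mapping is an assumption for callers that had no notion of 'auto' before):

#include "llama.h"

// Hypothetical migration helper: map the removed bool onto the new enum.
static void set_flash_attn(llama_context_params & cparams, bool use_flash_attn) {
    // before this commit: cparams.flash_attn = use_flash_attn;
    cparams.flash_attn_type = use_flash_attn ? LLAMA_FLASH_ATTN_TYPE_ENABLED
                                             : LLAMA_FLASH_ATTN_TYPE_DISABLED;
}

Callers with no strong preference can instead leave the field at LLAMA_FLASH_ATTN_TYPE_AUTO and let the runtime decide, which is the behavior this commit makes the default.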
