Commit d515ab8

Merge branch 'master' into mradermacher
2 parents: eacac4c + a812838

128 files changed: +5274 additions, -1514 deletions


.clang-format

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ AllowShortIfStatementsOnASingleLine: Never
 AllowShortLambdasOnASingleLine: Inline
 AllowShortLoopsOnASingleLine: false
 AlwaysBreakBeforeMultilineStrings: true
-BinPackArguments: false
+BinPackArguments: true
 BinPackParameters: false # OnePerLine
 BitFieldColonSpacing: Both
 BreakBeforeBraces: Custom # Attach
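
For context, a hypothetical snippet (not repository code) illustrating what this clang-format option changes once a call no longer fits on one line:

    #include <string>

    // BinPackArguments controls how clang-format wraps call arguments.
    static int configure(const std::string & host, int port, bool use_tls, int timeout_ms, const std::string & user_agent) {
        (void) host; (void) use_tls; (void) timeout_ms; (void) user_agent;
        return port;
    }

    static int example() {
        // BinPackArguments: true (the new setting) lets wrapped arguments share lines:
        return configure("inference.example.invalid", 8080, true,
                         30000, "llama-bench/1.0");
        // BinPackArguments: false would instead put each argument on its own line:
        //   return configure("inference.example.invalid",
        //                    8080,
        //                    true,
        //                    30000,
        //                    "llama-bench/1.0");
    }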

README.md

Lines changed: 1 addition & 0 deletions
@@ -137,6 +137,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 - [X] [Trillion-7B-preview](https://huggingface.co/trillionlabs/Trillion-7B-preview)
 - [x] [Ling models](https://huggingface.co/collections/inclusionAI/ling-67c51c85b34a7ea0aba94c32)
 - [x] [LFM2 models](https://huggingface.co/collections/LiquidAI/lfm2-686d721927015b2ad73eaa38)
+- [x] [Hunyuan models](https://huggingface.co/collections/tencent/hunyuan-dense-model-6890632cda26b19119c9c5e7)
 
 #### Multimodal
 

ci/run.sh

Lines changed: 10 additions & 10 deletions
@@ -386,10 +386,10 @@ function gg_run_open_llama_7b_v2 {
 
     (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log
 
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
 
     function check_ppl {
         qnt="$1"
@@ -520,8 +520,8 @@ function gg_run_pythia_1_4b {
 
     (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log
 
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
 
     function check_ppl {
         qnt="$1"
@@ -651,10 +651,10 @@ function gg_run_pythia_2_8b {
 
     (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log
 
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
 
     function check_ppl {
         qnt="$1"

common/arg.cpp

Lines changed: 20 additions & 26 deletions
@@ -1545,10 +1545,18 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_RETRIEVAL}));
     add_opt(common_arg(
-        {"-fa", "--flash-attn"},
-        string_format("enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled"),
-        [](common_params & params) {
-            params.flash_attn = true;
+        {"-fa", "--flash-attn"}, "FA",
+        string_format("set Flash Attention use ('on', 'off', or 'auto', default: '%s')", llama_flash_attn_type_name(params.flash_attn_type)),
+        [](common_params & params, const std::string & value) {
+            if (value == "on" || value == "enabled" || value == "1") {
+                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED;
+            } else if (value == "off" || value == "disabled" || value == "0") {
+                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
+            } else if (value == "auto" || value == "-1") {
+                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO;
+            } else {
+                throw std::runtime_error(string_format("error: unkown value for --flash-attn: '%s'\n", value.c_str()));
+            }
         }
     ).set_env("LLAMA_ARG_FLASH_ATTN"));
     add_opt(common_arg(
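
For reference, a minimal standalone sketch of the tri-state mapping this new argument performs (helper and enum names here are invented for illustration; the accepted spellings mirror the handler above):

    #include <stdexcept>
    #include <string>

    // Hypothetical helper mirroring the -fa/--flash-attn handler above:
    // "on"/"enabled"/"1" and "off"/"disabled"/"0" select explicitly,
    // while "auto"/"-1" defers the decision to model load time.
    enum class flash_attn_mode { disabled, enabled, auto_detect };

    static flash_attn_mode parse_flash_attn(const std::string & value) {
        if (value == "on"   || value == "enabled"  || value == "1") { return flash_attn_mode::enabled; }
        if (value == "off"  || value == "disabled" || value == "0") { return flash_attn_mode::disabled; }
        if (value == "auto" || value == "-1")                       { return flash_attn_mode::auto_detect; }
        throw std::runtime_error("unknown value for --flash-attn: '" + value + "'");
    }

On the command line this corresponds to -fa on, -fa off, or -fa auto (the same values accepted via the LLAMA_ARG_FLASH_ATTN environment variable), which is exactly what the ci/run.sh changes above switch to.
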
@@ -2458,7 +2466,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
     ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_N_CPU_MOE_DRAFT"));
     add_opt(common_arg(
         {"-ngl", "--gpu-layers", "--n-gpu-layers"}, "N",
-        "number of layers to store in VRAM",
+        string_format("max. number of layers to store in VRAM (default: %d)", params.n_gpu_layers),
         [](common_params & params, int value) {
             params.n_gpu_layers = value;
             if (!llama_supports_gpu_offload()) {
@@ -2954,20 +2962,20 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.endpoint_metrics = true;
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_METRICS"));
-    add_opt(common_arg(
-        {"--slots"},
-        string_format("enable slots monitoring endpoint (default: %s)", params.endpoint_slots ? "enabled" : "disabled"),
-        [](common_params & params) {
-            params.endpoint_slots = true;
-        }
-    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_SLOTS"));
     add_opt(common_arg(
         {"--props"},
         string_format("enable changing global properties via POST /props (default: %s)", params.endpoint_props ? "enabled" : "disabled"),
         [](common_params & params) {
             params.endpoint_props = true;
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_PROPS"));
+    add_opt(common_arg(
+        {"--slots"},
+        string_format("enable slots monitoring endpoint (default: %s)", params.endpoint_slots ? "enabled" : "disabled"),
+        [](common_params & params) {
+            params.endpoint_slots = true;
+        }
+    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_SLOTS"));
     add_opt(common_arg(
         {"--no-slots"},
         "disables slots monitoring endpoint",
@@ -3459,8 +3467,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_repo = "ggml-org/Qwen2.5-Coder-1.5B-Q8_0-GGUF";
             params.model.hf_file = "qwen2.5-coder-1.5b-q8_0.gguf";
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3475,8 +3481,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_repo = "ggml-org/Qwen2.5-Coder-3B-Q8_0-GGUF";
             params.model.hf_file = "qwen2.5-coder-3b-q8_0.gguf";
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3491,8 +3495,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF";
             params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3508,10 +3510,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
             params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
             params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
-            params.speculative.n_gpu_layers = 99;
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3527,10 +3526,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_file = "qwen2.5-coder-14b-q8_0.gguf";
             params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
             params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
-            params.speculative.n_gpu_layers = 99;
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3545,8 +3541,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_repo = "ggml-org/Qwen3-Coder-30B-A3B-Instruct-Q8_0-GGUF";
             params.model.hf_file = "qwen3-coder-30b-a3b-instruct-q8_0.gguf";
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;

common/chat.cpp

Lines changed: 98 additions & 1 deletion
@@ -623,6 +623,7 @@ const char * common_chat_format_name(common_chat_format format) {
         case COMMON_CHAT_FORMAT_GRANITE: return "Granite";
         case COMMON_CHAT_FORMAT_GPT_OSS: return "GPT-OSS";
         case COMMON_CHAT_FORMAT_SEED_OSS: return "Seed-OSS";
+        case COMMON_CHAT_FORMAT_NEMOTRON_V2: return "Nemotron V2";
         default:
             throw std::runtime_error("Unknown chat format");
     }
@@ -1184,6 +1185,67 @@ static common_chat_params common_chat_params_init_llama_3_x(const common_chat_te
     });
     return data;
 }
+
+static common_chat_params common_chat_params_init_nemotron_v2(const common_chat_template & tmpl, const struct templates_params & inputs) {
+    common_chat_params data;
+
+    // Generate the prompt using the apply() function with the template
+    data.prompt = apply(tmpl, inputs);
+    data.format = COMMON_CHAT_FORMAT_NEMOTRON_V2;
+
+    // Handle thinking tags appropriately based on inputs.enable_thinking
+    if (string_ends_with(data.prompt, "<think>\n")) {
+        if (!inputs.enable_thinking) {
+            data.prompt += "</think>";
+        } else {
+            data.thinking_forced_open = true;
+        }
+    }
+
+    // When tools are present, build grammar for the <TOOLCALL> format, similar to CommandR, but without tool call ID
+    if (!inputs.tools.is_null() && inputs.tools.is_array() && !inputs.tools.empty()) {
+        data.grammar_lazy = true;
+        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
+            auto schemas = json::array();
+            foreach_function(inputs.tools, [&](const json & tool) {
+                const auto & function = tool.at("function");
+                schemas.push_back({
+                    { "type", "object" },
+                    { "properties",
+                      {
+                          { "name",
+                            {
+                                { "type", "string" },
+                                { "const", function.at("name") },
+                            } },
+                          { "arguments", function.at("parameters") },
+                      } },
+                    { "required", json::array({ "name", "arguments" }) },
+                });
+            });
+            auto schema = json{
+                { "type", "array" },
+                { "items", schemas.size() == 1 ? schemas[0] : json{ { "anyOf", schemas } } },
+                { "minItems", 1 },
+            };
+            if (!inputs.parallel_tool_calls) {
+                schema["maxItems"] = 1;
+            }
+            builder.add_rule("root",
+                std::string(data.thinking_forced_open ? "( \"</think>\" space )? " : "") +
+                "\"<TOOLCALL>\" " + builder.add_schema("tool_calls", schema) +
+                " \"</TOOLCALL>\"");
+        });
+        data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_PATTERN_FULL,
+            // If thinking_forced_open, then we capture the </think> tag in the grammar,
+            // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
+            std::string(data.thinking_forced_open ?
+                "[\\s\\S]*?(</think>\\s*)" :
+                "(?:<think>[\\s\\S]*?</think>\\s*)?") +
+            "(<TOOLCALL>)[\\s\\S]*" });
+    }
+    return data;
+}
 static void common_chat_parse_llama_3_1(common_chat_msg_parser & builder, bool with_builtin_tools = false) {
     if (!builder.syntax().parse_tool_calls) {
         builder.add_content(builder.consume_rest());
@@ -1830,7 +1892,7 @@ static common_chat_params common_chat_params_init_hermes_2_pro(const common_chat
             // If thinking_forced_open, then we capture the </think> tag in the grammar,
             // (important for required tool choice) and in the trigger's first capture (decides what is sent to the grammar)
             std::string(data.thinking_forced_open ? "[\\s\\S]*?(</think>\\s*)" : "(?:<think>[\\s\\S]*?</think>\\s*)?") + (
-                "(\\s*"
+                "\\s*("
                 "(?:<tool_call>"
                 "|<function"
                 "|(?:```(?:json|xml)?\n\\s*)?(?:<function_call>|<tools>|<xml><json>|<response>)?"
@@ -2060,6 +2122,33 @@ static void common_chat_parse_granite(common_chat_msg_parser & builder) {
     }
 }
 
+static void common_chat_parse_nemotron_v2(common_chat_msg_parser & builder) {
+    // Parse thinking tags
+    builder.try_parse_reasoning("<think>", "</think>");
+    if (!builder.syntax().parse_tool_calls) {
+        builder.add_content(builder.consume_rest());
+        return;
+    }
+
+    // Look for tool calls
+    static const common_regex tool_call_regex(regex_escape("<TOOLCALL>"));
+    if (auto res = builder.try_find_regex(tool_call_regex)) {
+        builder.move_to(res->groups[0].end);
+
+        // Expect JSON array of tool calls
+        auto tool_calls_data = builder.consume_json();
+        if (tool_calls_data.json.is_array()) {
+            if (!builder.try_consume_literal("</TOOLCALL>")) {
+                throw common_chat_msg_partial_exception("Incomplete tool call");
+            }
+            builder.add_tool_calls(tool_calls_data.json);
+        } else {
+            throw common_chat_msg_partial_exception("Incomplete tool call");
+        }
+    }
+    builder.add_content(builder.consume_rest());
+}
+
 static void common_chat_parse_seed_oss(common_chat_msg_parser & builder) {
     // Parse thinking tags first - this handles the main reasoning content
     builder.try_parse_reasoning("<seed:think>", "</seed:think>");
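
For a sense of the wire format this parser consumes, here is a hedged standalone sketch (the get_weather call is an invented example and nlohmann::json is assumed to be available; the real parser above additionally handles partial, streamed output):

    #include <nlohmann/json.hpp>
    #include <iostream>
    #include <string>

    // Nemotron V2 emits tool calls as a JSON array wrapped in <TOOLCALL>...</TOOLCALL>,
    // optionally preceded by a <think>...</think> block. This sketch extracts and
    // prints the calls from a complete response string.
    int main() {
        const std::string out =
            "<think>user wants the weather</think>"
            "<TOOLCALL>[{\"name\": \"get_weather\", \"arguments\": {\"city\": \"Paris\"}}]</TOOLCALL>";

        const std::string open_tag  = "<TOOLCALL>";
        const std::string close_tag = "</TOOLCALL>";

        const auto b = out.find(open_tag);
        const auto e = out.find(close_tag);
        if (b == std::string::npos || e == std::string::npos || e < b) {
            return 1; // incomplete tool call
        }

        const auto calls = nlohmann::json::parse(out.substr(b + open_tag.size(), e - (b + open_tag.size())));
        for (const auto & call : calls) {
            std::cout << call.at("name").get<std::string>() << " " << call.at("arguments").dump() << "\n";
        }
        return 0;
    }
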
@@ -2293,6 +2382,11 @@ static common_chat_params common_chat_templates_apply_jinja(
         return common_chat_params_init_seed_oss(tmpl, params, inputs);
     }
 
+    // Nemotron v2
+    if (src.find("<SPECIAL_10>") != std::string::npos) {
+        return common_chat_params_init_nemotron_v2(tmpl, params);
+    }
+
     // Use generic handler when mixing tools + JSON schema.
     // TODO: support that mix in handlers below.
     if ((params.tools.is_array() && params.json_schema.is_object())) {
@@ -2454,6 +2548,9 @@ static void common_chat_parse(common_chat_msg_parser & builder) {
         case COMMON_CHAT_FORMAT_SEED_OSS:
            common_chat_parse_seed_oss(builder);
            break;
+        case COMMON_CHAT_FORMAT_NEMOTRON_V2:
+            common_chat_parse_nemotron_v2(builder);
+            break;
         default:
             throw std::runtime_error(std::string("Unsupported format: ") + common_chat_format_name(builder.syntax().format));
     }

common/chat.h

Lines changed: 1 addition & 0 deletions
@@ -112,6 +112,7 @@ enum common_chat_format {
     COMMON_CHAT_FORMAT_GRANITE,
     COMMON_CHAT_FORMAT_GPT_OSS,
     COMMON_CHAT_FORMAT_SEED_OSS,
+    COMMON_CHAT_FORMAT_NEMOTRON_V2,
 
     COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats
 };

common/common.cpp

Lines changed: 5 additions & 3 deletions
@@ -901,7 +901,8 @@ struct common_init_result common_init_from_params(common_params & params) {
 
     llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
     if (model == NULL) {
-        LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str());
+        LOG_ERR("%s: failed to load model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
+            __func__, params.model.path.c_str());
         return iparams;
     }
 
@@ -915,7 +916,8 @@ struct common_init_result common_init_from_params(common_params & params) {
             LOG_ERR("%s: Dryrun completed!\n", __func__);
             exit(0);
         }
-        LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.path.c_str());
+        LOG_ERR("%s: failed to create context with model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
+            __func__, params.model.path.c_str());
         llama_model_free(model);
         return iparams;
     }
@@ -1161,10 +1163,10 @@ struct llama_context_params common_context_params_to_llama(const common_params &
     cparams.yarn_orig_ctx = params.yarn_orig_ctx;
     cparams.pooling_type = params.pooling_type;
     cparams.attention_type = params.attention_type;
+    cparams.flash_attn_type = params.flash_attn_type;
     cparams.cb_eval = params.cb_eval;
     cparams.cb_eval_user_data = params.cb_eval_user_data;
     cparams.offload_kqv = !params.no_kv_offload;
-    cparams.flash_attn = params.flash_attn;
     cparams.no_perf = params.no_perf;
     cparams.op_offload = !params.no_op_offload;
     cparams.swa_full = params.swa_full;
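
For API users, a minimal sketch of what this plumbing change implies (the flash_attn_type field and the LLAMA_FLASH_ATTN_TYPE_* values come from this diff; the surrounding llama.h calls are assumed from the current public API and may differ):

    #include "llama.h"

    // Hedged sketch: the tri-state flash-attention setting now travels through
    // llama_context_params.flash_attn_type instead of the removed boolean flash_attn.
    static llama_context * make_context(llama_model * model) {
        llama_context_params cparams = llama_context_default_params();
        cparams.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO; // or _ENABLED / _DISABLED
        return llama_init_from_model(model, cparams);
    }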
