
Commit 5688afa

Merge branch 'ggml-org:master' into fix_qwen_reasoning_tool_calling_required
2 parents: 310701b + b66df9d


71 files changed: +2572 / -984 lines

ci/run.sh

Lines changed: 10 additions & 10 deletions
@@ -386,10 +386,10 @@ function gg_run_open_llama_7b_v2 {
 
     (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log
 
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
 
     function check_ppl {
         qnt="$1"
@@ -520,8 +520,8 @@ function gg_run_pythia_1_4b {
 
     (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test_60} -ngl 99 -c 128 -b 128 --chunks 1 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log
 
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
 
     function check_ppl {
         qnt="$1"
@@ -651,10 +651,10 @@ function gg_run_pythia_2_8b {
 
     (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test} -t 1 -ngl 99 -c 2048 -b 512 --chunks 4 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log
 
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
-    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa off ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
+    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 0 -fa on ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
 
     function check_ppl {
         qnt="$1"

common/arg.cpp

Lines changed: 19 additions & 25 deletions
@@ -1545,10 +1545,18 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_RETRIEVAL}));
     add_opt(common_arg(
-        {"-fa", "--flash-attn"},
-        string_format("enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled"),
-        [](common_params & params) {
-            params.flash_attn = true;
+        {"-fa", "--flash-attn"}, "FA",
+        string_format("set Flash Attention use ('on', 'off', or 'auto', default: '%s')", llama_flash_attn_type_name(params.flash_attn_type)),
+        [](common_params & params, const std::string & value) {
+            if (value == "on" || value == "enabled") {
+                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED;
+            } else if (value == "off" || value == "disabled") {
+                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
+            } else if (value == "auto") {
+                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO;
+            } else {
+                throw std::runtime_error(string_format("error: unkown value for --flash-attn: '%s'\n", value.c_str()));
+            }
         }
     ).set_env("LLAMA_ARG_FLASH_ATTN"));
     add_opt(common_arg(
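
For context, a minimal sketch (not part of the commit) of what the new tri-state means for code that consumes common_params: the old boolean params.flash_attn is replaced by params.flash_attn_type, using the enum values visible in the hunk above. The switch below is purely illustrative.

// Illustrative sketch only: consuming the new tri-state flag.
common_params params;
params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO; // as parsed from "-fa auto"

switch (params.flash_attn_type) {
    case LLAMA_FLASH_ATTN_TYPE_ENABLED:  /* "-fa on"  or "enabled"  */ break;
    case LLAMA_FLASH_ATTN_TYPE_DISABLED: /* "-fa off" or "disabled" */ break;
    case LLAMA_FLASH_ATTN_TYPE_AUTO:     /* "-fa auto": decided at context creation */ break;
    default: break;
}
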
@@ -2954,20 +2962,20 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.endpoint_metrics = true;
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_METRICS"));
-    add_opt(common_arg(
-        {"--slots"},
-        string_format("enable slots monitoring endpoint (default: %s)", params.endpoint_slots ? "enabled" : "disabled"),
-        [](common_params & params) {
-            params.endpoint_slots = true;
-        }
-    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_SLOTS"));
     add_opt(common_arg(
         {"--props"},
         string_format("enable changing global properties via POST /props (default: %s)", params.endpoint_props ? "enabled" : "disabled"),
         [](common_params & params) {
            params.endpoint_props = true;
         }
     ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_PROPS"));
+    add_opt(common_arg(
+        {"--slots"},
+        string_format("enable slots monitoring endpoint (default: %s)", params.endpoint_slots ? "enabled" : "disabled"),
+        [](common_params & params) {
+            params.endpoint_slots = true;
+        }
+    ).set_examples({LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_ENDPOINT_SLOTS"));
     add_opt(common_arg(
         {"--no-slots"},
         "disables slots monitoring endpoint",
@@ -3459,8 +3467,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_repo = "ggml-org/Qwen2.5-Coder-1.5B-Q8_0-GGUF";
             params.model.hf_file = "qwen2.5-coder-1.5b-q8_0.gguf";
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3475,8 +3481,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_repo = "ggml-org/Qwen2.5-Coder-3B-Q8_0-GGUF";
             params.model.hf_file = "qwen2.5-coder-3b-q8_0.gguf";
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3491,8 +3495,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF";
             params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3508,10 +3510,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
             params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
             params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
-            params.speculative.n_gpu_layers = 99;
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3527,10 +3526,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_file = "qwen2.5-coder-14b-q8_0.gguf";
             params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
             params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
-            params.speculative.n_gpu_layers = 99;
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3545,8 +3541,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_repo = "ggml-org/Qwen3-Coder-30B-A3B-Instruct-Q8_0-GGUF";
             params.model.hf_file = "qwen3-coder-30b-a3b-instruct-q8_0.gguf";
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;

common/chat.cpp

Lines changed: 152 additions & 1 deletion
@@ -622,6 +622,7 @@ const char * common_chat_format_name(common_chat_format format) {
         case COMMON_CHAT_FORMAT_COMMAND_R7B: return "Command R7B";
         case COMMON_CHAT_FORMAT_GRANITE: return "Granite";
         case COMMON_CHAT_FORMAT_GPT_OSS: return "GPT-OSS";
+        case COMMON_CHAT_FORMAT_SEED_OSS: return "Seed-OSS";
         default:
             throw std::runtime_error("Unknown chat format");
     }
@@ -2088,6 +2089,94 @@ static void common_chat_parse_granite(common_chat_msg_parser & builder) {
     }
 }
 
+static void common_chat_parse_seed_oss(common_chat_msg_parser & builder) {
+    // Parse thinking tags first - this handles the main reasoning content
+    builder.try_parse_reasoning("<seed:think>", "</seed:think>");
+
+    if (!builder.syntax().parse_tool_calls) {
+        builder.add_content(builder.consume_rest());
+        return;
+    }
+
+    // Parse tool calls - Seed-OSS uses <seed:tool_call> format
+    static const common_regex tool_call_begin_regex("<seed:tool_call>");
+    static const common_regex tool_call_end_regex("</seed:tool_call>");
+    static const common_regex function_regex("<function=([^>]+)>");
+    static const common_regex param_regex("<parameter=([^>]+)>");
+
+    while (auto tool_res = builder.try_find_regex(tool_call_begin_regex)) {
+        builder.consume_spaces(); // Consume whitespace after <seed:tool_call>
+
+        // Look for function call inside tool call, ignore any content before it
+        if (auto func_res = builder.try_find_regex(function_regex, std::string::npos, false)) {
+            auto function_name = builder.str(func_res->groups[1]);
+
+            // Parse Seed-OSS parameters <parameter=name>value</parameter>
+            json args = json::object();
+            // Parse all parameters
+            while (auto param_res = builder.try_find_regex(param_regex, std::string::npos, false)) {
+                // again, ignore noise around parameters
+                auto param_name = builder.str(param_res->groups[1]);
+                builder.move_to(param_res->groups[0].end);
+                builder.consume_spaces(); // Consume whitespace after parameter
+                auto savedPos = builder.pos();
+                if (auto param_parse = builder.try_find_literal("</parameter>")) {
+                    auto param = param_parse->prelude;
+                    builder.move_to(savedPos);
+                    try {
+                        if (auto param_res = builder.try_consume_json()) {
+                            args[param_name] = param_res->json;
+                        } else {
+                            args[param_name] = param;
+                        }
+                    } catch (json::exception &) {
+                        args[param_name] = param;
+                    }
+                } else {
+                    throw common_chat_msg_partial_exception("Incomplete tool parameter");
+                }
+            }
+            // Look for closing function tag
+            auto end_func = builder.try_find_literal("</function>");
+            if (end_func) {
+                builder.move_to(end_func->groups[0].end);
+                builder.consume_spaces(); // Consume whitespace after </function>
+
+                // Add the tool call with parsed arguments, but only if we REALLY got the literal
+                auto eaten_fragment = builder.input().substr(end_func->groups[0].begin, end_func->groups[0].end);
+                auto funlen = std::string("</function>").length();
+                if (eaten_fragment.length() >= funlen && eaten_fragment.substr(0, funlen) == std::string("</function>")) {
+                    if (!builder.add_tool_call(function_name, "", args.dump())) {
+                        throw common_chat_msg_partial_exception("Incomplete tool call");
+                    }
+                } else {
+                    throw common_chat_msg_partial_exception("Incomplete tool call");
+                }
+            } else {
+                throw common_chat_msg_partial_exception("Incomplete tool call");
+            }
+            // Look for closing tool call tag
+            if (auto end_tool = builder.try_find_regex(tool_call_end_regex, std::string::npos, false)) {
+                builder.move_to(end_tool->groups[0].end);
+                builder.consume_spaces(); // Consume trailing whitespace after tool call
+            } else {
+                throw common_chat_msg_partial_exception("Incomplete tool call");
+            }
+        } else {
+            // No function found - don't consume content here, let it be handled at the end
+            break;
+        }
+    }
+
+    // Consume any remaining whitespace after all tool call processing
+    builder.consume_spaces();
+    auto remaining = builder.consume_rest();
+    // If there's any non-whitespace content remaining, add it as content
+    if (!string_strip(remaining).empty()) {
+        builder.add_content(remaining);
+    }
+}
+
 static common_chat_params common_chat_params_init_without_tools(const common_chat_template & tmpl, const struct templates_params & inputs) {
     common_chat_params data;
     data.prompt = apply(tmpl, inputs);
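
As a reading aid (not part of the diff), here is the kind of model output the parser above targets, with a made-up tool name and arguments: the reasoning block is handled by try_parse_reasoning(), and each <parameter=...> value is first tried as JSON and otherwise kept as a raw string.

// Hypothetical Seed-OSS output the parser above is written to handle.
// The tool name and arguments are invented for illustration.
static const char * seed_oss_example =
    "<seed:think>Need the forecast, so call the weather tool.</seed:think>"
    "<seed:tool_call>"
    "<function=get_weather>"
    "<parameter=city>Paris</parameter>"
    "<parameter=days>3</parameter>"
    "</function>"
    "</seed:tool_call>";
// Expected result: the reasoning text is extracted, and one tool call named
// "get_weather" is added with arguments {"city":"Paris","days":3}
// ("3" parses as JSON, "Paris" falls back to a plain string).
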
@@ -2104,8 +2193,62 @@ static common_chat_params common_chat_params_init_without_tools(const common_cha
     return data;
 }
 
+static common_chat_params common_chat_params_init_seed_oss(
+    const common_chat_template & tmpl,
+    templates_params & params,
+    const common_chat_templates_inputs & inputs)
+{
+    common_chat_params data;
+    data.prompt = apply(tmpl, params);
+    data.format = COMMON_CHAT_FORMAT_SEED_OSS;
+    if (string_ends_with(data.prompt, "<seed:think>")) {
+        if (!inputs.enable_thinking) {
+            data.prompt += "</seed:think>";
+        } else {
+            data.thinking_forced_open = true;
+        }
+    }
+
+    if (params.tools.is_array() && !params.tools.empty()) {
+        data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
+        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
+            std::vector<std::string> tool_rules;
+            foreach_function(params.tools, [&](const json & tool) {
+                const auto & function = tool.at("function");
+                std::string name = function.at("name");
+                auto parameters = function.at("parameters");
+                builder.resolve_refs(parameters);
+
+                // Create rule for Seed-OSS function call format
+                std::string param_rules;
+                if (parameters.contains("properties")) {
+                    for (const auto & [key, value] : parameters.at("properties").items()) {
+                        param_rules += "\"<parameter=" + key + ">\"" + builder.add_schema(name + "-arg-" + key, value) +
+                                       "\"</parameter>\"";
+                    }
+                }
+
+                tool_rules.push_back(builder.add_rule(name + "-call",
+                    "\"<seed:tool_call>\" space \"<function=" + name + ">\" space " +
+                    param_rules +
+                    " \"</function>\" space \"</seed:tool_call>\""));
+            });
+
+            data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<seed:tool_call>" });
+
+            data.preserved_tokens = {
+                "<seed:think>", "</seed:think>", "<seed:tool_call>", "</seed:tool_call>",
+                "<function=", "</function>", "<parameter=", "</parameter>",
+            };
+
+            builder.add_rule("root", string_join(tool_rules, " | "));
+        });
+    }
+    return data;
+}
+
 static common_chat_params common_chat_templates_apply_jinja(
-        const struct common_chat_templates * tmpls,
+    const struct common_chat_templates * tmpls,
     const struct common_chat_templates_inputs & inputs)
 {
     templates_params params;
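
Rough shape of the grammar the builder above emits for a single hypothetical tool, pieced together from the rule strings in this hunk; the exact GBNF produced at runtime may differ in spacing and schema expansion.

// Approximate grammar for one tool named "get_weather" with a single string
// property "city" (names invented); rule names follow the code above:
//
//   get_weather-call ::= "<seed:tool_call>" space "<function=get_weather>" space
//                        "<parameter=city>" get_weather-arg-city "</parameter>"
//                        "</function>" space "</seed:tool_call>"
//   root             ::= get_weather-call
//
// The grammar is lazy (grammar_lazy) unless tool_choice is "required", and only
// engages once the trigger word "<seed:tool_call>" appears in the output.
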
@@ -2174,6 +2317,11 @@ static common_chat_params common_chat_templates_apply_jinja(
         return common_chat_params_init_gpt_oss(tmpl, params);
     }
 
+    // Seed-OSS
+    if (src.find("<seed:think>") != std::string::npos) {
+        return common_chat_params_init_seed_oss(tmpl, params, inputs);
+    }
+
     // Use generic handler when mixing tools + JSON schema.
     // TODO: support that mix in handlers below.
     if ((params.tools.is_array() && params.json_schema.is_object())) {
@@ -2332,6 +2480,9 @@ static void common_chat_parse(common_chat_msg_parser & builder) {
         case COMMON_CHAT_FORMAT_GPT_OSS:
            common_chat_parse_gpt_oss(builder);
            break;
+        case COMMON_CHAT_FORMAT_SEED_OSS:
+            common_chat_parse_seed_oss(builder);
+            break;
         default:
             throw std::runtime_error(std::string("Unsupported format: ") + common_chat_format_name(builder.syntax().format));
     }

common/chat.h

Lines changed: 1 addition & 0 deletions
@@ -111,6 +111,7 @@ enum common_chat_format {
     COMMON_CHAT_FORMAT_COMMAND_R7B,
     COMMON_CHAT_FORMAT_GRANITE,
     COMMON_CHAT_FORMAT_GPT_OSS,
+    COMMON_CHAT_FORMAT_SEED_OSS,
 
     COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats
 };

common/common.cpp

Lines changed: 5 additions & 3 deletions
@@ -901,7 +901,8 @@ struct common_init_result common_init_from_params(common_params & params) {
 
     llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
     if (model == NULL) {
-        LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str());
+        LOG_ERR("%s: failed to load model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
+            __func__, params.model.path.c_str());
         return iparams;
     }
 
@@ -911,7 +912,8 @@ struct common_init_result common_init_from_params(common_params & params) {
 
     llama_context * lctx = llama_init_from_model(model, cparams);
     if (lctx == NULL) {
-        LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.path.c_str());
+        LOG_ERR("%s: failed to create context with model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
+            __func__, params.model.path.c_str());
         llama_model_free(model);
         return iparams;
     }
@@ -1157,10 +1159,10 @@ struct llama_context_params common_context_params_to_llama(const common_params &
     cparams.yarn_orig_ctx = params.yarn_orig_ctx;
     cparams.pooling_type = params.pooling_type;
     cparams.attention_type = params.attention_type;
+    cparams.flash_attn_type = params.flash_attn_type;
     cparams.cb_eval = params.cb_eval;
     cparams.cb_eval_user_data = params.cb_eval_user_data;
     cparams.offload_kqv = !params.no_kv_offload;
-    cparams.flash_attn = params.flash_attn;
     cparams.no_perf = params.no_perf;
     cparams.op_offload = !params.no_op_offload;
     cparams.swa_full = params.swa_full;
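
A minimal embedder-side sketch of the same change (the model path is a placeholder and error handling is elided); the API calls mirror the ones visible in this file, with flash_attn_type replacing the removed cparams.flash_attn boolean.

// Sketch, not part of the commit: context creation with the new field.
llama_model_params mparams = llama_model_default_params();
llama_model * model = llama_model_load_from_file("model.gguf", mparams); // placeholder path

llama_context_params cparams = llama_context_default_params();
cparams.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO; // was: cparams.flash_attn = true/false
llama_context * lctx = llama_init_from_model(model, cparams);
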
