
Commit f43719f

Merge branch 'master' into qwen3-coder_tool_call_parser
2 parents ca51625 + bbbf5ec

File tree: 119 files changed (+5608, -2083 lines)


common/arg.cpp

Lines changed: 29 additions & 19 deletions
@@ -1106,7 +1106,7 @@ static void common_params_print_completion(common_params_context & ctx_arg) {
     printf("\"\n\n");
 
     printf("  case \"$prev\" in\n");
-    printf("    --model)\n");
+    printf("    --model|-m)\n");
     printf("      COMPREPLY=( $(compgen -f -X '!*.gguf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
     printf("      return 0\n");
     printf("      ;;\n");
@@ -1545,10 +1545,18 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_examples({LLAMA_EXAMPLE_IMATRIX, LLAMA_EXAMPLE_PERPLEXITY, LLAMA_EXAMPLE_RETRIEVAL}));
     add_opt(common_arg(
-        {"-fa", "--flash-attn"},
-        string_format("enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled"),
-        [](common_params & params) {
-            params.flash_attn = true;
+        {"-fa", "--flash-attn"}, "FA",
+        string_format("set Flash Attention use ('on', 'off', or 'auto', default: '%s')", llama_flash_attn_type_name(params.flash_attn_type)),
+        [](common_params & params, const std::string & value) {
+            if (value == "on" || value == "enabled") {
+                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED;
+            } else if (value == "off" || value == "disabled") {
+                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
+            } else if (value == "auto") {
+                params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO;
+            } else {
+                throw std::runtime_error(string_format("error: unknown value for --flash-attn: '%s'\n", value.c_str()));
+            }
         }
     ).set_env("LLAMA_ARG_FLASH_ATTN"));
     add_opt(common_arg(
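The hunk above turns --flash-attn from a boolean switch into a value argument with an 'auto' default. A minimal sketch of how the resulting setting reaches a context (the enum and field names are taken from this diff; llama_context_default_params() and llama_init_from_model() are existing llama.cpp API calls):

    // Sketch: the tri-state enum replaces the old `bool flash_attn`.
    // LLAMA_FLASH_ATTN_TYPE_AUTO defers the decision to the backend,
    // matching the new default in common_params.
    // `model` is a previously loaded llama_model *.
    llama_context_params cparams = llama_context_default_params();
    cparams.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED; // or _DISABLED / _AUTO
    llama_context * ctx = llama_init_from_model(model, cparams);

On the command line the same choice surfaces as -fa on|off|auto (with 'enabled'/'disabled' accepted as synonyms for 'on'/'off'), or via the LLAMA_ARG_FLASH_ATTN environment variable.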
@@ -2555,15 +2563,15 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         {"--lora"}, "FNAME",
         "path to LoRA adapter (can be repeated to use multiple adapters)",
         [](common_params & params, const std::string & value) {
-            params.lora_adapters.push_back({ std::string(value), 1.0, nullptr });
+            params.lora_adapters.push_back({ std::string(value), 1.0, "", "", nullptr });
         }
         // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
     ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
     add_opt(common_arg(
         {"--lora-scaled"}, "FNAME", "SCALE",
         "path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)",
         [](common_params & params, const std::string & fname, const std::string & scale) {
-            params.lora_adapters.push_back({ fname, std::stof(scale), nullptr });
+            params.lora_adapters.push_back({ fname, std::stof(scale), "", "", nullptr });
         }
         // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
     ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
@@ -3459,8 +3467,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_repo = "ggml-org/Qwen2.5-Coder-1.5B-Q8_0-GGUF";
             params.model.hf_file = "qwen2.5-coder-1.5b-q8_0.gguf";
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3475,8 +3481,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_repo = "ggml-org/Qwen2.5-Coder-3B-Q8_0-GGUF";
             params.model.hf_file = "qwen2.5-coder-3b-q8_0.gguf";
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3491,8 +3495,6 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_repo = "ggml-org/Qwen2.5-Coder-7B-Q8_0-GGUF";
             params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3508,10 +3510,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_file = "qwen2.5-coder-7b-q8_0.gguf";
             params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
             params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
-            params.speculative.n_gpu_layers = 99;
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;
@@ -3527,10 +3526,21 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.model.hf_file = "qwen2.5-coder-14b-q8_0.gguf";
             params.speculative.model.hf_repo = "ggml-org/Qwen2.5-Coder-0.5B-Q8_0-GGUF";
             params.speculative.model.hf_file = "qwen2.5-coder-0.5b-q8_0.gguf";
-            params.speculative.n_gpu_layers = 99;
             params.port = 8012;
-            params.n_gpu_layers = 99;
-            params.flash_attn = true;
+            params.n_ubatch = 1024;
+            params.n_batch = 1024;
+            params.n_ctx = 0;
+            params.n_cache_reuse = 256;
+        }
+    ).set_examples({LLAMA_EXAMPLE_SERVER}));
+
+    add_opt(common_arg(
+        {"--fim-qwen-30b-default"},
+        string_format("use default Qwen 3 Coder 30B A3B Instruct (note: can download weights from the internet)"),
+        [](common_params & params) {
+            params.model.hf_repo = "ggml-org/Qwen3-Coder-30B-A3B-Instruct-Q8_0-GGUF";
+            params.model.hf_file = "qwen3-coder-30b-a3b-instruct-q8_0.gguf";
+            params.port = 8012;
             params.n_ubatch = 1024;
             params.n_batch = 1024;
             params.n_ctx = 0;

common/chat.cpp

Lines changed: 152 additions & 6 deletions
@@ -624,6 +624,7 @@ const char * common_chat_format_name(common_chat_format format) {
         case COMMON_CHAT_FORMAT_QWEN3_CODER_XML: return "Qwen3 Coder XML";
         case COMMON_CHAT_FORMAT_GRANITE: return "Granite";
         case COMMON_CHAT_FORMAT_GPT_OSS: return "GPT-OSS";
+        case COMMON_CHAT_FORMAT_SEED_OSS: return "Seed-OSS";
         default:
             throw std::runtime_error("Unknown chat format");
     }
@@ -2225,11 +2226,6 @@ static common_chat_params common_chat_params_init_qwen3_coder_xml(const common_c
 }
 
 static void common_chat_parse_qwen3_coder_xml(common_chat_msg_parser & builder) {
-    if (!builder.syntax().parse_tool_calls) {
-        builder.add_content(builder.consume_rest());
-        return;
-    }
-
     std::string content = builder.consume_rest();
 
     // Try to parse Qwen3-Coder XML format
@@ -2243,6 +2239,94 @@ static void common_chat_parse_qwen3_coder_xml(common_chat_msg_parser & builder)
     builder.add_content(content);
 }
 
+static void common_chat_parse_seed_oss(common_chat_msg_parser & builder) {
+    // Parse thinking tags first - this handles the main reasoning content
+    builder.try_parse_reasoning("<seed:think>", "</seed:think>");
+
+    if (!builder.syntax().parse_tool_calls) {
+        builder.add_content(builder.consume_rest());
+        return;
+    }
+
+    // Parse tool calls - Seed-OSS uses <seed:tool_call> format
+    static const common_regex tool_call_begin_regex("<seed:tool_call>");
+    static const common_regex tool_call_end_regex("</seed:tool_call>");
+    static const common_regex function_regex("<function=([^>]+)>");
+    static const common_regex param_regex("<parameter=([^>]+)>");
+
+    while (auto tool_res = builder.try_find_regex(tool_call_begin_regex)) {
+        builder.consume_spaces(); // Consume whitespace after <seed:tool_call>
+
+        // Look for function call inside tool call, ignore any content before it
+        if (auto func_res = builder.try_find_regex(function_regex, std::string::npos, false)) {
+            auto function_name = builder.str(func_res->groups[1]);
+
+            // Parse Seed-OSS parameters <parameter=name>value</parameter>
+            json args = json::object();
+            // Parse all parameters
+            while (auto param_res = builder.try_find_regex(param_regex, std::string::npos, false)) {
+                // again, ignore noise around parameters
+                auto param_name = builder.str(param_res->groups[1]);
+                builder.move_to(param_res->groups[0].end);
+                builder.consume_spaces(); // Consume whitespace after parameter
+                auto savedPos = builder.pos();
+                if (auto param_parse = builder.try_find_literal("</parameter>")) {
+                    auto param = param_parse->prelude;
+                    builder.move_to(savedPos);
+                    try {
+                        if (auto param_res = builder.try_consume_json()) {
+                            args[param_name] = param_res->json;
+                        } else {
+                            args[param_name] = param;
+                        }
+                    } catch (json::exception &) {
+                        args[param_name] = param;
+                    }
+                } else {
+                    throw common_chat_msg_partial_exception("Incomplete tool parameter");
+                }
+            }
+            // Look for closing function tag
+            auto end_func = builder.try_find_literal("</function>");
+            if (end_func) {
+                builder.move_to(end_func->groups[0].end);
+                builder.consume_spaces(); // Consume whitespace after </function>
+
+                // Add the tool call with parsed arguments, but only if we REALLY got the literal
+                auto eaten_fragment = builder.input().substr(end_func->groups[0].begin, end_func->groups[0].end);
+                auto funlen = std::string("</function>").length();
+                if (eaten_fragment.length() >= funlen && eaten_fragment.substr(0, funlen) == std::string("</function>")) {
+                    if (!builder.add_tool_call(function_name, "", args.dump())) {
+                        throw common_chat_msg_partial_exception("Incomplete tool call");
+                    }
+                } else {
+                    throw common_chat_msg_partial_exception("Incomplete tool call");
+                }
+            } else {
+                throw common_chat_msg_partial_exception("Incomplete tool call");
+            }
+            // Look for closing tool call tag
+            if (auto end_tool = builder.try_find_regex(tool_call_end_regex, std::string::npos, false)) {
+                builder.move_to(end_tool->groups[0].end);
+                builder.consume_spaces(); // Consume trailing whitespace after tool call
+            } else {
+                throw common_chat_msg_partial_exception("Incomplete tool call");
+            }
+        } else {
+            // No function found - don't consume content here, let it be handled at the end
+            break;
+        }
+    }
+
+    // Consume any remaining whitespace after all tool call processing
+    builder.consume_spaces();
+    auto remaining = builder.consume_rest();
+    // If there's any non-whitespace content remaining, add it as content
+    if (!string_strip(remaining).empty()) {
+        builder.add_content(remaining);
+    }
+}
+
 static common_chat_params common_chat_params_init_without_tools(const common_chat_template & tmpl, const struct templates_params & inputs) {
     common_chat_params data;
     data.prompt = apply(tmpl, inputs);
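For orientation, the wire format this parser walks, defined by the four regexes above, looks like the following hypothetical assistant message (the function and parameter names are illustrative, not from the commit):

    <seed:think>figure out which tool to use...</seed:think>
    <seed:tool_call>
    <function=get_weather>
    <parameter=location>"Paris"</parameter>
    </function>
    </seed:tool_call>

Each parameter value is first tried as JSON via try_consume_json(); on failure the raw text before the closing </parameter> tag is kept as a plain string, so both "Paris" and bare Paris are accepted.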
@@ -2259,8 +2343,62 @@ static common_chat_params common_chat_params_init_without_tools(const common_cha
     return data;
 }
 
+static common_chat_params common_chat_params_init_seed_oss(
+        const common_chat_template & tmpl,
+        templates_params & params,
+        const common_chat_templates_inputs & inputs)
+{
+    common_chat_params data;
+    data.prompt = apply(tmpl, params);
+    data.format = COMMON_CHAT_FORMAT_SEED_OSS;
+    if (string_ends_with(data.prompt, "<seed:think>")) {
+        if (!inputs.enable_thinking) {
+            data.prompt += "</seed:think>";
+        } else {
+            data.thinking_forced_open = true;
+        }
+    }
+
+    if (params.tools.is_array() && !params.tools.empty()) {
+        data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
+        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
+            std::vector<std::string> tool_rules;
+            foreach_function(params.tools, [&](const json & tool) {
+                const auto & function = tool.at("function");
+                std::string name = function.at("name");
+                auto parameters = function.at("parameters");
+                builder.resolve_refs(parameters);
+
+                // Create rule for Seed-OSS function call format
+                std::string param_rules;
+                if (parameters.contains("properties")) {
+                    for (const auto & [key, value] : parameters.at("properties").items()) {
+                        param_rules += "\"<parameter=" + key + ">\"" + builder.add_schema(name + "-arg-" + key, value) +
+                                       "\"</parameter>\"";
+                    }
+                }
+
+                tool_rules.push_back(builder.add_rule(name + "-call",
+                    "\"<seed:tool_call>\" space \"<function=" + name + ">\" space " +
+                    param_rules +
+                    " \"</function>\" space \"</seed:tool_call>\""));
+            });
+
+            data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<seed:tool_call>" });
+
+            data.preserved_tokens = {
+                "<seed:think>", "</seed:think>", "<seed:tool_call>", "</seed:tool_call>",
+                "<function=", "</function>", "<parameter=", "</parameter>",
+            };
+
+            builder.add_rule("root", string_join(tool_rules, " | "));
+        });
+    }
+    return data;
+}
+
 static common_chat_params common_chat_templates_apply_jinja(
-        const struct common_chat_templates * tmpls,
+    const struct common_chat_templates * tmpls,
     const struct common_chat_templates_inputs & inputs)
 {
     templates_params params;
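To make the rule assembly in common_chat_params_init_seed_oss concrete: for a single hypothetical tool get_weather with one string property location, the text handed to add_rule() would expand to roughly this GBNF (a sketch of the assembled string, not captured output; exact rule naming may differ after the builder sanitizes names):

    root ::= get_weather-call
    get_weather-call ::= "<seed:tool_call>" space "<function=get_weather>" space
                         "<parameter=location>" get_weather-arg-location "</parameter>"
                         "</function>" space "</seed:tool_call>"

where get_weather-arg-location is whatever add_schema() derives from the property's JSON schema. Because grammar_lazy is set (unless tool choice is REQUIRED), the grammar only engages once the trigger word "<seed:tool_call>" appears in the output.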
@@ -2338,6 +2476,11 @@ static common_chat_params common_chat_templates_apply_jinja(
         return common_chat_params_init_gpt_oss(tmpl, params);
     }
 
+    // Seed-OSS
+    if (src.find("<seed:think>") != std::string::npos) {
+        return common_chat_params_init_seed_oss(tmpl, params, inputs);
+    }
+
     // Use generic handler when mixing tools + JSON schema.
     // TODO: support that mix in handlers below.
     if ((params.tools.is_array() && params.json_schema.is_object())) {
@@ -2500,6 +2643,9 @@ static void common_chat_parse(common_chat_msg_parser & builder) {
         case COMMON_CHAT_FORMAT_GPT_OSS:
            common_chat_parse_gpt_oss(builder);
            break;
+        case COMMON_CHAT_FORMAT_SEED_OSS:
+            common_chat_parse_seed_oss(builder);
+            break;
         default:
             throw std::runtime_error(std::string("Unsupported format: ") + common_chat_format_name(builder.syntax().format));
     }

common/chat.h

Lines changed: 1 addition & 0 deletions
@@ -112,6 +112,7 @@ enum common_chat_format {
     COMMON_CHAT_FORMAT_QWEN3_CODER_XML,
     COMMON_CHAT_FORMAT_GRANITE,
     COMMON_CHAT_FORMAT_GPT_OSS,
+    COMMON_CHAT_FORMAT_SEED_OSS,
 
     COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats
 };

common/common.cpp

Lines changed: 10 additions & 3 deletions
@@ -901,7 +901,8 @@ struct common_init_result common_init_from_params(common_params & params) {
 
     llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
     if (model == NULL) {
-        LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str());
+        LOG_ERR("%s: failed to load model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
+                __func__, params.model.path.c_str());
         return iparams;
     }
 
@@ -911,7 +912,8 @@ struct common_init_result common_init_from_params(common_params & params) {
 
     llama_context * lctx = llama_init_from_model(model, cparams);
     if (lctx == NULL) {
-        LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.path.c_str());
+        LOG_ERR("%s: failed to create context with model '%s', try reducing --n-gpu-layers if you're running out of VRAM\n",
+                __func__, params.model.path.c_str());
         llama_model_free(model);
         return iparams;
     }
 
@@ -988,7 +990,12 @@ struct common_init_result common_init_from_params(common_params & params) {
             return iparams;
         }
 
+        char buf[1024];
         la.ptr = lora.get();
+        llama_adapter_meta_val_str(la.ptr, "adapter.lora.task_name", buf, sizeof(buf));
+        la.task_name = buf;
+        llama_adapter_meta_val_str(la.ptr, "adapter.lora.prompt_prefix", buf, sizeof(buf));
+        la.prompt_prefix = buf;
         iparams.lora.emplace_back(std::move(lora)); // copy to list of loaded adapters
     }
 
@@ -1152,10 +1159,10 @@ struct llama_context_params common_context_params_to_llama(const common_params &
     cparams.yarn_orig_ctx = params.yarn_orig_ctx;
     cparams.pooling_type = params.pooling_type;
     cparams.attention_type = params.attention_type;
+    cparams.flash_attn_type = params.flash_attn_type;
     cparams.cb_eval = params.cb_eval;
     cparams.cb_eval_user_data = params.cb_eval_user_data;
     cparams.offload_kqv = !params.no_kv_offload;
-    cparams.flash_attn = params.flash_attn;
     cparams.no_perf = params.no_perf;
     cparams.op_offload = !params.no_op_offload;
     cparams.swa_full = params.swa_full;
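The llama_adapter_meta_val_str() calls above populate the two string fields newly added to common_adapter_lora_info (see common/common.h below). A short sketch of querying the same keys on an adapter directly; the >= 0 success check is an assumption that the function follows the llama_model_meta_val_str() convention of returning a negative value when a key is missing:

    // `adapter` is a struct llama_adapter_lora *, e.g. from llama_adapter_lora_init().
    char buf[1024] = {0};
    if (llama_adapter_meta_val_str(adapter, "adapter.lora.task_name", buf, sizeof(buf)) >= 0) {
        printf("task name: %s\n", buf);
    }
    if (llama_adapter_meta_val_str(adapter, "adapter.lora.prompt_prefix", buf, sizeof(buf)) >= 0) {
        printf("prompt prefix: %s\n", buf);
    }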

common/common.h

Lines changed: 4 additions & 1 deletion
@@ -34,6 +34,9 @@ struct common_adapter_lora_info {
     std::string path;
     float scale;
 
+    std::string task_name;
+    std::string prompt_prefix;
+
     struct llama_adapter_lora * ptr;
 };
 
@@ -309,6 +312,7 @@ struct common_params {
     enum llama_rope_scaling_type rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
     enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_UNSPECIFIED; // pooling type for embeddings
     enum llama_attention_type attention_type = LLAMA_ATTENTION_TYPE_UNSPECIFIED; // attention type for embeddings
+    enum llama_flash_attn_type flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO; // whether to use Flash Attention
 
     struct common_params_sampling sampling;
     struct common_params_speculative speculative;
 
@@ -372,7 +376,6 @@ struct common_params {
     bool multiline_input = false; // reverse the usage of `\`
     bool simple_io = false; // improves compatibility with subprocesses and limited consoles
     bool cont_batching = true; // insert new sequences for decoding on-the-fly
-    bool flash_attn = false; // flash attention
     bool no_perf = false; // disable performance metrics
     bool ctx_shift = false; // context shift on infinite text generation
     bool swa_full = false; // use full-size SWA cache (https://github.com/ggml-org/llama.cpp/pull/13194#issuecomment-2868343055)
