Skip to content

Commit 70dd25b

Browse files
committed
Merge branch 'master' into imatrix
2 parents 6371902 + 696fccf commit 70dd25b

File tree

135 files changed

+9600
-2952
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

135 files changed

+9600
-2952
lines changed

.devops/vulkan.Dockerfile

Lines changed: 23 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,30 @@ ARG UBUNTU_VERSION=24.04
22

33
FROM ubuntu:$UBUNTU_VERSION AS build
44

5-
# Install build tools
6-
RUN apt update && apt install -y git build-essential cmake wget
5+
# Ref: https://vulkan.lunarg.com/doc/sdk/latest/linux/getting_started.html
76

8-
# Install Vulkan SDK and cURL
9-
RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
10-
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-noble.list https://packages.lunarg.com/vulkan/lunarg-vulkan-noble.list && \
11-
apt update -y && \
12-
apt-get install -y vulkan-sdk libcurl4-openssl-dev curl
7+
# Install build tools
8+
RUN apt update && apt install -y git build-essential cmake wget xz-utils
9+
10+
# Install Vulkan SDK
11+
ARG VULKAN_VERSION=1.4.321.1
12+
RUN ARCH=$(uname -m) && \
13+
wget -qO /tmp/vulkan-sdk.tar.xz https://sdk.lunarg.com/sdk/download/${VULKAN_VERSION}/linux/vulkan-sdk-linux-${ARCH}-${VULKAN_VERSION}.tar.xz && \
14+
mkdir -p /opt/vulkan && \
15+
tar -xf /tmp/vulkan-sdk.tar.xz -C /tmp --strip-components=1 && \
16+
mv /tmp/${ARCH}/* /opt/vulkan/ && \
17+
rm -rf /tmp/*
18+
19+
# Install cURL and Vulkan SDK dependencies
20+
RUN apt install -y libcurl4-openssl-dev curl \
21+
libxcb-xinput0 libxcb-xinerama0 libxcb-cursor-dev
22+
23+
# Set environment variables
24+
ENV VULKAN_SDK=/opt/vulkan
25+
ENV PATH=$VULKAN_SDK/bin:$PATH
26+
ENV LD_LIBRARY_PATH=$VULKAN_SDK/lib:$LD_LIBRARY_PATH
27+
ENV CMAKE_PREFIX_PATH=$VULKAN_SDK:$CMAKE_PREFIX_PATH
28+
ENV PKG_CONFIG_PATH=$VULKAN_SDK/lib/pkgconfig:$PKG_CONFIG_PATH
1329

1430
# Build it
1531
WORKDIR /app

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -151,6 +151,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
151151
- [x] [Bunny](https://github.com/BAAI-DCAI/Bunny)
152152
- [x] [GLM-EDGE](https://huggingface.co/models?search=glm-edge)
153153
- [x] [Qwen2-VL](https://huggingface.co/collections/Qwen/qwen2-vl-66cee7455501d7126940800d)
154+
- [x] [LFM2-VL](https://huggingface.co/collections/LiquidAI/lfm2-vl-68963bbc84a610f7638d5ffa)
154155

155156
</details>
156157

common/arg.cpp

Lines changed: 24 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1106,7 +1106,7 @@ static void common_params_print_completion(common_params_context & ctx_arg) {
11061106
printf("\"\n\n");
11071107

11081108
printf(" case \"$prev\" in\n");
1109-
printf(" --model)\n");
1109+
printf(" --model|-m)\n");
11101110
printf(" COMPREPLY=( $(compgen -f -X '!*.gguf' -- \"$cur\") $(compgen -d -- \"$cur\") )\n");
11111111
printf(" return 0\n");
11121112
printf(" ;;\n");
@@ -1755,7 +1755,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
17551755
[](common_params & params) {
17561756
params.warmup = false;
17571757
}
1758-
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_RETRIEVAL}));
1758+
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_EMBEDDING, LLAMA_EXAMPLE_RETRIEVAL, LLAMA_EXAMPLE_PERPLEXITY}));
17591759
add_opt(common_arg(
17601760
{"--spm-infill"},
17611761
string_format(
@@ -2254,9 +2254,11 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
22542254
).set_examples({LLAMA_EXAMPLE_PERPLEXITY}));
22552255
add_opt(common_arg(
22562256
{"-dt", "--defrag-thold"}, "N",
2257-
string_format("KV cache defragmentation threshold (default: %.1f, < 0 - disabled)", (double)params.defrag_thold),
2257+
string_format("KV cache defragmentation threshold (DEPRECATED)"),
22582258
[](common_params & params, const std::string & value) {
2259-
params.defrag_thold = std::stof(value);
2259+
GGML_UNUSED(params);
2260+
GGML_UNUSED(value);
2261+
LOG_WRN("DEPRECATED: --defrag-thold is deprecated and no longer necessary to specify\n");
22602262
}
22612263
).set_env("LLAMA_ARG_DEFRAG_THOLD"));
22622264
add_opt(common_arg(
@@ -2553,15 +2555,15 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
25532555
{"--lora"}, "FNAME",
25542556
"path to LoRA adapter (can be repeated to use multiple adapters)",
25552557
[](common_params & params, const std::string & value) {
2556-
params.lora_adapters.push_back({ std::string(value), 1.0, nullptr });
2558+
params.lora_adapters.push_back({ std::string(value), 1.0, "", "", nullptr });
25572559
}
25582560
// we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
25592561
).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
25602562
add_opt(common_arg(
25612563
{"--lora-scaled"}, "FNAME", "SCALE",
25622564
"path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)",
25632565
[](common_params & params, const std::string & fname, const std::string & scale) {
2564-
params.lora_adapters.push_back({ fname, std::stof(scale), nullptr });
2566+
params.lora_adapters.push_back({ fname, std::stof(scale), "", "", nullptr });
25652567
}
25662568
// we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg
25672569
).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA}));
@@ -3543,6 +3545,22 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
35433545
}
35443546
).set_examples({LLAMA_EXAMPLE_SERVER}));
35453547

3548+
add_opt(common_arg(
3549+
{"--fim-qwen-30b-default"},
3550+
string_format("use default Qwen 3 Coder 30B A3B Instruct (note: can download weights from the internet)"),
3551+
[](common_params & params) {
3552+
params.model.hf_repo = "ggml-org/Qwen3-Coder-30B-A3B-Instruct-Q8_0-GGUF";
3553+
params.model.hf_file = "qwen3-coder-30b-a3b-instruct-q8_0.gguf";
3554+
params.port = 8012;
3555+
params.n_gpu_layers = 99;
3556+
params.flash_attn = true;
3557+
params.n_ubatch = 1024;
3558+
params.n_batch = 1024;
3559+
params.n_ctx = 0;
3560+
params.n_cache_reuse = 256;
3561+
}
3562+
).set_examples({LLAMA_EXAMPLE_SERVER}));
3563+
35463564
add_opt(common_arg(
35473565
{ "--diffusion-steps" }, "N",
35483566
string_format("number of diffusion steps (default: %d)", params.diffusion.steps),

common/chat.cpp

Lines changed: 173 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -622,6 +622,7 @@ const char * common_chat_format_name(common_chat_format format) {
622622
case COMMON_CHAT_FORMAT_COMMAND_R7B: return "Command R7B";
623623
case COMMON_CHAT_FORMAT_GRANITE: return "Granite";
624624
case COMMON_CHAT_FORMAT_GPT_OSS: return "GPT-OSS";
625+
case COMMON_CHAT_FORMAT_SEED_OSS: return "Seed-OSS";
625626
default:
626627
throw std::runtime_error("Unknown chat format");
627628
}
@@ -1361,6 +1362,26 @@ static common_chat_params common_chat_params_init_gpt_oss(const common_chat_temp
13611362
"<|end|>",
13621363
};
13631364

1365+
if (!inputs.json_schema.is_null()) {
1366+
data.grammar_lazy = false;
1367+
data.grammar = build_grammar([&](const common_grammar_builder & builder) {
1368+
auto schema = inputs.json_schema;
1369+
builder.resolve_refs(schema);
1370+
1371+
auto not_end = builder.add_rule("not-end",
1372+
"[^<] | \"<\" [^|] | \"<|\" [^e] | \"<|e\" [^n] | \"<|en\" [^d] | \"<|end\" [^|] | \"<|end|\" [^>]");
1373+
auto analysis = builder.add_rule("analysis",
1374+
"\"<|channel|>analysis<|message|>\" ( " + not_end + " )* \"<|end|>\"");
1375+
auto constraint = builder.add_rule("constraint", "\"<|constrain|>\"? [a-zA-Z0-9_-]+");
1376+
auto final = builder.add_rule("final",
1377+
"\"<|channel|>final\" ( \" \" " + constraint + " )? \"<|message|>\" " +
1378+
builder.add_schema("response", schema)
1379+
);
1380+
1381+
builder.add_rule("root", "( " + analysis + " \"<|start|>assistant\" )? " + final);
1382+
});
1383+
}
1384+
13641385
if (inputs.tools.is_array() && !inputs.tools.empty()) {
13651386
data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
13661387
data.grammar = build_grammar([&](const common_grammar_builder & builder) {
@@ -2039,6 +2060,94 @@ static void common_chat_parse_granite(common_chat_msg_parser & builder) {
20392060
}
20402061
}
20412062

2063+
static void common_chat_parse_seed_oss(common_chat_msg_parser & builder) {
2064+
// Parse thinking tags first - this handles the main reasoning content
2065+
builder.try_parse_reasoning("<seed:think>", "</seed:think>");
2066+
2067+
if (!builder.syntax().parse_tool_calls) {
2068+
builder.add_content(builder.consume_rest());
2069+
return;
2070+
}
2071+
2072+
// Parse tool calls - Seed-OSS uses <seed:tool_call> format
2073+
static const common_regex tool_call_begin_regex("<seed:tool_call>");
2074+
static const common_regex tool_call_end_regex("</seed:tool_call>");
2075+
static const common_regex function_regex("<function=([^>]+)>");
2076+
static const common_regex param_regex("<parameter=([^>]+)>");
2077+
2078+
while (auto tool_res = builder.try_find_regex(tool_call_begin_regex)) {
2079+
builder.consume_spaces(); // Consume whitespace after <seed:tool_call>
2080+
2081+
// Look for function call inside tool call, ignore any content before it
2082+
if (auto func_res = builder.try_find_regex(function_regex, std::string::npos, false)) {
2083+
auto function_name = builder.str(func_res->groups[1]);
2084+
2085+
// Parse Seed-OSS parameters <parameter=name>value</parameter>
2086+
json args = json::object();
2087+
// Parse all parameters
2088+
while (auto param_res = builder.try_find_regex(param_regex, std::string::npos, false)) {
2089+
// again, ignore noise around parameters
2090+
auto param_name = builder.str(param_res->groups[1]);
2091+
builder.move_to(param_res->groups[0].end);
2092+
builder.consume_spaces(); // Consume whitespace after parameter
2093+
auto savedPos = builder.pos();
2094+
if (auto param_parse = builder.try_find_literal("</parameter>")) {
2095+
auto param = param_parse->prelude;
2096+
builder.move_to(savedPos);
2097+
try {
2098+
if (auto param_res = builder.try_consume_json()) {
2099+
args[param_name] = param_res->json;
2100+
} else {
2101+
args[param_name] = param;
2102+
}
2103+
} catch (json::exception &) {
2104+
args[param_name] = param;
2105+
}
2106+
} else {
2107+
throw common_chat_msg_partial_exception("Incomplete tool parameter");
2108+
}
2109+
}
2110+
// Look for closing function tag
2111+
auto end_func = builder.try_find_literal("</function>");
2112+
if (end_func) {
2113+
builder.move_to(end_func->groups[0].end);
2114+
builder.consume_spaces(); // Consume whitespace after </function>
2115+
2116+
// Add the tool call with parsed arguments, but only if we REALLY got the literal
2117+
auto eaten_fragment = builder.input().substr(end_func->groups[0].begin, end_func->groups[0].end);
2118+
auto funlen = std::string("</function>").length();
2119+
if (eaten_fragment.length() >= funlen && eaten_fragment.substr(0, funlen) == std::string("</function>")) {
2120+
if (!builder.add_tool_call(function_name, "", args.dump())) {
2121+
throw common_chat_msg_partial_exception("Incomplete tool call");
2122+
}
2123+
} else {
2124+
throw common_chat_msg_partial_exception("Incomplete tool call");
2125+
}
2126+
} else {
2127+
throw common_chat_msg_partial_exception("Incomplete tool call");
2128+
}
2129+
// Look for closing tool call tag
2130+
if (auto end_tool = builder.try_find_regex(tool_call_end_regex, std::string::npos, false)) {
2131+
builder.move_to(end_tool->groups[0].end);
2132+
builder.consume_spaces(); // Consume trailing whitespace after tool call
2133+
} else {
2134+
throw common_chat_msg_partial_exception("Incomplete tool call");
2135+
}
2136+
} else {
2137+
// No function found - don't consume content here, let it be handled at the end
2138+
break;
2139+
}
2140+
}
2141+
2142+
// Consume any remaining whitespace after all tool call processing
2143+
builder.consume_spaces();
2144+
auto remaining = builder.consume_rest();
2145+
// If there's any non-whitespace content remaining, add it as content
2146+
if (!string_strip(remaining).empty()) {
2147+
builder.add_content(remaining);
2148+
}
2149+
}
2150+
20422151
static common_chat_params common_chat_params_init_without_tools(const common_chat_template & tmpl, const struct templates_params & inputs) {
20432152
common_chat_params data;
20442153
data.prompt = apply(tmpl, inputs);
@@ -2055,8 +2164,62 @@ static common_chat_params common_chat_params_init_without_tools(const common_cha
20552164
return data;
20562165
}
20572166

2167+
static common_chat_params common_chat_params_init_seed_oss(
2168+
const common_chat_template & tmpl,
2169+
templates_params & params,
2170+
const common_chat_templates_inputs & inputs)
2171+
{
2172+
common_chat_params data;
2173+
data.prompt = apply(tmpl, params);
2174+
data.format = COMMON_CHAT_FORMAT_SEED_OSS;
2175+
if (string_ends_with(data.prompt, "<seed:think>")) {
2176+
if (!inputs.enable_thinking) {
2177+
data.prompt += "</seed:think>";
2178+
} else {
2179+
data.thinking_forced_open = true;
2180+
}
2181+
}
2182+
2183+
if (params.tools.is_array() && !params.tools.empty()) {
2184+
data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
2185+
data.grammar = build_grammar([&](const common_grammar_builder & builder) {
2186+
std::vector<std::string> tool_rules;
2187+
foreach_function(params.tools, [&](const json & tool) {
2188+
const auto & function = tool.at("function");
2189+
std::string name = function.at("name");
2190+
auto parameters = function.at("parameters");
2191+
builder.resolve_refs(parameters);
2192+
2193+
// Create rule for Seed-OSS function call format
2194+
std::string param_rules;
2195+
if (parameters.contains("properties")) {
2196+
for (const auto & [key, value] : parameters.at("properties").items()) {
2197+
param_rules += "\"<parameter=" + key + ">\"" + builder.add_schema(name + "-arg-" + key, value) +
2198+
"\"</parameter>\"";
2199+
}
2200+
}
2201+
2202+
tool_rules.push_back(builder.add_rule(name + "-call",
2203+
"\"<seed:tool_call>\" space \"<function=" + name + ">\" space " +
2204+
param_rules +
2205+
" \"</function>\" space \"</seed:tool_call>\""));
2206+
});
2207+
2208+
data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<seed:tool_call>" });
2209+
2210+
data.preserved_tokens = {
2211+
"<seed:think>", "</seed:think>", "<seed:tool_call>", "</seed:tool_call>",
2212+
"<function=", "</function>", "<parameter=", "</parameter>",
2213+
};
2214+
2215+
builder.add_rule("root", string_join(tool_rules, " | "));
2216+
});
2217+
}
2218+
return data;
2219+
}
2220+
20582221
static common_chat_params common_chat_templates_apply_jinja(
2059-
const struct common_chat_templates * tmpls,
2222+
const struct common_chat_templates * tmpls,
20602223
const struct common_chat_templates_inputs & inputs)
20612224
{
20622225
templates_params params;
@@ -2121,10 +2284,15 @@ static common_chat_params common_chat_templates_apply_jinja(
21212284
}
21222285

21232286
// GPT-OSS
2124-
if (src.find("<|channel|>") != std::string::npos && params.json_schema.is_null()) {
2287+
if (src.find("<|channel|>") != std::string::npos) {
21252288
return common_chat_params_init_gpt_oss(tmpl, params);
21262289
}
21272290

2291+
// Seed-OSS
2292+
if (src.find("<seed:think>") != std::string::npos) {
2293+
return common_chat_params_init_seed_oss(tmpl, params, inputs);
2294+
}
2295+
21282296
// Use generic handler when mixing tools + JSON schema.
21292297
// TODO: support that mix in handlers below.
21302298
if ((params.tools.is_array() && params.json_schema.is_object())) {
@@ -2283,6 +2451,9 @@ static void common_chat_parse(common_chat_msg_parser & builder) {
22832451
case COMMON_CHAT_FORMAT_GPT_OSS:
22842452
common_chat_parse_gpt_oss(builder);
22852453
break;
2454+
case COMMON_CHAT_FORMAT_SEED_OSS:
2455+
common_chat_parse_seed_oss(builder);
2456+
break;
22862457
default:
22872458
throw std::runtime_error(std::string("Unsupported format: ") + common_chat_format_name(builder.syntax().format));
22882459
}

common/chat.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -111,6 +111,7 @@ enum common_chat_format {
111111
COMMON_CHAT_FORMAT_COMMAND_R7B,
112112
COMMON_CHAT_FORMAT_GRANITE,
113113
COMMON_CHAT_FORMAT_GPT_OSS,
114+
COMMON_CHAT_FORMAT_SEED_OSS,
114115

115116
COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats
116117
};

common/common.cpp

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -988,7 +988,12 @@ struct common_init_result common_init_from_params(common_params & params) {
988988
return iparams;
989989
}
990990

991+
char buf[1024];
991992
la.ptr = lora.get();
993+
llama_adapter_meta_val_str(la.ptr, "adapter.lora.task_name", buf, sizeof(buf));
994+
la.task_name = buf;
995+
llama_adapter_meta_val_str(la.ptr, "adapter.lora.prompt_prefix", buf, sizeof(buf));
996+
la.prompt_prefix = buf;
992997
iparams.lora.emplace_back(std::move(lora)); // copy to list of loaded adapters
993998
}
994999

@@ -1152,7 +1157,6 @@ struct llama_context_params common_context_params_to_llama(const common_params &
11521157
cparams.yarn_orig_ctx = params.yarn_orig_ctx;
11531158
cparams.pooling_type = params.pooling_type;
11541159
cparams.attention_type = params.attention_type;
1155-
cparams.defrag_thold = params.defrag_thold;
11561160
cparams.cb_eval = params.cb_eval;
11571161
cparams.cb_eval_user_data = params.cb_eval_user_data;
11581162
cparams.offload_kqv = !params.no_kv_offload;

common/common.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,9 @@ struct common_adapter_lora_info {
3434
std::string path;
3535
float scale;
3636

37+
std::string task_name;
38+
std::string prompt_prefix;
39+
3740
struct llama_adapter_lora * ptr;
3841
};
3942

@@ -288,7 +291,6 @@ struct common_params {
288291
float yarn_beta_fast = 32.0f; // YaRN low correction dim
289292
float yarn_beta_slow = 1.0f; // YaRN high correction dim
290293
int32_t yarn_orig_ctx = 0; // YaRN original context length
291-
float defrag_thold = 0.1f; // KV cache defragmentation threshold
292294

293295
// offload params
294296
std::vector<ggml_backend_dev_t> devices; // devices to use for offloading

0 commit comments

Comments (0)