Commit c80f284

Merge pull request #223 from menloresearch/update-dev-from-master-2025-08-30-00-11
Sync master with upstream release b6318
2 parents: 8062559 + 600fe8a

16 files changed: 1350 additions, 200 deletions

common/chat.cpp

Lines changed: 152 additions & 1 deletion
@@ -622,6 +622,7 @@ const char * common_chat_format_name(common_chat_format format) {
         case COMMON_CHAT_FORMAT_COMMAND_R7B: return "Command R7B";
         case COMMON_CHAT_FORMAT_GRANITE: return "Granite";
         case COMMON_CHAT_FORMAT_GPT_OSS: return "GPT-OSS";
+        case COMMON_CHAT_FORMAT_SEED_OSS: return "Seed-OSS";
         default:
             throw std::runtime_error("Unknown chat format");
     }
@@ -2059,6 +2060,94 @@ static void common_chat_parse_granite(common_chat_msg_parser & builder) {
     }
 }
 
+static void common_chat_parse_seed_oss(common_chat_msg_parser & builder) {
+    // Parse thinking tags first - this handles the main reasoning content
+    builder.try_parse_reasoning("<seed:think>", "</seed:think>");
+
+    if (!builder.syntax().parse_tool_calls) {
+        builder.add_content(builder.consume_rest());
+        return;
+    }
+
+    // Parse tool calls - Seed-OSS uses <seed:tool_call> format
+    static const common_regex tool_call_begin_regex("<seed:tool_call>");
+    static const common_regex tool_call_end_regex("</seed:tool_call>");
+    static const common_regex function_regex("<function=([^>]+)>");
+    static const common_regex param_regex("<parameter=([^>]+)>");
+
+    while (auto tool_res = builder.try_find_regex(tool_call_begin_regex)) {
+        builder.consume_spaces(); // Consume whitespace after <seed:tool_call>
+
+        // Look for function call inside tool call, ignore any content before it
+        if (auto func_res = builder.try_find_regex(function_regex, std::string::npos, false)) {
+            auto function_name = builder.str(func_res->groups[1]);
+
+            // Parse Seed-OSS parameters <parameter=name>value</parameter>
+            json args = json::object();
+            // Parse all parameters
+            while (auto param_res = builder.try_find_regex(param_regex, std::string::npos, false)) {
+                // again, ignore noise around parameters
+                auto param_name = builder.str(param_res->groups[1]);
+                builder.move_to(param_res->groups[0].end);
+                builder.consume_spaces(); // Consume whitespace after parameter
+                auto savedPos = builder.pos();
+                if (auto param_parse = builder.try_find_literal("</parameter>")) {
+                    auto param = param_parse->prelude;
+                    builder.move_to(savedPos);
+                    try {
+                        if (auto param_res = builder.try_consume_json()) {
+                            args[param_name] = param_res->json;
+                        } else {
+                            args[param_name] = param;
+                        }
+                    } catch (json::exception &) {
+                        args[param_name] = param;
+                    }
+                } else {
+                    throw common_chat_msg_partial_exception("Incomplete tool parameter");
+                }
+            }
+            // Look for closing function tag
+            auto end_func = builder.try_find_literal("</function>");
+            if (end_func) {
+                builder.move_to(end_func->groups[0].end);
+                builder.consume_spaces(); // Consume whitespace after </function>
+
+                // Add the tool call with parsed arguments, but only if we REALLY got the literal
+                auto eaten_fragment = builder.input().substr(end_func->groups[0].begin, end_func->groups[0].end);
+                auto funlen = std::string("</function>").length();
+                if (eaten_fragment.length() >= funlen && eaten_fragment.substr(0, funlen) == std::string("</function>")) {
+                    if (!builder.add_tool_call(function_name, "", args.dump())) {
+                        throw common_chat_msg_partial_exception("Incomplete tool call");
+                    }
+                } else {
+                    throw common_chat_msg_partial_exception("Incomplete tool call");
+                }
+            } else {
+                throw common_chat_msg_partial_exception("Incomplete tool call");
+            }
+            // Look for closing tool call tag
+            if (auto end_tool = builder.try_find_regex(tool_call_end_regex, std::string::npos, false)) {
+                builder.move_to(end_tool->groups[0].end);
+                builder.consume_spaces(); // Consume trailing whitespace after tool call
+            } else {
+                throw common_chat_msg_partial_exception("Incomplete tool call");
+            }
+        } else {
+            // No function found - don't consume content here, let it be handled at the end
+            break;
+        }
+    }
+
+    // Consume any remaining whitespace after all tool call processing
+    builder.consume_spaces();
+    auto remaining = builder.consume_rest();
+    // If there's any non-whitespace content remaining, add it as content
+    if (!string_strip(remaining).empty()) {
+        builder.add_content(remaining);
+    }
+}
+
 static common_chat_params common_chat_params_init_without_tools(const common_chat_template & tmpl, const struct templates_params & inputs) {
     common_chat_params data;
     data.prompt = apply(tmpl, inputs);
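
Note: the markup shape that common_chat_parse_seed_oss is written to consume looks roughly like the example below. This is an illustrative sketch only (the get_weather name and its parameters are invented here), but the tags match the regexes above, and each parameter value is first tried as JSON via try_consume_json before falling back to the raw text between the tags.

    <seed:think>The user asked for the weather, so a tool call is needed.</seed:think>
    <seed:tool_call>
    <function=get_weather>
    <parameter=location>"Paris"</parameter>
    <parameter=days>3</parameter>
    </function>
    </seed:tool_call>
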
@@ -2075,8 +2164,62 @@ static common_chat_params common_chat_params_init_without_tools(const common_cha
     return data;
 }
 
+static common_chat_params common_chat_params_init_seed_oss(
+    const common_chat_template & tmpl,
+    templates_params & params,
+    const common_chat_templates_inputs & inputs)
+{
+    common_chat_params data;
+    data.prompt = apply(tmpl, params);
+    data.format = COMMON_CHAT_FORMAT_SEED_OSS;
+    if (string_ends_with(data.prompt, "<seed:think>")) {
+        if (!inputs.enable_thinking) {
+            data.prompt += "</seed:think>";
+        } else {
+            data.thinking_forced_open = true;
+        }
+    }
+
+    if (params.tools.is_array() && !params.tools.empty()) {
+        data.grammar_lazy = inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_REQUIRED;
+        data.grammar = build_grammar([&](const common_grammar_builder & builder) {
+            std::vector<std::string> tool_rules;
+            foreach_function(params.tools, [&](const json & tool) {
+                const auto & function = tool.at("function");
+                std::string name = function.at("name");
+                auto parameters = function.at("parameters");
+                builder.resolve_refs(parameters);
+
+                // Create rule for Seed-OSS function call format
+                std::string param_rules;
+                if (parameters.contains("properties")) {
+                    for (const auto & [key, value] : parameters.at("properties").items()) {
+                        param_rules += "\"<parameter=" + key + ">\"" + builder.add_schema(name + "-arg-" + key, value) +
+                                       "\"</parameter>\"";
+                    }
+                }
+
+                tool_rules.push_back(builder.add_rule(name + "-call",
+                    "\"<seed:tool_call>\" space \"<function=" + name + ">\" space " +
+                    param_rules +
+                    " \"</function>\" space \"</seed:tool_call>\""));
+            });
+
+            data.grammar_triggers.push_back({ COMMON_GRAMMAR_TRIGGER_TYPE_WORD, "<seed:tool_call>" });
+
+            data.preserved_tokens = {
+                "<seed:think>", "</seed:think>", "<seed:tool_call>", "</seed:tool_call>",
+                "<function=", "</function>", "<parameter=", "</parameter>",
+            };
+
+            builder.add_rule("root", string_join(tool_rules, " | "));
+        });
+    }
+    return data;
+}
+
 static common_chat_params common_chat_templates_apply_jinja(
-    const struct common_chat_templates * tmpls,
+    const struct common_chat_templates * tmpls,
     const struct common_chat_templates_inputs & inputs)
 {
     templates_params params;
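
Note: for the same invented get_weather tool, the grammar constructed above constrains sampling (lazily, once the "<seed:tool_call>" trigger word appears) to roughly the following shape. This is a hand-written sketch, not the exact GBNF that build_grammar and add_schema emit:

    root ::= get_weather-call
    get_weather-call ::= "<seed:tool_call>" space "<function=get_weather>" space
                         "<parameter=location>" get_weather-arg-location "</parameter>"
                         "</function>" space "</seed:tool_call>"
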
@@ -2145,6 +2288,11 @@ static common_chat_params common_chat_templates_apply_jinja(
         return common_chat_params_init_gpt_oss(tmpl, params);
     }
 
+    // Seed-OSS
+    if (src.find("<seed:think>") != std::string::npos) {
+        return common_chat_params_init_seed_oss(tmpl, params, inputs);
+    }
+
     // Use generic handler when mixing tools + JSON schema.
     // TODO: support that mix in handlers below.
     if ((params.tools.is_array() && params.json_schema.is_object())) {
@@ -2303,6 +2451,9 @@ static void common_chat_parse(common_chat_msg_parser & builder) {
         case COMMON_CHAT_FORMAT_GPT_OSS:
             common_chat_parse_gpt_oss(builder);
             break;
+        case COMMON_CHAT_FORMAT_SEED_OSS:
+            common_chat_parse_seed_oss(builder);
+            break;
         default:
             throw std::runtime_error(std::string("Unsupported format: ") + common_chat_format_name(builder.syntax().format));
     }

common/chat.h

Lines changed: 1 addition & 0 deletions
@@ -111,6 +111,7 @@ enum common_chat_format {
     COMMON_CHAT_FORMAT_COMMAND_R7B,
     COMMON_CHAT_FORMAT_GRANITE,
     COMMON_CHAT_FORMAT_GPT_OSS,
+    COMMON_CHAT_FORMAT_SEED_OSS,
 
     COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats
 };

convert_hf_to_gguf.py

Lines changed: 58 additions & 5 deletions
@@ -7546,9 +7546,13 @@ def __init__(self, *args, **kwargs):
         ]
 
         # n_group and d_inner are used during reshape_tensors for mamba2
-        self.d_model = self.find_hparam(["hidden_size", "d_model"])
-        self.n_group = self.find_hparam(["n_groups"])
-        self.d_inner = self.find_hparam(["expand"]) * self.d_model
+        # NOTE: Explicitly include hparam prefix prefix for d_model to
+        # disambiguate with top-level head_dim
+        # NOTE 2: If needed for future models, this can be isolated in a method
+        # to separate the prefix setting and teh keys used
+        self.d_model = self.find_hparam([f"{self.hparam_prefixes[0]}_head_dim", "hidden_size", "d_model"])
+        self.n_group = self.find_hparam(["n_groups", "num_groups"])
+        self.d_inner = self.find_hparam(["expand", "num_heads"]) * self.d_model
 
     def get_attn_layers(self):
         # Explicit list of layer type names
@@ -7609,12 +7613,12 @@ def set_gguf_parameters(self):
 
         ## Mamba mixer params ##
         self.gguf_writer.add_ssm_conv_kernel(self.find_hparam(["conv_kernel", "d_conv"]))
-        self.gguf_writer.add_ssm_state_size(self.find_hparam(["state_size", "d_state"]))
+        self.gguf_writer.add_ssm_state_size(self.find_hparam(["state_size", "d_state", "state_dim", "ssm_state_size"]))
         self.gguf_writer.add_ssm_group_count(self.n_group)
         self.gguf_writer.add_ssm_inner_size(self.d_inner)
         # NOTE: The mamba_dt_rank is _not_ the right field for how this is used
         # in llama.cpp
-        self.gguf_writer.add_ssm_time_step_rank(self.find_hparam(["n_heads"]))
+        self.gguf_writer.add_ssm_time_step_rank(self.find_hparam(["n_heads", "num_heads"]))
 
         ## Attention params ##
         head_count_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"])
@@ -7641,6 +7645,55 @@ def set_vocab(self):
         Mamba2Model.set_vocab(self)
 
 
+@ModelBase.register("NemotronHForCausalLM")
+class NemotronHModel(GraniteHybridModel):
+    """Hybrid mamba2/attention model from NVIDIA"""
+    model_arch = gguf.MODEL_ARCH.NEMOTRON_H
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        # Save the top-level head_dim for later
+        self.head_dim = self.hparams.get("head_dim", self.hparams.get("attention_head_dim"))
+        assert self.head_dim is not None, "Could not find the attention head dim in config"
+
+        # Don't use expand to calculate d_inner
+        self.d_inner = self.find_hparam(["num_heads"]) * self.d_model
+
+        # Update the ssm / attn / mlp layers
+        # M: Mamba2, *: Attention, -: MLP
+        hybrid_override_pattern = self.hparams["hybrid_override_pattern"]
+        self._ssm_layers = [i for i, val in enumerate(hybrid_override_pattern) if val == "M"]
+        self._mlp_layers = [i for i, val in enumerate(hybrid_override_pattern) if val == "-"]
+
+    def get_attn_layers(self):
+        hybrid_override_pattern = self.hparams["hybrid_override_pattern"]
+        assert len(hybrid_override_pattern) == self.block_count, "Mismatch between hybrid override and num_hidden_layers!"
+        return [i for i, val in enumerate(hybrid_override_pattern) if val == "*"]
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+
+        self.gguf_writer.add_key_length(self.head_dim)
+        self.gguf_writer.add_value_length(self.head_dim)
+
+        # Set feed_forward_length
+        # NOTE: This will trigger an override warning. This is preferrable to
+        # duplicating all the parent logic
+        n_ff = self.find_hparam(["intermediate_size", "n_inner", "hidden_dim"])
+        self.gguf_writer.add_feed_forward_length([
+            n_ff if i in self._mlp_layers else 0 for i in range(self.block_count)
+        ])
+
+    def set_vocab(self):
+        super().set_vocab()
+
+        # The tokenizer _does_ add a BOS token (via post_processor type
+        # TemplateProcessing) but does not set add_bos_token to true in the
+        # config, so we need to explicitly override it here.
+        self.gguf_writer.add_add_bos_token(True)
+
+
 @ModelBase.register("BailingMoeForCausalLM")
 class BailingMoeModel(TextModel):
     model_arch = gguf.MODEL_ARCH.BAILINGMOE
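
Note: in the NemotronHModel converter above, the hybrid_override_pattern string from the model config decides which blocks are Mamba2, attention, or MLP layers, and set_gguf_parameters then records n_ff only for the MLP indices (0 elsewhere). A minimal Python sketch of that partitioning, assuming an invented pattern string rather than one from a real config:

    # Mirrors the comprehensions in NemotronHModel.__init__ and get_attn_layers.
    pattern = "MMM*M-M*M-"  # hypothetical hybrid_override_pattern
    ssm_layers  = [i for i, val in enumerate(pattern) if val == "M"]  # Mamba2 blocks
    attn_layers = [i for i, val in enumerate(pattern) if val == "*"]  # attention blocks
    mlp_layers  = [i for i, val in enumerate(pattern) if val == "-"]  # MLP blocks
    print(ssm_layers, attn_layers, mlp_layers)
    # [0, 1, 2, 4, 6, 8] [3, 7] [5, 9]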
