
Commit d225bed

server: add minimax-m2 reasoning format override for MiniMax-M2 compatibility
MiniMax-M2 models require the complete <think>...</think> block, including the tags, to be present in the context for proper reasoning. This mode injects a synthetic opening <think> tag into the stream while keeping all reasoning tags inline in message.content, ensuring the model receives the full reasoning block it needs.

Changes:
- Add COMMON_REASONING_FORMAT_MINIMAX_M2 enum value to common_reasoning_format
- Implement minimax-m2 format parsing that bypasses reasoning extraction
- Inject a synthetic <think>\n chunk at slot start when minimax-m2 is active
- Track injection state with the minimax_reasoning_prefix_injected slot flag
- Prepend <think>\n to generated_text for the final response and chat parsing
- Prevent client reasoning_format=auto from overriding the server CLI setting
- Add minimax-m2 to the CLI help, README.md, and code documentation
- Handle LLAMA_TOKEN_NULL in send_partial_response to skip token recording
- Update process_token to preserve delta_to_send for streaming correctness
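A minimal standalone sketch of the behaviour described above (plain C++, not the actual server code; the generated string is invented for illustration): with the minimax-m2 format the server prepends a synthetic `<think>\n` to whatever the model generated, so the full reasoning block, tags included, ends up inline in the message content.

```cpp
#include <iostream>
#include <string>

int main() {
    // What the model emits after the injected prefix: the rest of its
    // reasoning, the closing tag, then the visible answer.
    const std::string generated_text = "The user wants a short greeting.</think>Hello!";

    // With --reasoning-format minimax-m2 the server prepends the synthetic
    // opening tag before parsing and before building the final response,
    // so the complete <think>...</think> block is preserved in the content.
    const std::string content = std::string("<think>\n") + generated_text;

    std::cout << content << "\n";
    // <think>
    // The user wants a short greeting.</think>Hello!
    return 0;
}
```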
1 parent a860ced commit d225bed

File tree: 6 files changed, +61 −13 lines

common/arg.cpp

Lines changed: 1 addition & 0 deletions

@@ -3442,6 +3442,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         "- none: leaves thoughts unparsed in `message.content`\n"
         "- deepseek: puts thoughts in `message.reasoning_content`\n"
         "- deepseek-legacy: keeps `<think>` tags in `message.content` while also populating `message.reasoning_content`\n"
+        "- minimax-m2: streams a synthetic opening `<think>` and keeps `</think>` tags in `message.content`\n"
         "(default: auto)",
         [](common_params & params, const std::string & value) {
             params.reasoning_format = common_reasoning_format_from_name(value);

common/chat-parser.cpp

Lines changed: 2 additions & 1 deletion

@@ -171,7 +171,8 @@ void common_chat_msg_parser::consume_literal(const std::string & literal) {
 bool common_chat_msg_parser::try_parse_reasoning(const std::string & start_think, const std::string & end_think) {
     std::string pending_reasoning_prefix;
 
-    if (syntax_.reasoning_format == COMMON_REASONING_FORMAT_NONE) {
+    if (syntax_.reasoning_format == COMMON_REASONING_FORMAT_NONE ||
+        syntax_.reasoning_format == COMMON_REASONING_FORMAT_MINIMAX_M2) {
         return false;
     }
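For illustration, a simplified sketch of what bailing out of `try_parse_reasoning` means (made-up mini types, not the llama.cpp parser; `reasoning_format`, `parsed_msg`, and `parse` below exist only in this example): for `none` and the new `minimax-m2` formats the `<think>...</think>` block is not extracted into a separate reasoning field and stays verbatim in the content.

```cpp
#include <iostream>
#include <string>

enum reasoning_format { NONE, DEEPSEEK, MINIMAX_M2 };  // loosely mirrors the real enum

struct parsed_msg {
    std::string content;
    std::string reasoning_content;
};

parsed_msg parse(const std::string & text, reasoning_format fmt) {
    parsed_msg msg;
    if (fmt == NONE || fmt == MINIMAX_M2) {
        msg.content = text;  // bypass extraction: tags stay inline in content
        return msg;
    }
    const std::string open = "<think>", close = "</think>";
    const auto b = text.find(open);
    const auto e = text.find(close);
    if (b != std::string::npos && e != std::string::npos && e > b) {
        msg.reasoning_content = text.substr(b + open.size(), e - b - open.size());
        msg.content           = text.substr(e + close.size());
    } else {
        msg.content = text;
    }
    return msg;
}

int main() {
    const std::string text = "<think>\nplan the answer\n</think>Hello!";
    std::cout << parse(text, MINIMAX_M2).content << "\n";  // full block kept inline
    std::cout << parse(text, DEEPSEEK).reasoning_content;  // extracted reasoning only
    return 0;
}
```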

common/chat.cpp

Lines changed: 8 additions & 2 deletions

@@ -654,6 +654,7 @@ const char * common_reasoning_format_name(common_reasoning_format format) {
         case COMMON_REASONING_FORMAT_AUTO: return "auto";
         case COMMON_REASONING_FORMAT_DEEPSEEK: return "deepseek";
         case COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY: return "deepseek-legacy";
+        case COMMON_REASONING_FORMAT_MINIMAX_M2: return "minimax-m2";
         default:
             throw std::runtime_error("Unknown reasoning format");
     }

@@ -668,6 +669,8 @@ common_reasoning_format common_reasoning_format_from_name(const std::string & fo
         return COMMON_REASONING_FORMAT_DEEPSEEK;
     } else if (format == "deepseek-legacy") {
         return COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY;
+    } else if (format == "minimax-m2") {
+        return COMMON_REASONING_FORMAT_MINIMAX_M2;
     }
     throw std::runtime_error("Unknown reasoning format: " + format);
 }

@@ -1789,7 +1792,8 @@ static void common_chat_parse_deepseek_v3_1(common_chat_msg_parser & builder) {
         // </think><|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>NAME\n```json\nJSON\n```<|tool▁call▁end|><|tool▁calls▁end|>
         common_chat_parse_deepseek_v3_1_content(builder);
     } else {
-        if (builder.syntax().reasoning_format == COMMON_REASONING_FORMAT_NONE) {
+        if (builder.syntax().reasoning_format == COMMON_REASONING_FORMAT_NONE ||
+            builder.syntax().reasoning_format == COMMON_REASONING_FORMAT_MINIMAX_M2) {
             LOG_DBG("%s: reasoning_format none, adding content\n", __func__);
             common_chat_parse_deepseek_v3_1_content(builder);
             return;

@@ -2016,7 +2020,9 @@ static void common_chat_parse_gpt_oss(common_chat_msg_parser & builder) {
 
         if (regex_match(analysis_regex, header)) {
             builder.move_to(header_start_pos);
-            if (builder.syntax().reasoning_format == COMMON_REASONING_FORMAT_NONE || builder.syntax().reasoning_in_content) {
+            if (builder.syntax().reasoning_format == COMMON_REASONING_FORMAT_NONE ||
+                builder.syntax().reasoning_format == COMMON_REASONING_FORMAT_MINIMAX_M2 ||
+                builder.syntax().reasoning_in_content) {
                 builder.add_content(consume_end(true));
             } else {
                 builder.try_parse_reasoning("<|channel|>analysis<|message|>", "<|end|>");
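A quick usage sketch of the two helpers touched above (the exact header names and include paths are an assumption; the functions and the new enum value are the ones in this diff): the new string round-trips through `common_reasoning_format_from_name` / `common_reasoning_format_name`, and unrecognized names keep throwing.

```cpp
#include <cassert>
#include <cstring>
#include <stdexcept>

#include "common.h"  // common_reasoning_format enum (assumed include path)
#include "chat.h"    // common_reasoning_format_name / _from_name (assumed include path)

int main() {
    // New mapping introduced by this commit.
    assert(common_reasoning_format_from_name("minimax-m2") == COMMON_REASONING_FORMAT_MINIMAX_M2);
    assert(std::strcmp(common_reasoning_format_name(COMMON_REASONING_FORMAT_MINIMAX_M2), "minimax-m2") == 0);

    // Unrecognized names still throw, exactly as before.
    bool threw = false;
    try {
        common_reasoning_format_from_name("not-a-format");
    } catch (const std::runtime_error &) {
        threw = true;
    }
    assert(threw);
    return 0;
}
```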

common/common.h

Lines changed: 1 addition & 0 deletions

@@ -249,6 +249,7 @@ enum common_reasoning_format {
     COMMON_REASONING_FORMAT_AUTO,            // Same as deepseek, using `message.reasoning_content`
     COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY, // Extract thinking tag contents and return as `message.reasoning_content`, or leave inline in <think> tags in stream mode
     COMMON_REASONING_FORMAT_DEEPSEEK,        // Extract thinking tag contents and return as `message.reasoning_content`, including in streaming deltas.
+    COMMON_REASONING_FORMAT_MINIMAX_M2,      // Stream a synthetic opening <think> tag and keep </think> tags in `message.content` for MiniMax-M2 compatibility
     // do not extend this enum unless you absolutely have to
     // in most cases, use COMMON_REASONING_FORMAT_AUTO
     // see: https://github.com/ggml-org/llama.cpp/pull/15408

tools/server/README.md

Lines changed: 1 addition & 1 deletion

@@ -190,7 +190,7 @@ The project is under active development, and we are [looking for feedback and co
 | `--no-slots` | disables slots monitoring endpoint<br/>(env: LLAMA_ARG_NO_ENDPOINT_SLOTS) |
 | `--slot-save-path PATH` | path to save slot kv cache (default: disabled) |
 | `--jinja` | use jinja template for chat (default: disabled)<br/>(env: LLAMA_ARG_JINJA) |
-| `--reasoning-format FORMAT` | controls whether thought tags are allowed and/or extracted from the response, and in which format they're returned; one of:<br/>- none: leaves thoughts unparsed in `message.content`<br/>- deepseek: puts thoughts in `message.reasoning_content`<br/>- deepseek-legacy: keeps `<think>` tags in `message.content` while also populating `message.reasoning_content`<br/>(default: deepseek)<br/>(env: LLAMA_ARG_THINK) |
+| `--reasoning-format FORMAT` | controls whether thought tags are allowed and/or extracted from the response, and in which format they're returned; one of:<br/>- none: leaves thoughts unparsed in `message.content`<br/>- deepseek: puts thoughts in `message.reasoning_content`<br/>- deepseek-legacy: keeps `<think>` tags in `message.content` while also populating `message.reasoning_content`<br/>- minimax-m2: Stream a synthetic opening <think> tag and keep </think> tags in `message.content` for MiniMax-M2 compatibility<br/>(default: deepseek)<br/>(env: LLAMA_ARG_THINK) |
 | `--reasoning-budget N` | controls the amount of thinking allowed; currently only one of: -1 for unrestricted thinking budget, or 0 to disable thinking (default: -1)<br/>(env: LLAMA_ARG_THINK_BUDGET) |
 | `--chat-template JINJA_TEMPLATE` | set custom jinja chat template (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, phi3, phi4, rwkv-world, seed_oss, smolvlm, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE) |
 | `--chat-template-file JINJA_TEMPLATE_FILE` | set custom jinja chat template file (default: template taken from model's metadata)<br/>if suffix/prefix are specified, template will be disabled<br/>only commonly used templates are accepted (unless --jinja is set before this flag):<br/>list of built-in templates:<br/>bailing, chatglm3, chatglm4, chatml, command-r, deepseek, deepseek2, deepseek3, exaone3, exaone4, falcon3, gemma, gigachat, glmedge, gpt-oss, granite, hunyuan-dense, hunyuan-moe, kimi-k2, llama2, llama2-sys, llama2-sys-bos, llama2-sys-strip, llama3, llama4, megrez, minicpm, mistral-v1, mistral-v3, mistral-v3-tekken, mistral-v7, mistral-v7-tekken, monarch, openchat, orion, phi3, phi4, rwkv-world, seed_oss, smolvlm, vicuna, vicuna-orca, yandex, zephyr<br/>(env: LLAMA_ARG_CHAT_TEMPLATE_FILE) |

tools/server/server.cpp

Lines changed: 48 additions & 9 deletions

@@ -443,7 +443,10 @@ struct server_task {
         }
         common_reasoning_format reasoning_format = params_base.reasoning_format;
         if (data.contains("reasoning_format")) {
-            reasoning_format = common_reasoning_format_from_name(data.at("reasoning_format").get<std::string>());
+            const auto requested = common_reasoning_format_from_name(data.at("reasoning_format").get<std::string>());
+            if (requested != COMMON_REASONING_FORMAT_AUTO) {
+                reasoning_format = requested;
+            }
         }
         params.oaicompat_chat_syntax.reasoning_format = reasoning_format;
         params.oaicompat_chat_syntax.reasoning_in_content = params.stream && (reasoning_format == COMMON_REASONING_FORMAT_DEEPSEEK_LEGACY);

@@ -1660,6 +1663,7 @@ struct server_slot {
     bool has_next_token = true;
     bool has_new_line = false;
     bool truncated = false;
+    bool minimax_reasoning_prefix_injected = false;
 
     stop_type stop;
 
@@ -1730,6 +1734,7 @@
         generated_text = "";
         has_new_line = false;
         truncated = false;
+        minimax_reasoning_prefix_injected = false;
         stop = STOP_TYPE_NONE;
         stopping_word = "";
         n_sent_text = 0;

@@ -1856,9 +1861,13 @@
         GGML_ASSERT(task);
 
         auto previous_msg = chat_msg;
-        SRV_DBG("Parsing chat message: %s\n", generated_text.c_str());
+        std::string text_to_parse = generated_text;
+        if (minimax_reasoning_prefix_injected) {
+            text_to_parse.insert(0, "<think>\n");
+        }
+        SRV_DBG("Parsing chat message: %s\n", text_to_parse.c_str());
         auto new_msg = common_chat_parse(
-            generated_text,
+            text_to_parse,
             /* is_partial= */ stop != STOP_TYPE_EOS,
             task->params.oaicompat_chat_syntax);
         if (!new_msg.empty()) {

@@ -2832,6 +2841,19 @@ struct server_context {
 
         slot.state = SLOT_STATE_STARTED;
 
+        const bool needs_minimax_prefix =
+            slot.task->params.oaicompat_chat_syntax.reasoning_format == COMMON_REASONING_FORMAT_MINIMAX_M2;
+        if (needs_minimax_prefix) {
+            slot.minimax_reasoning_prefix_injected = true;
+            if (slot.task->params.stream) {
+                completion_token_output prefix_chunk{};
+                prefix_chunk.tok = LLAMA_TOKEN_NULL;
+                prefix_chunk.prob = 0.0f;
+                prefix_chunk.text_to_send = "<think>\n";
+                send_partial_response(slot, prefix_chunk, false);
+            }
+        }
+
         SLT_INF(slot, "%s", "processing task\n");
 
         return true;

@@ -2887,7 +2909,10 @@
             result.text_to_send = "";
         }
 
+        std::string delta_to_send = result.text_to_send;
+        result.text_to_send = token_str;
         slot.add_token(result);
+        result.text_to_send = std::move(delta_to_send);
         if (slot.task->params.stream) {
             send_partial_response(slot, result, false);
         }

@@ -3060,7 +3085,11 @@
         return true;
     }
 
-    void send_partial_response(server_slot & slot, const completion_token_output & tkn, bool is_progress) {
+    void send_partial_response(
+            server_slot & slot,
+            const completion_token_output & tkn,
+            bool is_progress,
+            const std::vector<common_chat_msg_diff> * forced_diffs = nullptr) {
         auto res = std::make_unique<server_task_result_cmpl_partial>();
 
         res->id = slot.task->id;

@@ -3074,9 +3103,15 @@
             res->progress.time_ms = (ggml_time_us() - slot.t_start_process_prompt / 1000);
         } else {
             res->content = tkn.text_to_send;
-            res->tokens = { tkn.tok };
+            if (tkn.tok != LLAMA_TOKEN_NULL) {
+                res->tokens = { tkn.tok };
+            }
 
-            slot.update_chat_msg(res->oaicompat_msg_diffs);
+            if (forced_diffs) {
+                res->oaicompat_msg_diffs = *forced_diffs;
+            } else {
+                slot.update_chat_msg(res->oaicompat_msg_diffs);
+            }
         }
 
         res->n_decoded = slot.n_decoded;

@@ -3089,7 +3124,7 @@
         res->oaicompat_cmpl_id = slot.task->params.oaicompat_cmpl_id;
 
         // populate res.probs_output
-        if (slot.task->params.sampling.n_probs > 0) {
+        if (slot.task->params.sampling.n_probs > 0 && tkn.tok != LLAMA_TOKEN_NULL) {
            res->prob_output = tkn; // copy the token probs
         }
 
@@ -3107,8 +3142,12 @@
         res->id = slot.task->id;
         res->id_slot = slot.id;
 
-        res->index = slot.task->index;
-        res->content = slot.generated_text;
+        res->index = slot.task->index;
+        std::string response_content = slot.generated_text;
+        if (slot.minimax_reasoning_prefix_injected) {
+            response_content.insert(0, "<think>\n");
+        }
+        res->content = std::move(response_content);
         res->tokens = std::move(slot.generated_tokens);
         res->timings = slot.get_timings();
         res->prompt = slot.task->tokens.detokenize(ctx, true);
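A simplified, self-contained sketch of two bookkeeping details from the hunks above (made-up `token_output` / `partial_response` types, not the server's actual structs): a chunk carrying a null token id, like the synthetic `<think>\n` prefix, is streamed but contributes no token to the response, and the processing path temporarily swaps in the full token text for bookkeeping before restoring the trimmed delta that actually gets streamed.

```cpp
#include <iostream>
#include <string>
#include <utility>
#include <vector>

constexpr int TOKEN_NULL = -1;  // stands in for LLAMA_TOKEN_NULL in this sketch

struct token_output {
    int         tok = TOKEN_NULL;
    std::string text_to_send;    // what goes out on the stream
};

struct partial_response {
    std::string      content;
    std::vector<int> tokens;
};

partial_response make_partial(const token_output & tkn) {
    partial_response res;
    res.content = tkn.text_to_send;
    if (tkn.tok != TOKEN_NULL) {   // synthetic chunks contribute no token id
        res.tokens = { tkn.tok };
    }
    return res;
}

int main() {
    // Synthetic "<think>\n" prefix chunk: streamed, but tokens stays empty.
    token_output prefix;
    prefix.text_to_send = "<think>\n";
    std::cout << make_partial(prefix).tokens.size() << "\n";  // 0

    // Real token whose streamed delta was trimmed (e.g. a partial stop match):
    // bookkeeping sees the full token text, the stream gets the trimmed delta.
    const std::string token_str = "Hello";
    token_output result;
    result.tok          = 42;
    result.text_to_send = "He";                       // trimmed delta

    std::string delta_to_send = result.text_to_send;  // preserve the delta
    result.text_to_send = token_str;                  // full text for bookkeeping
    // slot.add_token(result) would run here in the real server
    result.text_to_send = std::move(delta_to_send);   // restore before streaming

    std::cout << make_partial(result).content << "\n";  // "He"
    return 0;
}
```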
