Skip to content

Commit a76073c

Browse files
author
ochafik
committed
minimize diffs
1 parent 5e6f2a2 commit a76073c

File tree

3 files changed

+8
-24
lines changed

3 files changed

+8
-24
lines changed

common/chat.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -377,8 +377,8 @@ static common_chat_params common_chat_params_init_command_r7b(const common_chat_
377377
return data;
378378
}
379379
static common_chat_msg common_chat_parse_command_r7b(const std::string & input) {
380-
static std::regex response_regex("<\\|START_RESPONSE\\|>([\\s\\S\\n\\r]*?)<\\|END_RESPONSE\\|>");
381-
static std::regex thought_action_regex("<\\|START_THINKING\\|>([\\s\\S\\n\\r]*)<\\|END_THINKING\\|><\\|START_ACTION\\|>([\\s\\S\\n\\r]*?)<\\|END_ACTION\\|>");
380+
static std::regex response_regex("<\\|START_RESPONSE\\|>(.*?)<\\|END_RESPONSE\\|>");
381+
static std::regex thought_action_regex("<\\|START_THINKING\\|>([\\s\\S\\n\\r]*?)<\\|END_THINKING\\|><\\|START_ACTION\\|>([\\s\\S\\n\\r]*?)<\\|END_ACTION\\|>");
382382
std::smatch match;
383383

384384
common_chat_msg result;
@@ -576,7 +576,7 @@ static common_chat_params common_chat_params_init_deepseek_r1(const common_chat_
576576
}
577577
static common_chat_msg common_chat_parse_deepseek_r1(const std::string & input) {
578578
static std::regex trigger_regex("<|tool▁calls▁begin|>");
579-
static std::regex function_regex(R"(<|tool▁call▁begin|>function<|tool▁sep|>([^\n]+)\n```json\n)");
579+
static std::regex function_regex("<|tool▁call▁begin|>function<|tool▁sep|>([^\n]+)\n```json\n");
580580
static std::regex close_regex("```<|tool▁call▁end|>");
581581
static std::regex think_regex(R"(<think>([\s\S\n]*)</think>([\s\S\r\n]*))");
582582
auto msg = parse_json_tool_calls(input, trigger_regex, function_regex, close_regex);

common/common.cpp

Lines changed: 3 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -1869,16 +1869,9 @@ std::string common_chat_format_example(const common_chat_template & tmpl, bool u
18691869
return common_chat_apply_template(tmpl, msgs, true, use_jinja);
18701870
}
18711871

1872-
#define CHATML_TEMPLATE_SRC \
1873-
"{%- for message in messages -%}\n" \
1874-
" {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>\n' -}}\n" \
1875-
"{%- endfor -%}\n" \
1876-
"{%- if add_generation_prompt -%}\n" \
1877-
" {{- '<|im_start|>assistant\n' -}}\n" \
1878-
"{%- endif -%})"
1879-
18801872
common_chat_templates common_chat_templates_from_model(const struct llama_model * model, const std::string & chat_template_override)
18811873
{
1874+
auto vocab = llama_model_get_vocab(model);
18821875
std::string default_template_src = chat_template_override == "chatml" ? CHATML_TEMPLATE_SRC : chat_template_override;
18831876
std::string template_tool_use_src = chat_template_override == "chatml" ? CHATML_TEMPLATE_SRC : "";
18841877
bool has_explicit_template = !chat_template_override.empty();
@@ -1908,11 +1901,6 @@ common_chat_templates common_chat_templates_from_model(const struct llama_model
19081901
)";
19091902
}
19101903
}
1911-
std::string token_bos;
1912-
std::string token_eos;
1913-
// TODO: update logic that adds BOS and EOS tokens to the tokenized prompt, in favour of the template.
1914-
#if 0
1915-
auto vocab = llama_model_get_vocab(model);
19161904
const auto get_token = [&](llama_token token, const char * name, const char * jinja_variable_name) {
19171905
if (token == LLAMA_TOKEN_NULL) {
19181906
if (default_template_src.find(jinja_variable_name) != std::string::npos
@@ -1924,9 +1912,8 @@ common_chat_templates common_chat_templates_from_model(const struct llama_model
19241912
return common_token_to_piece(vocab, token, true);
19251913
}
19261914
};
1927-
token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
1928-
token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
1929-
#endif
1915+
auto token_bos = get_token(llama_vocab_bos(vocab), "BOS", "bos_token");
1916+
auto token_eos = get_token(llama_vocab_eos(vocab), "EOS", "eos_token");
19301917
return {
19311918
has_explicit_template,
19321919
std::make_unique<minja::chat_template>(default_template_src, token_bos, token_eos),

examples/server/tests/unit/test_tool_call.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -252,7 +252,6 @@ def test_completion_without_tool_call_slow(template_name: str, n_predict: int, t
252252
@pytest.mark.slow
253253
@pytest.mark.parametrize("hf_repo,template_override", [
254254
("bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
255-
("bartowski/c4ai-command-r7b-12-2024-GGUF:Q4_K_M", ("CohereForAI/c4ai-command-r7b-12-2024", "tool_use")),
256255
("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None),
257256
("bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None),
258257
("bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None),
@@ -263,9 +262,8 @@ def test_completion_without_tool_call_slow(template_name: str, n_predict: int, t
263262
("bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)),
264263
("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
265264
# ("bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)),
266-
# ("bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
267265
])
268-
def test_weather(hf_repo: str, template_override: Tuple[str, str | None] | None):
266+
def test_weather_tool_call(hf_repo: str, template_override: Tuple[str, str | None] | None):
269267
global server
270268
n_predict = 512
271269
server.n_slots = 1
@@ -313,9 +311,8 @@ def test_weather(hf_repo: str, template_override: Tuple[str, str | None] | None)
313311
(None, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")),
314312
(None, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch-Hermes-3-Llama-3.1-8B", "tool_use")),
315313
(None, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None),
316-
# (None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None),
317314
])
318-
def test_hello_world(expected_arguments_override: str | None, hf_repo: str, template_override: Tuple[str, str | None] | None):
315+
def test_hello_world_tool_call(expected_arguments_override: str | None, hf_repo: str, template_override: Tuple[str, str | None] | None):
319316
global server
320317
server.n_slots = 1
321318
server.jinja = True

0 commit comments

Comments (0)