Skip to content

Commit 5f06d37

Browse files
committed
add --tool-call argument
1 parent 7e017cf commit 5f06d37

File tree

4 files changed

+23
-3
lines changed

4 files changed

+23
-3
lines changed

common/common.cpp

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -428,6 +428,7 @@ void gpt_params_parse_from_env(gpt_params & params) {
428428
get_env("LLAMA_ARG_CONT_BATCHING", params.cont_batching);
429429
get_env("LLAMA_ARG_HOST", params.hostname);
430430
get_env("LLAMA_ARG_PORT", params.port);
431+
get_env("LLAMA_ARG_TOOL_CALLS", params.enable_tool_calls);
431432
}
432433

433434
bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
@@ -1046,6 +1047,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
10461047
params.lora_init_without_apply = true;
10471048
return true;
10481049
}
1050+
if (arg == "--tool-call" || arg == "--tool-calls") {
1051+
params.enable_tool_calls = true;
1052+
return true;
1053+
}
10491054
if (arg == "--control-vector") {
10501055
CHECK_ARG
10511056
params.control_vectors.push_back({ 1.0f, argv[i], });
@@ -2036,6 +2041,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
20362041
options.push_back({ "server", "-sps, --slot-prompt-similarity SIMILARITY",
20372042
"how much the prompt of a request must match the prompt of a slot in order to use that slot (default: %.2f, 0.0 = disabled)\n", params.slot_prompt_similarity });
20382043
options.push_back({ "server", " --lora-init-without-apply", "load LoRA adapters without applying them (apply later via POST /lora-adapters) (default: %s)", params.lora_init_without_apply ? "enabled" : "disabled"});
2044+
options.push_back({ "server", " --tool-call(s)", "enable OAI tool calls for chat completion endpoint (default: %s)", params.enable_tool_calls ? "enabled" : "disabled"});
20392045

20402046
#ifndef LOG_DISABLE_LOGS
20412047
options.push_back({ "logging" });

common/common.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -221,6 +221,7 @@ struct gpt_params {
221221
std::string chat_template = "";
222222
std::string system_prompt = "";
223223
bool enable_chat_template = true;
224+
bool enable_tool_calls = false;
224225

225226
std::vector<std::string> api_keys;
226227

examples/server/server.cpp

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3071,6 +3071,7 @@ int main(int argc, char ** argv) {
30713071

30723072
if (body.contains("tools") && ctx_server.tool_format != LLAMA_TOOL_FORMAT_NOT_SUPPORTED) {
30733073
body["prompt"] = format_chat_with_tool(ctx_server.tool_format, body.at("messages"), body.at("tools"));
3074+
body.erase(body.find("tools"));
30743075
}
30753076

30763077
json data = oaicompat_completion_params_parse(ctx_server.model, body, params.chat_template);
@@ -3441,14 +3442,26 @@ int main(int argc, char ** argv) {
34413442
}
34423443

34433444
// decide if we can enable tool calls
3444-
ctx_server.tool_format = get_tool_format(ctx_server.ctx);
3445+
bool tool_call_support = false;
3446+
if (ctx_server.params.enable_tool_calls) {
3447+
ctx_server.tool_format = get_tool_format(ctx_server.ctx);
3448+
tool_call_support = ctx_server.tool_format != LLAMA_TOOL_FORMAT_NOT_SUPPORTED;
3449+
if (tool_call_support) {
3450+
LOG_WARNING("Tool call support is EXPERIMENTAL and may be unstable. Use at your own risk", {});
3451+
} else {
3452+
LOG_ERROR("Tool calls are not supported for this model. Please remove --tool-call or use a supported model", {});
3453+
clean_up();
3454+
t.join();
3455+
return 1;
3456+
}
3457+
}
34453458

34463459
// print sample chat example to make it clear which template is used
34473460
{
34483461
LOG_INFO("chat template", {
34493462
{"chat_example", llama_chat_format_example(ctx_server.model, params.chat_template)},
34503463
{"built_in", params.chat_template.empty()},
3451-
{"tool_call_enabled", ctx_server.tool_format != LLAMA_TOOL_FORMAT_NOT_SUPPORTED },
3464+
{"tool_call_support", tool_call_support},
34523465
});
34533466
}
34543467

examples/server/utils.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -397,7 +397,7 @@ static json oaicompat_completion_params_parse(
397397
}
398398

399399
// Params supported by OAI but unsupported by llama.cpp
400-
static const std::vector<std::string> unsupported_params { "tool_choice" };
400+
static const std::vector<std::string> unsupported_params { "tools", "tool_choice" };
401401
for (auto & param : unsupported_params) {
402402
if (body.contains(param)) {
403403
throw std::runtime_error("Unsupported param: " + param);

0 commit comments

Comments
 (0)