Commit 5d6bd84

server: remove default "gpt-3.5-turbo" model name (#17668)
* server: remove default "gpt-3.5-turbo" model name
* do not reflect back model name from request
* fix test
1 parent fd3abe8 commit 5d6bd84

File tree

5 files changed (+40, -18 lines):
  tools/server/server-common.cpp
  tools/server/server-common.h
  tools/server/server-context.cpp
  tools/server/server-task.cpp
  tools/server/tests/unit/test_chat_completion.py

tools/server/server-common.cpp

Lines changed: 8 additions & 3 deletions

@@ -1263,7 +1263,11 @@ json convert_anthropic_to_oai(const json & body) {
     return oai_body;
 }

-json format_embeddings_response_oaicompat(const json & request, const json & embeddings, bool use_base64) {
+json format_embeddings_response_oaicompat(
+    const json & request,
+    const std::string & model_name,
+    const json & embeddings,
+    bool use_base64) {
     json data = json::array();
     int32_t n_tokens = 0;
     int i = 0;
@@ -1293,7 +1297,7 @@ json format_embeddings_response_oaicompat(const json & request, const json & emb
     }

     json res = json {
-        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
+        {"model", json_value(request, "model", model_name)},
         {"object", "list"},
         {"usage", json {
             {"prompt_tokens", n_tokens},
@@ -1307,6 +1311,7 @@ json format_embeddings_response_oaicompat(const json & request, const json & emb

 json format_response_rerank(
     const json & request,
+    const std::string & model_name,
     const json & ranks,
     bool is_tei_format,
     std::vector<std::string> & texts,
@@ -1338,7 +1343,7 @@ json format_response_rerank(
     if (is_tei_format) return results;

     json res = json{
-        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
+        {"model", json_value(request, "model", model_name)},
         {"object", "list"},
         {"usage", json{
             {"prompt_tokens", n_tokens},

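Note on the change above: the formatters now take the server's resolved model name explicitly instead of relying on the removed DEFAULT_OAICOMPAT_MODEL fallback; for embeddings and rerank responses the request's own "model" field, if present, still takes precedence. A minimal sketch of that fallback (not the server's actual helper; pick_model and the example model names are illustrative), using nlohmann/json as the server does:

// Minimal sketch, mirrors json_value(request, "model", model_name); not llama.cpp code.
#include <nlohmann/json.hpp>
#include <iostream>
#include <string>

using json = nlohmann::ordered_json;

static std::string pick_model(const json & request, const std::string & model_name) {
    if (request.contains("model") && request.at("model").is_string()) {
        return request.at("model").get<std::string>(); // request value still wins here
    }
    return model_name; // otherwise fall back to the server's resolved name
}

int main() {
    json with_model    = { {"model", "my-embedding-model"}, {"input", "hello"} };
    json without_model = { {"input", "hello"} };
    std::cout << pick_model(with_model,    "example-7b-q4_k_m.gguf") << "\n"; // my-embedding-model
    std::cout << pick_model(without_model, "example-7b-q4_k_m.gguf") << "\n"; // example-7b-q4_k_m.gguf
}
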
tools/server/server-common.h

Lines changed: 6 additions & 3 deletions

@@ -13,8 +13,6 @@
 #include <vector>
 #include <cinttypes>

-#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo"
-
 const static std::string build_info("b" + std::to_string(LLAMA_BUILD_NUMBER) + "-" + LLAMA_COMMIT);

 using json = nlohmann::ordered_json;
@@ -298,11 +296,16 @@ json oaicompat_chat_params_parse(
 json convert_anthropic_to_oai(const json & body);

 // TODO: move it to server-task.cpp
-json format_embeddings_response_oaicompat(const json & request, const json & embeddings, bool use_base64 = false);
+json format_embeddings_response_oaicompat(
+    const json & request,
+    const std::string & model_name,
+    const json & embeddings,
+    bool use_base64 = false);

 // TODO: move it to server-task.cpp
 json format_response_rerank(
     const json & request,
+    const std::string & model_name,
     const json & ranks,
     bool is_tei_format,
     std::vector<std::string> & texts,

tools/server/server-context.cpp

Lines changed: 22 additions & 6 deletions

@@ -17,6 +17,7 @@
 #include <cinttypes>
 #include <memory>
 #include <unordered_set>
+#include <filesystem>

 // fix problem with std::min and std::max
 #if defined(_WIN32)
@@ -518,6 +519,8 @@ struct server_context_impl {
     // Necessary similarity of prompt for slot selection
     float slot_prompt_similarity = 0.0f;

+    std::string model_name; // name of the loaded model, to be used by API
+
     common_chat_templates_ptr chat_templates;
     oaicompat_parser_options oai_parser_opt;

@@ -758,6 +761,18 @@ struct server_context_impl {
         }
         SRV_WRN("%s", "for more info see https://github.com/ggml-org/llama.cpp/pull/16391\n");

+        if (!params_base.model_alias.empty()) {
+            // user explicitly specified model name
+            model_name = params_base.model_alias;
+        } else if (!params_base.model.name.empty()) {
+            // use model name in registry format (for models in cache)
+            model_name = params_base.model.name;
+        } else {
+            // fallback: derive model name from file name
+            auto model_path = std::filesystem::path(params_base.model.path);
+            model_name = model_path.filename().string();
+        }
+
         // thinking is enabled if:
         // 1. It's not explicitly disabled (reasoning_budget == 0)
         // 2. The chat template supports it
@@ -2611,7 +2626,7 @@ static std::unique_ptr<server_res_generator> handle_completions_impl(
         // OAI-compat
         task.params.res_type = res_type;
         task.params.oaicompat_cmpl_id = completion_id;
-        // oaicompat_model is already populated by params_from_json_cmpl
+        task.params.oaicompat_model = ctx_server.model_name;

         tasks.push_back(std::move(task));
     }
@@ -2939,7 +2954,7 @@ void server_routes::init_routes() {
         json data = {
             { "default_generation_settings", default_generation_settings_for_props },
             { "total_slots", ctx_server.params_base.n_parallel },
-            { "model_alias", ctx_server.params_base.model_alias },
+            { "model_alias", ctx_server.model_name },
             { "model_path", ctx_server.params_base.model.path },
             { "modalities", json {
                 {"vision", ctx_server.oai_parser_opt.allow_image},
@@ -3181,8 +3196,8 @@ void server_routes::init_routes() {
         json models = {
             {"models", {
                 {
-                    {"name", params.model_alias.empty() ? params.model.path : params.model_alias},
-                    {"model", params.model_alias.empty() ? params.model.path : params.model_alias},
+                    {"name", ctx_server.model_name},
+                    {"model", ctx_server.model_name},
                     {"modified_at", ""},
                     {"size", ""},
                     {"digest", ""}, // dummy value, llama.cpp does not support managing model file's hash
@@ -3204,7 +3219,7 @@ void server_routes::init_routes() {
             {"object", "list"},
             {"data", {
                 {
-                    {"id", params.model_alias.empty() ? params.model.path : params.model_alias},
+                    {"id", ctx_server.model_name},
                     {"object", "model"},
                     {"created", std::time(0)},
                     {"owned_by", "llamacpp"},
@@ -3351,6 +3366,7 @@ void server_routes::init_routes() {
         // write JSON response
         json root = format_response_rerank(
             body,
+            ctx_server.model_name,
             responses,
             is_tei_format,
             documents,
@@ -3613,7 +3629,7 @@ std::unique_ptr<server_res_generator> server_routes::handle_embeddings_impl(cons

     // write JSON response
     json root = res_type == TASK_RESPONSE_TYPE_OAI_EMBD
-        ? format_embeddings_response_oaicompat(body, responses, use_base64)
+        ? format_embeddings_response_oaicompat(body, ctx_server.model_name, responses, use_base64)
         : json(responses);
     res->ok(root);
     return res;

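The resolution order introduced above is: explicit --alias, then the registry-format name (for models pulled into the cache), then the file name of the model path. A standalone sketch of the same precedence; resolve_model_name is a hypothetical helper, not part of the patch:

// Sketch of the precedence used by server_context_impl above; illustrative only.
#include <filesystem>
#include <string>

static std::string resolve_model_name(const std::string & alias,
                                       const std::string & registry_name,
                                       const std::string & model_path) {
    if (!alias.empty()) {
        return alias;            // 1. user explicitly specified --alias
    }
    if (!registry_name.empty()) {
        return registry_name;    // 2. registry-format name (models in the cache)
    }
    // 3. fallback: the file name of the loaded model path
    return std::filesystem::path(model_path).filename().string();
}

With no alias set, a server loading e.g. /models/example-7b-q4_k_m.gguf (path is illustrative) would report "example-7b-q4_k_m.gguf" as its model name.
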
tools/server/server-task.cpp

Lines changed: 0 additions & 3 deletions

@@ -450,9 +450,6 @@ task_params server_task::params_from_json_cmpl(
         }
     }

-    std::string model_name = params_base.model_alias.empty() ? DEFAULT_OAICOMPAT_MODEL : params_base.model_alias;
-    params.oaicompat_model = json_value(data, "model", model_name);
-
     return params;
 }

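For /v1/chat/completions this removal is the user-visible part of the change: the "model" field in responses no longer echoes the request body or falls back to "gpt-3.5-turbo"; it always carries the server's resolved name (see handle_completions_impl above). A rough before/after sketch; struct and function names are invented for illustration, this is not the server's code:

// Illustration only.
#include <string>

struct fake_request { std::string model; }; // stand-in for the JSON body's "model" field

// before this commit: request value, then --alias, then the hard-coded default
std::string reported_model_before(const fake_request & req, const std::string & alias) {
    if (!req.model.empty()) return req.model;
    return alias.empty() ? "gpt-3.5-turbo" : alias;
}

// after this commit: always the server-side resolved name (alias, registry name, or file name)
std::string reported_model_after(const fake_request &, const std::string & resolved_model_name) {
    return resolved_model_name;
}
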
tools/server/tests/unit/test_chat_completion.py

Lines changed: 4 additions & 3 deletions

@@ -41,7 +41,8 @@ def test_chat_completion(model, system_prompt, user_prompt, max_tokens, re_conte
     assert res.status_code == 200
     assert "cmpl" in res.body["id"] # make sure the completion id has the expected format
     assert res.body["system_fingerprint"].startswith("b")
-    assert res.body["model"] == model if model is not None else server.model_alias
+    # we no longer reflect back the model name, see https://github.com/ggml-org/llama.cpp/pull/17668
+    # assert res.body["model"] == model if model is not None else server.model_alias
     assert res.body["usage"]["prompt_tokens"] == n_prompt
     assert res.body["usage"]["completion_tokens"] == n_predicted
     choice = res.body["choices"][0]
@@ -59,7 +60,7 @@
 )
 def test_chat_completion_stream(system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, finish_reason):
     global server
-    server.model_alias = None # try using DEFAULT_OAICOMPAT_MODEL
+    server.model_alias = "llama-test-model"
     server.start()
     res = server.make_stream_request("POST", "/chat/completions", data={
         "max_tokens": max_tokens,
@@ -81,7 +82,7 @@ def test_chat_completion_stream(system_prompt, user_prompt, max_tokens, re_conte
         else:
            assert "role" not in choice["delta"]
         assert data["system_fingerprint"].startswith("b")
-        assert "gpt-3.5" in data["model"] # DEFAULT_OAICOMPAT_MODEL, maybe changed in the future
+        assert data["model"] == "llama-test-model"
         if last_cmpl_id is None:
            last_cmpl_id = data["id"]
         assert last_cmpl_id == data["id"] # make sure the completion id is the same for all events in the stream
