Commit aad0ba9

server : fix "model" in chat completion response

1 parent: c02e5ab

examples/server/utils.hpp (7 additions, 1 deletion)
@@ -21,7 +21,7 @@
 #include <string>
 #include <vector>
 
-#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"
+#define DEFAULT_OAICOMPAT_MODEL "llama-cpp"
 
 using json = nlohmann::ordered_json;
 using llama_tokens = std::vector<llama_token>;
@@ -590,6 +590,12 @@ static json oaicompat_completion_params_parse(
         throw std::runtime_error("Only one completion choice is allowed");
     }
 
+    // Handle "model" field
+    if (body.contains("model")) {
+        // this is used by format_*_response_oaicompat
+        llama_params["model"] = body.at("model").get<std::string>();
+    }
+
     // Handle "logprobs" field
     // TODO: The response format of this option is not yet OAI-compatible, but seems like no one really using it; We may need to fix it in the future
     if (json_value(body, "logprobs", false)) {
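For context, here is a minimal standalone sketch of what the change accomplishes, assuming nlohmann::json is available. The helpers parse_model and response_model are hypothetical, written only for this illustration; they are not functions from the server code. The idea: a client-supplied "model" is now captured during request parsing and can be echoed back in the response, with DEFAULT_OAICOMPAT_MODEL (now "llama-cpp" instead of "gpt-3.5-turbo-0613") presumably used only as a fallback when the client omits the field.

// Sketch only: mirrors the new parsing logic, not the actual server code.
#include <iostream>
#include <string>
#include <nlohmann/json.hpp>

using json = nlohmann::ordered_json;

#define DEFAULT_OAICOMPAT_MODEL "llama-cpp"

// Hypothetical helper: copies the request's "model" field into the parsed
// params, as the added block in oaicompat_completion_params_parse does.
static json parse_model(const json & body) {
    json llama_params;
    if (body.contains("model")) {
        llama_params["model"] = body.at("model").get<std::string>();
    }
    return llama_params;
}

// Hypothetical helper: chooses the "model" value reported in the response,
// falling back to the compile-time default when the client sent none.
static std::string response_model(const json & llama_params) {
    return llama_params.contains("model")
        ? llama_params.at("model").get<std::string>()
        : DEFAULT_OAICOMPAT_MODEL;
}

int main() {
    json with_model    = { {"model", "my-finetune"}, {"messages", json::array()} };
    json without_model = { {"messages", json::array()} };

    std::cout << response_model(parse_model(with_model))    << "\n"; // my-finetune
    std::cout << response_model(parse_model(without_model)) << "\n"; // llama-cpp
    return 0;
}

Before this commit, the response's "model" field did not reflect the client's request; after it, the first line above prints the client-supplied name and only the second falls back to the default.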
