We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 7998b11 commit c7f379e — Copy full SHA for c7f379e
llama.cpp/server/oai.h
@@ -35,7 +35,7 @@ inline static json oaicompat_completion_params_parse(
35
// https://platform.openai.com/docs/api-reference/chat/create
36
llama_sampling_params default_sparams;
37
llama_params["model"] = json_value(body, "model", std::string("unknown"));
38
- llama_params["prompt"] = format_chat(model, chat_template, body["messages"]);
+ llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));
39
llama_params["cache_prompt"] = json_value(body, "cache_prompt", false);
40
llama_params["temperature"] = json_value(body, "temperature", 0.0);
41
llama_params["top_k"] = json_value(body, "top_k", default_sparams.top_k);
0 commit comments