Skip to content

Commit 952a0a0

Browse files
authored
Removed code that is not needed for Copilot to work.
1 parent dd16438 commit 952a0a0

File tree

1 file changed

+20
-47
lines changed

1 file changed

+20
-47
lines changed

tools/server/server.cpp

Lines changed: 20 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -4077,21 +4077,7 @@ int main(int argc, char ** argv) {
40774077
res_ok(res, {{ "success", true }});
40784078
};
40794079

4080-
const auto handle_api_show = [&ctx_server, &state, &res_ok](const httplib::Request &, httplib::Response & res) {
4081-
server_state current_state = state.load();
4082-
const auto* model = llama_get_model(ctx_server.ctx);
4083-
4084-
// Get basic model info
4085-
char arch_buf[64] = {0};
4086-
char param_size_buf[64] = {0};
4087-
llama_model_meta_val_str(model, "general.architecture", arch_buf, sizeof(arch_buf));
4088-
llama_model_meta_val_str(model, "general.parameter_count", param_size_buf, sizeof(param_size_buf));
4089-
4090-
json model_meta = nullptr;
4091-
if (current_state == SERVER_STATE_READY) {
4092-
model_meta = ctx_server.model_meta();
4093-
}
4094-
4080+
const auto handle_api_show = [&ctx_server, &res_ok](const httplib::Request &, httplib::Response & res) {
40954081
json data = {
40964082
{
40974083
"template", common_chat_templates_source(ctx_server.chat_templates.get()),
@@ -4101,19 +4087,19 @@ int main(int argc, char ** argv) {
41014087
{ "llama.context_length", ctx_server.slots.back().n_ctx, },
41024088
}
41034089
},
4104-
{"modelfile", ""}, // Specific to ollama and does not seem to be needed
4105-
{"parameters", ""}, // TODO: add parameters
4090+
{"modelfile", ""},
4091+
{"parameters", ""},
41064092
{"template", common_chat_templates_source(ctx_server.chat_templates.get())},
41074093
{"details", {
4108-
{"parent_model", ""}, // TODO: add parent model if available
4094+
{"parent_model", ""},
41094095
{"format", "gguf"},
4110-
{"family", arch_buf},
4111-
{"families", {arch_buf}},
4112-
{"parameter_size", param_size_buf},
4113-
{"quantization_level", ""} // TODO: add quantization level if available
4096+
{"family", ""},
4097+
{"families", {""}},
4098+
{"parameter_size", ""},
4099+
{"quantization_level", ""}
41144100
}},
4115-
{"model_info", model_meta},
4116-
{"capabilities", {"completion"}} // TODO: add other capabilities if available
4101+
{"model_info", ""},
4102+
{"capabilities", {"completion"}}
41174103
};
41184104

41194105
res_ok(res, data);
@@ -4437,40 +4423,27 @@ int main(int argc, char ** argv) {
44374423
if (current_state == SERVER_STATE_READY) {
44384424
model_meta = ctx_server.model_meta();
44394425
}
4440-
// Get file metadata
4441-
struct stat file_stat;
4442-
stat(params.model.path.c_str(), &file_stat);
4443-
4444-
// Convert modified time to ISO 8601
4445-
char modified_buf[64];
4446-
strftime(modified_buf, sizeof(modified_buf), "%Y-%m-%dT%H:%M:%S%z", localtime(&file_stat.st_mtime));
4447-
4448-
const auto* model = llama_get_model(ctx_server.ctx);
4449-
char arch_buf[64] = {0};
4450-
char param_size_buf[64] = {0};
4451-
llama_model_meta_val_str(model, "general.architecture", arch_buf, sizeof(arch_buf));
4452-
llama_model_meta_val_str(model, "general.parameter_count", param_size_buf, sizeof(param_size_buf));
44534426

44544427
json models = {
44554428
{"models", {
44564429
{
44574430
{"name", params.model_alias.empty() ? params.model.path : params.model_alias},
44584431
{"model", params.model_alias.empty() ? params.model.path : params.model_alias},
4459-
{"modified_at", modified_buf},
4460-
{"size", file_stat.st_size},
4461-
{"digest", ""}, // TODO: add digest
4432+
{"modified_at", ""},
4433+
{"size", ""},
4434+
{"digest", ""}, // dummy value, llama.cpp does not support managing model file's hash
44624435
{"type", "model"},
44634436
{"description", ""},
4464-
{"tags", {arch_buf}},
4437+
{"tags", {""}},
44654438
{"capabilities", {"completion"}},
4466-
{"parameters", ""}, // TODO: add parameters
4439+
{"parameters", ""},
44674440
{"details", {
4468-
{"parent_model", ""}, // TODO: Add parent_model
4441+
{"parent_model", ""},
44694442
{"format", "gguf"},
4470-
{"family", arch_buf},
4471-
{"families", {arch_buf}},
4472-
{"parameter_size", param_size_buf},
4473-
{"quantization_level", ""} // TODO: add quantization level if available
4443+
{"family", ""},
4444+
{"families", {""}},
4445+
{"parameter_size", ""},
4446+
{"quantization_level", ""}
44744447
}}
44754448
}
44764449
}},

0 commit comments

Comments (0)