
Commit 78a76de

Merge branch 'master' into xsn/server_mtmd
2 parents: 5e6c7ba + 526739b

File tree

12 files changed: +273 additions, -140 deletions


common/common.cpp

Lines changed: 1 addition & 1 deletion
@@ -830,7 +830,7 @@ std::string fs_get_cache_directory() {
     if (getenv("LLAMA_CACHE")) {
         cache_directory = std::getenv("LLAMA_CACHE");
     } else {
-#if defined(__linux__) || defined(__FreeBSD__)
+#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)
         if (std::getenv("XDG_CACHE_HOME")) {
             cache_directory = std::getenv("XDG_CACHE_HOME");
         } else {
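
Note: the same XDG-aware branch is also updated in examples/rpc/rpc-server.cpp (see below). For readers skimming the diff, here is a minimal, hedged sketch of the lookup order this one-line change extends; the function name is invented and the upstream helper that appends a trailing slash is omitted:

// Sketch only: mirrors the order LLAMA_CACHE > XDG_CACHE_HOME > $HOME/.cache
// on Linux, FreeBSD and (after this commit) AIX. Not the upstream implementation.
#include <cstdlib>
#include <string>

static std::string cache_dir_sketch() {
    if (const char * env = std::getenv("LLAMA_CACHE")) {
        return env;                                                 // explicit override always wins
    }
#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)
    if (const char * xdg = std::getenv("XDG_CACHE_HOME")) {
        return std::string(xdg) + "/llama.cpp";                     // XDG location
    }
    return std::string(std::getenv("HOME")) + "/.cache/llama.cpp";  // XDG default fallback
#else
    return "";                                                      // other platforms handled separately
#endif
}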

examples/llava/clip.cpp

Lines changed: 20 additions & 13 deletions
@@ -323,36 +323,43 @@ struct clip_ctx {
     std::vector<ggml_backend_t> backend_ptrs;
     std::vector<ggml_backend_buffer_type_t> backend_buft;
 
-    ggml_backend_ptr backend;
-    ggml_backend_ptr backend_cpu;
+    ggml_backend_t backend;
+    ggml_backend_t backend_cpu;
     ggml_backend_buffer_ptr buf;
 
     ggml_backend_sched_ptr sched;
 
     clip_image_size load_image_size;
 
     clip_ctx(clip_context_params & ctx_params) {
-        backend_cpu = ggml_backend_ptr(ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr));
-        backend     = ggml_backend_ptr(ctx_params.use_gpu
+        backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
+        backend     = ctx_params.use_gpu
                         ? ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_GPU, nullptr)
-                        : nullptr);
+                        : nullptr;
 
         if (backend) {
-            LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend.get()));
-            backend_ptrs.push_back(backend.get());
-            backend_buft.push_back(ggml_backend_get_default_buffer_type(backend.get()));
+            LOG_INF("%s: CLIP using %s backend\n", __func__, ggml_backend_name(backend));
+            backend_ptrs.push_back(backend);
+            backend_buft.push_back(ggml_backend_get_default_buffer_type(backend));
         } else {
-            backend = std::move(backend_cpu);
+            backend = backend_cpu;
             LOG_INF("%s: CLIP using CPU backend\n", __func__);
         }
 
-        backend_ptrs.push_back(backend_cpu.get());
-        backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu.get()));
+        backend_ptrs.push_back(backend_cpu);
+        backend_buft.push_back(ggml_backend_get_default_buffer_type(backend_cpu));
 
         sched.reset(
             ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), 8192, false)
         );
     }
+
+    ~clip_ctx() {
+        ggml_backend_free(backend);
+        if (backend != backend_cpu) {
+            ggml_backend_free(backend_cpu);
+        }
+    }
 };
 
 static ggml_cgraph * clip_image_build_graph_siglip(clip_ctx * ctx, const clip_image_f32_batch & imgs) {
@@ -1428,7 +1435,7 @@ struct clip_model_loader {
         }
 
         // alloc memory and offload data
-        ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend.get());
+        ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(ctx_clip.backend);
         ctx_clip.buf.reset(ggml_backend_alloc_ctx_tensors_from_buft(ctx_clip.ctx_data.get(), buft));
         ggml_backend_buffer_set_usage(ctx_clip.buf.get(), GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
         for (auto & t : tensors_to_load) {
@@ -2610,7 +2617,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
         }
     }
 
-    ggml_backend_cpu_set_n_threads(ctx->backend_cpu.get(), n_threads);
+    ggml_backend_cpu_set_n_threads(ctx->backend_cpu, n_threads);
 
     auto status = ggml_backend_sched_graph_compute(ctx->sched.get(), gf);
     if (status != GGML_STATUS_SUCCESS) {
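
The net effect of these hunks is an ownership change: clip_ctx now stores raw ggml_backend_t handles instead of ggml_backend_ptr smart pointers, so it frees them itself. The new destructor guards against a double free in the CPU-only case, where backend is assigned the same handle as backend_cpu. A reduced illustration of that pattern follows; the struct name is hypothetical and only the ggml calls visible in the diff are assumed:

// Hypothetical sketch of the aliasing-aware cleanup introduced above.
struct backend_holder_sketch {
    ggml_backend_t backend     = nullptr; // GPU backend, or an alias of backend_cpu when no GPU is used
    ggml_backend_t backend_cpu = nullptr; // always initialized

    ~backend_holder_sketch() {
        ggml_backend_free(backend);
        if (backend != backend_cpu) {
            ggml_backend_free(backend_cpu); // skip if it is the same handle as backend
        }
    }
};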

examples/quantize/quantize.cpp

Lines changed: 115 additions & 2 deletions
@@ -9,14 +9,15 @@
 #include <fstream>
 #include <cmath>
 #include <cctype>
+#include <algorithm>
 
 struct quant_option {
     std::string name;
     llama_ftype ftype;
     std::string desc;
 };
 
-static const std::vector<struct quant_option> QUANT_OPTIONS = {
+static const std::vector<quant_option> QUANT_OPTIONS = {
     { "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
     { "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 4.78G, +0.4511 ppl @ Llama-3-8B", },
     { "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 5.21G, +0.1316 ppl @ Llama-3-8B", },
@@ -105,7 +106,8 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
 //
 [[noreturn]]
 static void usage(const char * executable) {
-    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type] [--token-embedding-type] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
+    printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] [--pure] [--imatrix] [--include-weights] [--exclude-weights] [--output-tensor-type]\n", executable);
+    printf("       [--token-embedding-type] [--tensor-type] [--keep-split] [--override-kv] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n");
     printf("  --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
     printf("  --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
     printf("  --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
@@ -114,6 +116,8 @@ static void usage(const char * executable) {
     printf("  --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
     printf("  --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor\n");
     printf("  --token-embedding-type ggml_type: use this ggml_type for the token embeddings tensor\n");
+    printf("  --tensor-type TENSOR=TYPE: quantize this tensor to this ggml_type. example: --tensor-type attn_q=q8_0\n");
+    printf("      Advanced option to selectively quantize tensors. May be specified multiple times.\n");
     printf("  --keep-split: will generate quantized model in the same shards as input\n");
     printf("  --override-kv KEY=TYPE:VALUE\n");
     printf("      Advanced option to override model metadata by key in the quantized model. May be specified multiple times.\n");
@@ -244,6 +248,107 @@ static ggml_type parse_ggml_type(const char * arg) {
     return GGML_TYPE_COUNT;
 }
 
+// Allowed tensors for arbitrary quantization with --tensor-type option
+static const std::vector<std::string> ALLOWED_TENSOR_TYPE = {
+    "attn_k",
+    "attn_kv_a_mqa",
+    "attn_kv_b",
+    "attn_o",
+    "attn_output",
+    "attn_q",
+    "attn_q_a",
+    "attn_q_b",
+    "attn_qkv",
+    "attn_v",
+    "channel_mix_key",
+    "channel_mix_receptance",
+    "channel_mix_value",
+    "cls",
+    "cls.output",
+    "cross_attn_k",
+    "cross_attn_o",
+    "cross_attn_q",
+    "cross_attn_v",
+    "ffn_act",
+    "ffn_down",
+    "ffn_down_exps",
+    "ffn_down_shexp",
+    "ffn_gate",
+    "ffn_gate_exps",
+    "ffn_gate_shexp",
+    "ffn_up",
+    "ffn_up_exps",
+    "ffn_up_shexp",
+    "ssm_in",
+    "ssm_out",
+    "time_mix_gate",
+    "time_mix_key",
+    "time_mix_output",
+    "time_mix_receptance",
+    "time_mix_value",
+};
+
+// changes to this struct must be replicated in llama-quant.cpp
+struct tensor_quantization {
+    std::string name;
+    ggml_type quant = GGML_TYPE_COUNT;
+};
+
+static bool parse_tensor_type(const char * data, std::vector<tensor_quantization> & tensor_type) {
+    const char * sep = strchr(data, '=');
+    if (sep == nullptr) {
+        printf("\n%s: malformed tensor type '%s'\n\n", __func__, data);
+        return false;
+    }
+
+    const size_t tn_len = sep - data;
+    if (tn_len == 0) {
+        printf("\n%s: missing tensor name\n\n", __func__);
+        return false;
+    }
+
+    if (const size_t qt_len = strlen(sep); qt_len == 1) {
+        printf("\n%s: missing quantization type\n\n", __func__);
+        return false;
+    }
+
+    std::string tn(data, tn_len);
+    std::transform(tn.begin(), tn.end(), tn.begin(), tolower);
+    sep++;
+    const std::string qt(sep);
+
+    bool found = false;
+    for (const auto & allowed : ALLOWED_TENSOR_TYPE) {
+        std::string tensor;
+        tensor = tn.rfind('.') != std::string::npos ? tn.substr(tn.rfind('.') + 1) : tn;
+        // handle special case of cls.output
+        std::string cls_output = "cls.output";
+        if (tn.find(cls_output) != std::string::npos) {
+            tensor = "cls.output";
+        }
+        // check if an allowed tensor exists and it's at the end of the kv string
+        if (tensor == allowed) {
+            found = true;
+            break;
+        }
+    }
+    if (!found) {
+        printf("\n%s: invalid tensor name '%s'\n\n", __func__, tn.c_str());
+        return false;
+    }
+
+    if (parse_ggml_type(qt.c_str()) == GGML_TYPE_COUNT) {
+        printf("\n%s: invalid quantization type '%s'\n\n", __func__, qt.c_str());
+        return false;
+    }
+
+    tensor_quantization tqz;
+    tqz.name = tn;
+    tqz.quant = parse_ggml_type(qt.c_str());
+    tensor_type.emplace_back(std::move(tqz));
+    return true;
+}
+
 int main(int argc, char ** argv) {
     if (argc < 3) {
         usage(argv[0]);
@@ -255,6 +360,7 @@ int main(int argc, char ** argv) {
     std::string imatrix_file;
     std::vector<std::string> included_weights, excluded_weights;
     std::vector<llama_model_kv_override> kv_overrides;
+    std::vector<tensor_quantization> tensor_types;
 
     for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
         if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
@@ -277,6 +383,10 @@ int main(int argc, char ** argv) {
         } else {
             usage(argv[0]);
         }
+    } else if (strcmp(argv[arg_idx], "--tensor-type") == 0) {
+        if (arg_idx == argc-1 || !parse_tensor_type(argv[++arg_idx], tensor_types)) {
+            usage(argv[0]);
+        }
     } else if (strcmp(argv[arg_idx], "--override-kv") == 0) {
         if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
             usage(argv[0]);
@@ -361,6 +471,9 @@ int main(int argc, char ** argv) {
         kv_overrides.back().key[0] = 0;
        params.kv_overrides = &kv_overrides;
     }
+    if (!tensor_types.empty()) {
+        params.tensor_types = &tensor_types;
+    }
 
     llama_backend_init();
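
Taken together, these hunks add a --tensor-type TENSOR=TYPE flag that overrides the quantization type for individual tensors. A few hedged examples of how parse_tensor_type(), as defined above, should treat typical inputs (illustration only, assuming the definitions above are in scope and that "q8_0" is a type name parse_ggml_type() recognizes):

// Illustrative calls; comments describe the expected outcome per the parser logic above.
std::vector<tensor_quantization> types;
parse_tensor_type("attn_q=q8_0", types);         // accepted: "attn_q" is in ALLOWED_TENSOR_TYPE
parse_tensor_type("blk.0.ffn_down=q8_0", types); // accepted: only the suffix after the last '.' is matched
parse_tensor_type("ATTN_V=q8_0", types);         // accepted: tensor names are lower-cased before matching
parse_tensor_type("attn_q", types);              // rejected: missing '=' separator
parse_tensor_type("attn_q=", types);             // rejected: missing quantization type
parse_tensor_type("foo=q8_0", types);            // rejected: "foo" is not an allowed tensor name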

examples/rpc/rpc-server.cpp

Lines changed: 4 additions & 2 deletions
@@ -126,7 +126,7 @@ static std::string fs_get_cache_directory() {
     if (getenv("LLAMA_CACHE")) {
         cache_directory = std::getenv("LLAMA_CACHE");
     } else {
-#ifdef __linux__
+#if defined(__linux__) || defined(__FreeBSD__) || defined(_AIX)
         if (std::getenv("XDG_CACHE_HOME")) {
             cache_directory = std::getenv("XDG_CACHE_HOME");
         } else {
@@ -136,7 +136,9 @@ static std::string fs_get_cache_directory() {
         cache_directory = std::getenv("HOME") + std::string("/Library/Caches/");
 #elif defined(_WIN32)
         cache_directory = std::getenv("LOCALAPPDATA");
-#endif // __linux__
+#else
+#  error Unknown architecture
+#endif
         cache_directory = ensure_trailing_slash(cache_directory);
         cache_directory += "llama.cpp";
     }

examples/server/server.cpp

Lines changed: 16 additions & 0 deletions
@@ -3975,6 +3975,21 @@ int main(int argc, char ** argv) {
         res_ok(res, {{ "success", true }});
     };
 
+    const auto handle_api_show = [&ctx_server, &res_ok](const httplib::Request &, httplib::Response & res) {
+        json data = {
+            {
+                "template", common_chat_templates_source(ctx_server.chat_templates.get()),
+            },
+            {
+                "model_info", {
+                    { "llama.context_length", ctx_server.slots.back().n_ctx, },
+                }
+            },
+        };
+
+        res_ok(res, data);
+    };
+
     // handle completion-like requests (completion, chat, infill)
     // we can optionally provide a custom format for partial results and final results
     const auto handle_completions_impl = [&ctx_server, &res_error, &res_ok](
@@ -4590,6 +4605,7 @@ int main(int argc, char ** argv) {
     svr->Get ("/metrics",              handle_metrics);
     svr->Get ("/props",                handle_props);
     svr->Post("/props",                handle_props_change);
+    svr->Post("/api/show",             handle_api_show);
     svr->Get ("/models",               handle_models); // public endpoint (no API key check)
     svr->Get ("/v1/models",            handle_models); // public endpoint (no API key check)
     svr->Post("/completion",           handle_completions); // legacy

0 commit comments
