Skip to content

Commit df082f5

Browse files
authored
nitpick : correct MB to MiB (ggml-org#15934)
MB was incorrectly used for 1024 x 1024 bytes instead of MiB
1 parent 24a6734 commit df082f5

File tree

1 file changed: +3 additions, −3 deletions

src/llama-quant.cpp

Lines changed: 3 additions & 3 deletions
@@ -920,7 +920,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
             new_type = tensor->type;
             new_data = tensor->data;
             new_size = ggml_nbytes(tensor);
-            LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
+            LLAMA_LOG_INFO("size = %8.3f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0);
         } else {
             const int64_t nelements = ggml_nelements(tensor);

@@ -1037,8 +1037,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
     }
     close_ofstream();

-    LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
-    LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
+    LLAMA_LOG_INFO("%s: model size = %8.2f MiB\n", __func__, total_size_org/1024.0/1024.0);
+    LLAMA_LOG_INFO("%s: quant size = %8.2f MiB\n", __func__, total_size_new/1024.0/1024.0);

    if (qs.n_fallback > 0) {
        LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) required fallback quantization\n",

0 commit comments

Comments
 (0)