Skip to content

Commit 2ec90cb

Browse files
ggerganov and NeoZhangJianyu
authored and committed
cont : fix mmap flag print (ggml-org#11699)
1 parent d746ee0 commit 2ec90cb

File tree

2 files changed

+1
-2
lines changed

2 files changed

+1
-2
lines changed

src/llama-model.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1275,7 +1275,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
12751275

12761276
const bool use_mmap_buffer = true;
12771277

1278-
LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, use_mmap_buffer ? "true" : "false");
1278+
LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, ml.use_mmap ? "true" : "false");
12791279

12801280
// build a list of buffer types for the CPU and GPU devices
12811281
pimpl->cpu_buft_list = make_cpu_buft_list(devices);

src/llama.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9434,7 +9434,6 @@ static struct llama_model * llama_model_load_from_file_impl(
94349434
struct llama_model_params params) {
94359435
ggml_time_init();
94369436

9437-
94389437
unsigned cur_percentage = 0;
94399438
if (params.progress_callback == NULL) {
94409439
params.progress_callback_user_data = &cur_percentage;

0 commit comments

Comments (0)