
Commit 5db1110

minor : clean-up
ggml-ci
1 parent: c699abc

File tree

2 files changed: 1 addition & 5 deletions


src/llama-graph.cpp

Lines changed: 0 additions & 4 deletions
@@ -1290,10 +1290,6 @@ ggml_tensor * llm_graph_context::build_attn(
         cur = build_lora_mm(wo, cur);
     }
 
-    if (wo_b) {
-        //cb(cur, "kqv_wo", il);
-    }
-
     if (wo_b) {
         cur = ggml_add(ctx0, cur, wo_b);
    }

src/llama-kv-cache.cpp

Lines changed: 1 addition & 1 deletion
@@ -129,7 +129,7 @@ llama_kv_cache_unified::llama_kv_cache_unified(
     const size_t memory_size_k = size_k_bytes();
     const size_t memory_size_v = size_v_bytes();
 
-    LLAMA_LOG_INFO("%s: size = %7.2f (%6d cells, %3d layers) MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
+    LLAMA_LOG_INFO("%s: size = %7.2f MiB (%6d cells, %3d layers), K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
            (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f), kv_size, (int) layers.size(),
            ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
            ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
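The only change here is the position of the first "MiB": before the fix the unit was printed after the parenthetical cell/layer counts, detached from the total-size number it qualifies. A small standalone sketch showing the two outputs side by side; the sizes are made up, and only the two format strings are taken from the hunk above.

// Demonstrates the log-format fix with hypothetical values; only the
// format strings come from the diff above.
#include <cstdio>

int main() {
    const float total_mib = 288.00f; // hypothetical K+V total
    const int   kv_size   = 4096;    // hypothetical number of cells
    const int   n_layers  = 32;      // hypothetical layer count

    // before: "size =  288.00 (  4096 cells,  32 layers) MiB"
    printf("size = %7.2f (%6d cells, %3d layers) MiB\n", total_mib, kv_size, n_layers);

    // after:  "size =  288.00 MiB (  4096 cells,  32 layers)"
    printf("size = %7.2f MiB (%6d cells, %3d layers)\n", total_mib, kv_size, n_layers);
    return 0;
}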
