Skip to content

Commit 6277a0e

Browse files
committed
debug
1 parent dafd46a commit 6277a0e

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

src/llama-model.cpp

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -2963,13 +2963,13 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
29632963
int k_start = h * (n_embd_head_qk_nope + n_embd_head_v);
29642964
for (int row = 0; row < kv_lora_rank; ++row) {
29652965
for (int col = 0; col < n_embd_head_qk_nope; ++col) {
2966+
LLAMA_LOG_DEBUG("333 row: %d, col: %d\n", row, col);
29662967
int src_idx = row * src_stride + k_start + col;
29672968
GGML_ASSERT(src_idx < ggml_nelements(wkv_b));
29682969

29692970
int dst_row = h * kv_lora_rank + row;
29702971
int dst_col = col;
29712972
dst[dst_row * n_embd_head_qk_nope + dst_col] = src[src_idx];
2972-
LLAMA_LOG_DEBUG("333 row: %d, col: %d\n", row, col);
29732973
}
29742974
}
29752975
}
@@ -2992,6 +2992,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
29922992
int v_start = h * (n_embd_head_qk_nope + n_embd_head_v) + n_embd_head_qk_nope;
29932993
for (int row = 0; row < kv_lora_rank; ++row) {
29942994
for (int col = 0; col < n_embd_head_v; ++col) {
2995+
LLAMA_LOG_DEBUG("666 row: %d, col: %d\n", row, col);
29952996
// 源索引计算
29962997
int src_idx = row * src_stride + v_start + col;
29972998
GGML_ASSERT(src_idx < ggml_nelements(wkv_b));
@@ -3000,7 +3001,6 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
30003001
int dst_row = h * n_embd_head_v + col; // 合并头和特征维度
30013002
int dst_col = row; // LoRA 秩维度
30023003
dst[dst_row * kv_lora_rank + dst_col] = src[src_idx];
3003-
LLAMA_LOG_DEBUG("666 row: %d, col: %d\n", row, col);
30043004
}
30053005
}
30063006
}

0 commit comments

Comments (0)