Commit 643e5e8

move comments after bracket to its own line
1 parent 12aded6 commit 643e5e8

File tree

1 file changed (+5 -3 lines)


src/llama.cpp

Lines changed: 5 additions & 3 deletions
@@ -11431,7 +11431,8 @@ struct llm_build_context {
             const int64_t n_head_kv = hparams.n_head_kv(il);
             const int64_t n_head    = hparams.n_head(il);
 
-            if (n_head == 0) { // attention-free layer of Llama-3_1-Nemotron-51B
+            if (n_head == 0) {
+                // attention-free layer of Llama-3_1-Nemotron-51B
                 cur = inpL;
             } else {
                 // norm
@@ -11441,11 +11442,12 @@ struct llm_build_context {
                 cb(cur, "attn_norm", il);
             }
 
-            if (n_head > 0 && n_head_kv == 0) { // "linear attention" of Llama-3_1-Nemotron-51B
+            if (n_head > 0 && n_head_kv == 0) {
+                // "linear attention" of Llama-3_1-Nemotron-51B
                 cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wo, cur);
                 cb(cur, "wo", il);
             } else if (n_head > 0) {
-            // self-attention
+                // self-attention
                 // rope freq factors for llama3; may return nullptr for llama2 and other models
                 struct ggml_tensor * rope_factors = build_rope_factors(il);
 
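The change is purely mechanical, as the commit message says: a comment that trailed the opening brace of an if statement is moved onto its own line inside the block, and the mis-indented "// self-attention" comment is aligned with its block. A minimal sketch of the pattern in generic C++ (identifiers taken from the diff, not a verbatim copy of llama.cpp):

    // before: comment trails the opening brace
    if (n_head == 0) { // attention-free layer
        cur = inpL;
    }

    // after: comment sits on its own line inside the block
    if (n_head == 0) {
        // attention-free layer
        cur = inpL;
    }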