Commit c3c27a7

Remove extra semicolons left from refactoring
1 parent a0b8c17


46 files changed: 1 addition, 226 deletions
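
Every deletion in this commit is the same one-character fix: a stray null statement, a lone ';' on its own line after a closing brace, apparently left behind when a refactor wrapped a single statement in braces without removing the old terminator. A minimal before/after sketch of the pattern, modeled on the granite diff below (a null statement is legal C++ but dead code; Clang can flag it with -Wextra-semi-stmt, assuming a reasonably recent compiler):

    // before: the refactor braced the statement but kept its old semicolon
    if (model.layers[il].bq) {
        Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
        cb(Qcur, "Qcur", il);
    }
    ;   // null statement: harmless, but noise and a warning source

    // after: the null statement is removed; behavior is unchanged
    if (model.layers[il].bq) {
        Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
        cb(Qcur, "Qcur", il);
    }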

src/models/baichuan.cpp

Lines changed: 0 additions & 5 deletions
@@ -61,7 +61,6 @@ llm_build_baichuan::llm_build_baichuan(const llama_model & model, const llm_grap
             default:
                 GGML_ABORT("fatal error");
         }
-        ;

         cb(Qcur, "Qcur", il);
         cb(Kcur, "Kcur", il);
@@ -71,13 +70,11 @@ llm_build_baichuan::llm_build_baichuan(const llama_model & model, const llm_grap
                 model.layers[il].wo, NULL,
                 Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
         }
-        ;

         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
         }
-        ;

         ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
         cb(ffn_inp, "ffn_inp", il);
@@ -97,7 +94,6 @@ llm_build_baichuan::llm_build_baichuan(const llama_model & model, const llm_grap
                 LLM_FFN_SILU, LLM_FFN_PAR, il);
             cb(cur, "ffn_out", il);
         }
-        ;

         cur = ggml_add(ctx0, cur, ffn_inp);

@@ -107,7 +103,6 @@ llm_build_baichuan::llm_build_baichuan(const llama_model & model, const llm_grap
         // input for next layer
         inpL = cur;
     }
-    ;

     cur = inpL;

src/models/gemma.cpp

Lines changed: 0 additions & 4 deletions
@@ -64,12 +64,10 @@ llm_build_gemma::llm_build_gemma(const llama_model & model, const llm_graph_para
                 model.layers[il].wo, NULL,
                 Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f, il);
         }
-        ;
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
         }
-        ;
         ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
         cb(sa_out, "sa_out", il);

@@ -88,7 +86,6 @@ llm_build_gemma::llm_build_gemma(const llama_model & model, const llm_graph_para
                 LLM_FFN_GELU, LLM_FFN_PAR, il);
             cb(cur, "ffn_out", il);
         }
-        ;
         cur = ggml_add(ctx0, cur, sa_out);

         cur = build_cvec(cur, il);
@@ -97,7 +94,6 @@ llm_build_gemma::llm_build_gemma(const llama_model & model, const llm_graph_para
         // input for next layer
         inpL = cur;
     }
-    ;
     cur = inpL;

     cur = build_norm(cur,

src/models/gemma2_iswa.cpp

Lines changed: 0 additions & 4 deletions
@@ -62,12 +62,10 @@ llm_build_gemma2_iswa::llm_build_gemma2_iswa(const llama_model & model, const ll
                 model.layers[il].wo, NULL,
                 Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f, il);
         }
-        ;
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
         }
-        ;
         cur = build_norm(cur,
                 model.layers[il].attn_post_norm, NULL,
                 LLM_NORM_RMS, il);
@@ -91,7 +89,6 @@ llm_build_gemma2_iswa::llm_build_gemma2_iswa(const llama_model & model, const ll
                 LLM_FFN_GELU, LLM_FFN_PAR, il);
             cb(cur, "ffn_out", il);
         }
-        ;
         cur = build_norm(cur,
                 model.layers[il].ffn_post_norm, NULL,
                 LLM_NORM_RMS, -1);
@@ -105,7 +102,6 @@ llm_build_gemma2_iswa::llm_build_gemma2_iswa(const llama_model & model, const ll
         // input for next layer
         inpL = cur;
     }
-    ;
     cur = inpL;

     cur = build_norm(cur,

src/models/gemma3_iswa.cpp

Lines changed: 0 additions & 5 deletions
@@ -14,7 +14,6 @@ llm_build_gemma3_iswa::llm_build_gemma3_iswa(const llama_model & model, const ll
         inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
         cb(inpL, "inp_scaled", -1);
     }
-    ;
     // inp_pos - contains the positions
     ggml_tensor * inp_pos = build_inp_pos();

@@ -74,12 +73,10 @@ llm_build_gemma3_iswa::llm_build_gemma3_iswa(const llama_model & model, const ll
                 model.layers[il].wo, NULL,
                 Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f, il);
         }
-        ;
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
         }
-        ;
         cur = build_norm(cur,
                 model.layers[il].attn_post_norm, NULL,
                 LLM_NORM_RMS, il);
@@ -103,7 +100,6 @@ llm_build_gemma3_iswa::llm_build_gemma3_iswa(const llama_model & model, const ll
                 LLM_FFN_GELU, LLM_FFN_PAR, il);
             cb(cur, "ffn_out", il);
         }
-        ;
         cur = build_norm(cur,
                 model.layers[il].ffn_post_norm, NULL,
                 LLM_NORM_RMS, -1);
@@ -117,7 +113,6 @@ llm_build_gemma3_iswa::llm_build_gemma3_iswa(const llama_model & model, const ll
         // input for next layer
         inpL = cur;
     }
-    ;
     cur = inpL;

     cur = build_norm(cur,

src/models/glm4_moe.cpp

Lines changed: 0 additions & 5 deletions
@@ -61,7 +61,6 @@ llm_build_glm4_moe::llm_build_glm4_moe(const llama_model & model, const llm_grap
             Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
             cb(Kcur, "Kcur_normed", il);
         }
-        ;
         Qcur = ggml_rope_ext(
                 ctx0, Qcur, inp_pos, nullptr,
                 n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
@@ -82,12 +81,10 @@ llm_build_glm4_moe::llm_build_glm4_moe(const llama_model & model, const llm_grap
                 model.layers[il].wo, NULL,
                 Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
         }
-        ;
         if (il == n_transformer_layers - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
         }
-        ;
         ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
         cb(ffn_inp, "ffn_inp", il);

@@ -133,7 +130,6 @@ llm_build_glm4_moe::llm_build_glm4_moe(const llama_model & model, const llm_grap
             cur = ggml_add(ctx0, routed_out, shared_out);
             cb(cur, "ffn_out", il);
         }
-        ;
         cur = ggml_add(ctx0, cur, ffn_inp);

         cur = build_cvec(cur, il);
@@ -142,7 +138,6 @@ llm_build_glm4_moe::llm_build_glm4_moe(const llama_model & model, const llm_grap
         // input for next layer
         inpL = cur;
     }
-    ;
     cur = inpL;
     cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

src/models/gptneox.cpp

Lines changed: 0 additions & 3 deletions
@@ -59,13 +59,11 @@ llm_build_gptneox::llm_build_gptneox(const llama_model & model, const llm_graph_
                 model.layers[il].wo, model.layers[il].bo,
                 Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
         }
-        ;

         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
         }
-        ;

         // ffn
         if (hparams.use_par_res) {
@@ -129,7 +127,6 @@ llm_build_gptneox::llm_build_gptneox(const llama_model & model, const llm_graph_
             inpL = cur;
         }
     }
-    ;

     cur = build_norm(inpL,
             model.output_norm,

src/models/granite.cpp

Lines changed: 0 additions & 8 deletions
@@ -22,7 +22,6 @@ llm_build_granite::llm_build_granite(
     if (hparams.rope_finetuned) {
         inp_pos = build_inp_pos();
     }
-    ;
     auto * inp_attn = build_attn_inp_kv();

     ggml_tensor * inp_out_ids = build_inp_out_ids();
@@ -45,14 +44,12 @@ llm_build_granite::llm_build_granite(
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
         }
-        ;
         // ffn
         cur = build_layer_ffn(cur, inpSA, model, il);

         // input for next layer
         inpL = cur;
     }
-    ;
     cur = inpL;

     cur = build_norm(cur,
@@ -88,23 +85,20 @@ ggml_tensor * llm_build_granite::build_attention_layer(
         Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
         cb(Qcur, "Qcur", il);
     }
-    ;

     ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
     cb(Kcur, "Kcur", il);
     if (model.layers[il].bk) {
         Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
         cb(Kcur, "Kcur", il);
     }
-    ;

     ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
     cb(Vcur, "Vcur", il);
     if (model.layers[il].bv) {
         Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
         cb(Vcur, "Vcur", il);
     }
-    ;

     Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, hparams.n_head(il), n_tokens);
     Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, hparams.n_head_kv(il), n_tokens);
@@ -125,7 +119,6 @@ ggml_tensor * llm_build_granite::build_attention_layer(
             ext_factor, attn_factor, beta_fast, beta_slow
         );
     }
-    ;

     cb(Qcur, "Qcur", il);
     cb(Kcur, "Kcur", il);
@@ -204,7 +197,6 @@ ggml_tensor * llm_build_granite::build_layer_ffn(
             cur = moe_out;
         }
     }
-    ;

     // For Granite architectures - scale residual
     if (hparams.f_residual_scale) {

src/models/grok.cpp

Lines changed: 0 additions & 8 deletions
@@ -38,21 +38,18 @@ llm_build_grok::llm_build_grok(const llama_model & model, const llm_graph_params
             Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
             cb(Qcur, "Qcur", il);
         }
-        ;
         ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
         cb(Kcur, "Kcur", il);
         if (model.layers[il].bk) {
             Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
             cb(Kcur, "Kcur", il);
         }
-        ;
         ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
         cb(Vcur, "Vcur", il);
         if (model.layers[il].bv) {
             Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
             cb(Vcur, "Vcur", il);
         }
-        ;
         Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
         Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
         Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
@@ -77,12 +74,10 @@ llm_build_grok::llm_build_grok(const llama_model & model, const llm_graph_params
                 model.layers[il].wo, model.layers[il].bo,
                 Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f, il);
         }
-        ;
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
         }
-        ;
         cur = build_norm(cur,
                 model.layers[il].attn_out_norm, NULL,
                 LLM_NORM_RMS, il);
@@ -125,7 +120,6 @@ llm_build_grok::llm_build_grok(const llama_model & model, const llm_graph_params
         } else {
             cur = moe_out;
         }
-        ;
         cur = build_norm(cur,
                 model.layers[il].ffn_post_norm, NULL,
                 LLM_NORM_RMS, il);
@@ -140,7 +134,6 @@ llm_build_grok::llm_build_grok(const llama_model & model, const llm_graph_params
         // input for next layer
         inpL = cur;
     }
-    ;
     cur = inpL;

     cur = build_norm(cur,
@@ -161,7 +154,6 @@ llm_build_grok::llm_build_grok(const llama_model & model, const llm_graph_params
         cur = ggml_tanh(ctx0, cur);
         cur = ggml_scale(ctx0, cur, hparams.f_final_logit_softcapping);
     }
-    ;
     cb(cur, "result_output", -1);
     res->t_logits = cur;


src/models/hunyuan_moe.cpp

Lines changed: 0 additions & 6 deletions
@@ -42,21 +42,18 @@ llm_build_hunyuan_moe::llm_build_hunyuan_moe(const llama_model & model, const ll
             Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
             cb(Qcur, "Qcur", il);
         }
-        ;
         ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
         cb(Kcur, "Kcur", il);
         if (model.layers[il].bk) {
             Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
             cb(Kcur, "Kcur", il);
         }
-        ;
         ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
         cb(Vcur, "Vcur", il);
         if (model.layers[il].bv) {
             Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
             cb(Vcur, "Vcur", il);
         }
-        ;
         Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
         Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
         Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
@@ -92,12 +89,10 @@ llm_build_hunyuan_moe::llm_build_hunyuan_moe(const llama_model & model, const ll
                 Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
             cb(cur, "attn_out", il);
         }
-        ;
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
         }
-        ;
         ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
         cb(ffn_inp, "ffn_inp", il);

@@ -142,7 +137,6 @@ llm_build_hunyuan_moe::llm_build_hunyuan_moe(const llama_model & model, const ll
         // input for next layer
         inpL = cur;
     }
-    ;
     cur = inpL;

     cur = build_norm(cur,

src/models/internlm2.cpp

Lines changed: 0 additions & 6 deletions
@@ -38,21 +38,18 @@ llm_build_internlm2::llm_build_internlm2(const llama_model & model, const llm_gr
             Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
             cb(Qcur, "Qcur", il);
         }
-        ;
         ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
         cb(Kcur, "Kcur", il);
         if (model.layers[il].bk) {
             Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
             cb(Kcur, "Kcur", il);
         }
-        ;
         ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
         cb(Vcur, "Vcur", il);
         if (model.layers[il].bv) {
             Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
             cb(Vcur, "Vcur", il);
         }
-        ;
         Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
         Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
         Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
@@ -77,12 +74,10 @@ llm_build_internlm2::llm_build_internlm2(const llama_model & model, const llm_gr
                 model.layers[il].wo, model.layers[il].bo,
                 Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
         }
-        ;
         if (il == n_layer - 1 && inp_out_ids) {
             cur = ggml_get_rows(ctx0, cur, inp_out_ids);
             inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
         }
-        ;
         ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
         cb(ffn_inp, "ffn_inp", il);

@@ -108,7 +103,6 @@ llm_build_internlm2::llm_build_internlm2(const llama_model & model, const llm_gr
         // input for next layer
         inpL = cur;
     }
-    ;
     cur = inpL;

     cur = build_norm(cur,
