
Commit f583a57

fix ubuntu gcc build warning
1 parent f78c724 commit f583a57

File tree

1 file changed: +2 −2 lines changed


src/llama-model.cpp

Lines changed: 2 additions & 2 deletions
@@ -3676,9 +3676,9 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
     // Dense linear weights
     dense_2_out_layers = create_tensor(tn(LLM_TENSOR_DENSE_2_OUT, "weight"), {n_embd, hparams.dense_2_feat_out}, TENSOR_NOT_REQUIRED);
-    GGML_ASSERT(dense_2_out_layers == nullptr || hparams.dense_2_feat_in == n_embd && "dense_2_feat_in must be equal to n_embd");
+    GGML_ASSERT((dense_2_out_layers == nullptr || hparams.dense_2_feat_in == n_embd) && "dense_2_feat_in must be equal to n_embd");
     dense_3_out_layers = create_tensor(tn(LLM_TENSOR_DENSE_3_OUT, "weight"), {hparams.dense_3_feat_in, n_embd}, TENSOR_NOT_REQUIRED);
-    GGML_ASSERT(dense_3_out_layers == nullptr || hparams.dense_3_feat_out == n_embd && "dense_3_feat_out must be equal to n_embd");
+    GGML_ASSERT((dense_3_out_layers == nullptr || hparams.dense_3_feat_out == n_embd) && "dense_3_feat_out must be equal to n_embd");
 
 
     for (int i = 0; i < n_layer; ++i) {
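
Why gcc warns here: in C++, '&&' binds tighter than '||', so "a || b && msg" parses as "a || (b && msg)", and gcc's -Wparentheses suggests adding parentheses around the '&&' within the '||'. Since a non-empty string literal is always truthy, both groupings evaluate identically in this case; the change only makes the intent explicit and silences the warning. A minimal standalone sketch of the before/after (using the standard assert macro rather than GGML_ASSERT; hypothetical names, not llama.cpp code):

// sketch.cpp -- standalone illustration of the -Wparentheses warning fixed above.
// Build: g++ -Wparentheses -c sketch.cpp
#include <cassert>

void before(const void * ptr, int feat_in, int n_embd) {
    // '&&' binds tighter than '||', so gcc parses this as
    //     ptr == nullptr || (feat_in == n_embd && "...")
    // and warns: suggest parentheses around '&&' within '||'.
    assert(ptr == nullptr || feat_in == n_embd && "feat_in must be equal to n_embd");
}

void after(const void * ptr, int feat_in, int n_embd) {
    // Explicit grouping states the intended condition; the string literal is
    // always truthy, so the result is unchanged and the warning disappears.
    assert((ptr == nullptr || feat_in == n_embd) && "feat_in must be equal to n_embd");
}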
