@@ -2747,37 +2747,37 @@ struct llama_cparams {
 
 struct llama_layer_posnet {
     // resnet
-    struct ggml_tensor * norm1   = nullptr;
-    struct ggml_tensor * norm1_b = nullptr;
+    struct ggml_tensor * norm1;
+    struct ggml_tensor * norm1_b;
 
-    struct ggml_tensor * conv1   = nullptr;
-    struct ggml_tensor * conv1_b = nullptr;
+    struct ggml_tensor * conv1;
+    struct ggml_tensor * conv1_b;
 
-    struct ggml_tensor * norm2   = nullptr;
-    struct ggml_tensor * norm2_b = nullptr;
+    struct ggml_tensor * norm2;
+    struct ggml_tensor * norm2_b;
 
-    struct ggml_tensor * conv2   = nullptr;
-    struct ggml_tensor * conv2_b = nullptr;
+    struct ggml_tensor * conv2;
+    struct ggml_tensor * conv2_b;
 
     // attention
-    struct ggml_tensor * attn_norm   = nullptr;
-    struct ggml_tensor * attn_norm_b = nullptr;
+    struct ggml_tensor * attn_norm;
+    struct ggml_tensor * attn_norm_b;
 
-    struct ggml_tensor * attn_q   = nullptr;
-    struct ggml_tensor * attn_q_b = nullptr;
+    struct ggml_tensor * attn_q;
+    struct ggml_tensor * attn_q_b;
 
-    struct ggml_tensor * attn_k   = nullptr;
-    struct ggml_tensor * attn_k_b = nullptr;
+    struct ggml_tensor * attn_k;
+    struct ggml_tensor * attn_k_b;
 
-    struct ggml_tensor * attn_v   = nullptr;
-    struct ggml_tensor * attn_v_b = nullptr;
+    struct ggml_tensor * attn_v;
+    struct ggml_tensor * attn_v_b;
 
-    struct ggml_tensor * attn_o   = nullptr;
-    struct ggml_tensor * attn_o_b = nullptr;
+    struct ggml_tensor * attn_o;
+    struct ggml_tensor * attn_o_b;
 
     // normalize
-    struct ggml_tensor * norm   = nullptr;
-    struct ggml_tensor * norm_b = nullptr;
+    struct ggml_tensor * norm;
+    struct ggml_tensor * norm_b;
 };
 
 struct llama_layer_convnext {
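
Aside: the hunk above drops the in-class `= nullptr` initializers from `llama_layer_posnet`, so its tensor pointers are only null if the enclosing object is zero/value-initialized (as `llama_layer` arranges via its constructor below). A minimal standalone sketch of the difference, assuming nothing beyond standard C++ (`with_init`/`without_init` are illustrative names, not llama.cpp code):

```cpp
#include <cstdio>

struct with_init {
    int * p = nullptr; // null after *any* form of construction
};

struct without_init {
    int * p;           // indeterminate after plain `without_init c;`
};

int main() {
    with_init a;      // a.p == nullptr, guaranteed by the in-class initializer
    without_init b{}; // value-initialization zeroes b.p
    without_init c;   // c.p is indeterminate; reading it is undefined behavior
    (void) c;
    std::printf("%p %p\n", (void *) a.p, (void *) b.p);
    return 0;
}
```
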
@@ -2796,7 +2796,6 @@ struct llama_layer_convnext {
     struct ggml_tensor * gamma;
 };
 
-// TODO: separate into "llama_layer_enc" and "llama_layer_dec"
 struct llama_layer {
     llama_layer() {
         // initialize all pointers to NULL
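
For context on why these tensor pointers are nulled at all: optional tensors (e.g. biases) are typically tested against `nullptr` when the compute graph is built. A hedged sketch using the public ggml API; `apply_weight_bias` is a hypothetical helper, not an actual llama.cpp function:

```cpp
#include "ggml.h"

// hypothetical helper: apply a required weight and an optional bias
static struct ggml_tensor * apply_weight_bias(
        struct ggml_context * ctx,
        struct ggml_tensor  * w,    // required weight matrix
        struct ggml_tensor  * b,    // optional bias, may be nullptr
        struct ggml_tensor  * cur) {
    cur = ggml_mul_mat(ctx, w, cur);
    if (b) {
        cur = ggml_add(ctx, cur, b); // skipped for models without a bias tensor
    }
    return cur;
}
```
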
@@ -3167,6 +3166,7 @@ struct llama_sbatch {
     // batch indices of the output
     std::vector<size_t> out_ids;
     std::vector<llama_sbatch_seq> seq;
+
     const llama_batch * batch = nullptr;
 
     // buffers for the ubatch
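
One more note on the `llama_sbatch` hunk: `batch` is a non-owning pointer defaulted to `nullptr`, rebound whenever the sbatch is set up from a new batch. A minimal sketch of that pattern, assuming only the standard library (`sbatch_view`/`reset` are illustrative names, not the actual llama.cpp interface):

```cpp
#include <cstddef>
#include <vector>

struct llama_batch; // opaque here; defined by the library

struct sbatch_view {
    std::vector<size_t> out_ids;
    const llama_batch * batch = nullptr; // non-owning: never freed here

    // rebind the view to a new batch; the caller keeps ownership
    void reset(const llama_batch & b) {
        batch = &b;
        out_ids.clear(); // indices from the previous batch are now stale
    }
};
```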