@@ -98,8 +98,8 @@ struct callback_data {
     // NOTE: final layer is ignored. we only have (n_layers - 1) to process
     std::vector<struct ggml_tensor *> calc_diff() {
         for (float il = 0; il < v_pos.size(); il++) {
-            float * a = (float *) v_pos[il]->data;
-            float * b = (float *) v_neg[il]->data;
+            float * a = (float *) tensor_data(v_pos[il]);
+            float * b = (float *) tensor_data(v_neg[il]);
             size_t n_elem = ggml_nelements(v_pos[il]);
             for (size_t j = 0; j < n_elem; j++) {
                 a[j] -= b[j];
@@ -141,7 +141,7 @@ struct callback_data {
         struct ggml_tensor * diff_filtered = ggml_new_tensor_2d(
             ctx_ggml, GGML_TYPE_F32, n_embd, n_nonzero_rows);
         ggml_format_name(diff_filtered, "diff_filtered_%s", a->name);
-        diff_filtered->data = malloc(ggml_nbytes(diff_filtered));
+        tensor_set_data(diff_filtered, malloc(ggml_nbytes(diff_filtered)));
 
         // copy non-zero rows
         for (int dest_row = 0; dest_row < n_nonzero_rows; dest_row++) {
@@ -159,9 +159,9 @@ struct callback_data {
 
     // we don't implement destructor, because we want to reuse callback_data. we just want to free the tensors
     void reset() {
-        for (auto ptr : v_pos) free(ptr->data);
-        for (auto ptr : v_neg) free(ptr->data);
-        for (auto ptr : v_diff_filtered) free(ptr->data);
+        for (auto ptr : v_pos) free(tensor_data(ptr));
+        for (auto ptr : v_neg) free(tensor_data(ptr));
+        for (auto ptr : v_diff_filtered) free(tensor_data(ptr));
         v_pos.clear();
         v_neg.clear();
         v_diff_filtered.clear();
@@ -208,7 +208,7 @@ struct train_context {
             std::vector<uint8_t> empty;
             v_diff_tmp.push_back(empty);
             auto t = ggml_new_tensor_1d(ctx_ggml, GGML_TYPE_F32, n_embd);
-            t->data = malloc(ggml_nbytes(t)); // TODO: get rid of malloc if possible
+            tensor_set_data(t, malloc(ggml_nbytes(t))); // TODO: get rid of malloc if possible
             v_final.push_back(t);
         }
     }
@@ -221,7 +221,7 @@ struct train_context {
             auto & diff_tmp = v_diff_tmp[il];
             size_t curr_size = diff_tmp.size();
             diff_tmp.resize(curr_size + ggml_nbytes(t));
-            memcpy(diff_tmp.data() + curr_size, t->data, ggml_nbytes(t));
+            memcpy(diff_tmp.data() + curr_size, tensor_data(t), ggml_nbytes(t));
         }
     }
 
@@ -238,7 +238,7 @@ struct train_context {
                 ? ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd)
                 : ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_embd, n_rows);
             ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
-            diff->data = malloc(ggml_nbytes(diff)); // TODO: get rid of this malloc if possible
+            tensor_set_data(diff, malloc(ggml_nbytes(diff))); // TODO: get rid of this malloc if possible
             if (transpose) {
                 // copy data & transpose
                 float * arr = (float *) diff_tmp.data();
@@ -250,7 +250,7 @@ struct train_context {
                 }
             } else {
                 // only copy
-                memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
+                memcpy(tensor_data(diff), diff_tmp.data(), ggml_nbytes(diff));
             }
             v_diff.push_back(diff);
             print_debug_tensor(diff);
@@ -260,8 +260,8 @@ struct train_context {
     }
 
     ~train_context() {
-        for (auto ptr : v_final) free(ptr->data);
-        for (auto ptr : v_diff) free(ptr->data);
+        for (auto ptr : v_final) free(tensor_data(ptr));
+        for (auto ptr : v_diff) free(tensor_data(ptr));
         // no need to free v_diff_tmp, since we didn't use malloc
         ggml_free(ctx_ggml);
     }
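
For context: every hunk above replaces a direct read or write of ggml_tensor::data with the tensor_data() / tensor_set_data() accessors. Their definitions are not part of this diff; the following is a minimal sketch of what they are assumed to look like, inferred only from the call sites above (returning a raw pointer that is cast, freed, or passed to memcpy, and accepting a pointer from malloc), not the actual ggml implementation.

    #include "ggml.h"

    // Assumed shape of the accessors: thin wrappers around the tensor's raw
    // data pointer, so call sites no longer touch the struct field directly.
    static inline void * tensor_data(const struct ggml_tensor * t) {
        return t->data; // hypothetical; the real accessor may do more work
    }

    static inline void tensor_set_data(struct ggml_tensor * t, void * data) {
        t->data = data; // hypothetical; shown only to illustrate the call sites
    }

Routing all access through two helpers like these keeps the call sites in this file unchanged if the way tensor storage is owned or mapped changes later; only the accessors would need to be updated.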