Skip to content

Commit 0704760

Browse files
committed
fix more refs
1 parent ebaf5cd commit 0704760

File tree

4 files changed

+21
-21
lines changed

4 files changed

+21
-21
lines changed

tools/cvector-generator/cvector-generator.cpp

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -98,8 +98,8 @@ struct callback_data {
9898
// NOTE: final layer is ignored. we only have (n_layers - 1) to process
9999
std::vector<struct ggml_tensor *> calc_diff() {
100100
for (float il = 0; il < v_pos.size(); il++) {
101-
float * a = (float *) v_pos[il]->data;
102-
float * b = (float *) v_neg[il]->data;
101+
float * a = (float *) tensor_data(v_pos[il]);
102+
float * b = (float *) tensor_data(v_neg[il]);
103103
size_t n_elem = ggml_nelements(v_pos[il]);
104104
for (size_t j = 0; j < n_elem; j++) {
105105
a[j] -= b[j];
@@ -141,7 +141,7 @@ struct callback_data {
141141
struct ggml_tensor * diff_filtered = ggml_new_tensor_2d(
142142
ctx_ggml, GGML_TYPE_F32, n_embd, n_nonzero_rows);
143143
ggml_format_name(diff_filtered, "diff_filtered_%s", a->name);
144-
diff_filtered->data = malloc(ggml_nbytes(diff_filtered));
144+
tensor_set_data(diff_filtered, malloc(ggml_nbytes(diff_filtered)));
145145

146146
// copy non-zero rows
147147
for (int dest_row = 0; dest_row < n_nonzero_rows; dest_row++) {
@@ -159,9 +159,9 @@ struct callback_data {
159159

160160
// we don't implement destructor, because we want to reuse callback_data. we just want to free the tensors
161161
void reset() {
162-
for (auto ptr : v_pos) free(ptr->data);
163-
for (auto ptr : v_neg) free(ptr->data);
164-
for (auto ptr : v_diff_filtered) free(ptr->data);
162+
for (auto ptr : v_pos) free(tensor_data(ptr));
163+
for (auto ptr : v_neg) free(tensor_data(ptr));
164+
for (auto ptr : v_diff_filtered) free(tensor_data(ptr));
165165
v_pos.clear();
166166
v_neg.clear();
167167
v_diff_filtered.clear();
@@ -208,7 +208,7 @@ struct train_context {
208208
std::vector<uint8_t> empty;
209209
v_diff_tmp.push_back(empty);
210210
auto t = ggml_new_tensor_1d(ctx_ggml, GGML_TYPE_F32, n_embd);
211-
t->data = malloc(ggml_nbytes(t)); // TODO: get rid of malloc if possible
211+
tensor_set_data(t, malloc(ggml_nbytes(t))); // TODO: get rid of malloc if possible
212212
v_final.push_back(t);
213213
}
214214
}
@@ -221,7 +221,7 @@ struct train_context {
221221
auto & diff_tmp = v_diff_tmp[il];
222222
size_t curr_size = diff_tmp.size();
223223
diff_tmp.resize(curr_size + ggml_nbytes(t));
224-
memcpy(diff_tmp.data() + curr_size, t->data, ggml_nbytes(t));
224+
memcpy(diff_tmp.data() + curr_size, tensor_data(t), ggml_nbytes(t));
225225
}
226226
}
227227

@@ -238,7 +238,7 @@ struct train_context {
238238
? ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd)
239239
: ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_embd, n_rows);
240240
ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
241-
diff->data = malloc(ggml_nbytes(diff)); // TODO: get rid of this malloc if possible
241+
tensor_set_data(diff, malloc(ggml_nbytes(diff))); // TODO: get rid of this malloc if possible
242242
if (transpose) {
243243
// copy data & transpose
244244
float * arr = (float *) diff_tmp.data();
@@ -250,7 +250,7 @@ struct train_context {
250250
}
251251
} else {
252252
// only copy
253-
memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
253+
memcpy(tensor_data(diff), diff_tmp.data(), ggml_nbytes(diff));
254254
}
255255
v_diff.push_back(diff);
256256
print_debug_tensor(diff);
@@ -260,8 +260,8 @@ struct train_context {
260260
}
261261

262262
~train_context() {
263-
for (auto ptr : v_final) free(ptr->data);
264-
for (auto ptr : v_diff) free(ptr->data);
263+
for (auto ptr : v_final) free(tensor_data(ptr));
264+
for (auto ptr : v_diff) free(tensor_data(ptr));
265265
// no need to free v_diff_tmp, since we didn't use malloc
266266
ggml_free(ctx_ggml);
267267
}

tools/cvector-generator/pca.hpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ struct pca_model {
102102
ggml_set_name(dev_square, "dev_square");
103103
ggml_set_name(dev_eigenvector, "dev_eigenvector");
104104
buffer = ggml_backend_alloc_ctx_tensors(ctx, backend);
105-
ggml_backend_tensor_set(dev_input, t_input->data, 0, ggml_nbytes(t_input));
105+
ggml_backend_tensor_set(dev_input, tensor_data(t_input), 0, ggml_nbytes(t_input));
106106

107107
// initialize eigenvector to random normalized vector
108108
{
@@ -285,7 +285,7 @@ static void power_iteration(
285285

286286
// get output tensor
287287
GGML_ASSERT(last_eigenvector);
288-
ggml_backend_tensor_get(last_eigenvector, output->data, 0, ggml_nbytes(last_eigenvector));
288+
ggml_backend_tensor_get(last_eigenvector, tensor_data(output), 0, ggml_nbytes(last_eigenvector));
289289
//print_debug_tensor(output);
290290
ggml_gallocr_free(allocr);
291291

tools/imatrix/imatrix.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -247,7 +247,7 @@ bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void *
247247
ggml_backend_tensor_get(src1, m_src1_data.data(), 0, src1_nbytes);
248248
}
249249

250-
const char * data = is_host ? (const char *) src1->data : m_src1_data.data();
250+
const char * data = is_host ? (const char *) tensor_data(src1) : m_src1_data.data();
251251
GGML_ASSERT(src1->nb[0] == ggml_element_size(src1));
252252

253253
// TODO: 4d? (is that even used in practice?)
@@ -576,10 +576,10 @@ void IMatrixCollector::save_imatrix(int32_t n_chunk) const {
576576
ggml_format_name(counts, "%s.counts", name.c_str());
577577

578578
for (int32_t j = 0; j < nval; ++j) {
579-
((float *) in_sum2->data)[j] = (float) stat.values[j];
579+
((float *) tensor_data(in_sum2))[j] = (float) stat.values[j];
580580
}
581581
for (int32_t j = 0; j < nmat; ++j) {
582-
((float *) counts->data)[j] = (float) stat.counts[j];
582+
((float *) tensor_data(counts))[j] = (float) stat.counts[j];
583583
}
584584

585585
gguf_add_tensor(ctx_gguf, in_sum2);
@@ -786,10 +786,10 @@ bool IMatrixCollector::load_imatrix(const char * file_name) {
786786

787787
// Recreate the state as expected by save_imatrix()
788788
for (int64_t j = 0; j < nval; j++) {
789-
e.values[j] += ((const float *) in_sum2->data)[j];
789+
e.values[j] += ((const float *) tensor_data(in_sum2))[j];
790790
}
791791
for (int64_t j = 0; j < ncounts; j++) {
792-
e.counts[j] += std::lround(((const float *) counts->data)[j]);
792+
e.counts[j] += std::lround(((const float *) tensor_data(counts))[j]);
793793
}
794794
}
795795

tools/quantize/quantize.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -286,10 +286,10 @@ static int load_imatrix(const std::string & imatrix_file, std::vector<std::strin
286286
e.resize(ggml_nelements(sums));
287287
float max_count = 0.0f;
288288
for (int64_t j = 0; j < ne1; ++j) {
289-
const float count = ((const float *) counts->data)[j];
289+
const float count = ((const float *) tensor_data(counts))[j];
290290
if (count > 0.0f) {
291291
for (int64_t i = 0; i < ne0; ++i) {
292-
e[j*ne0 + i] = ((const float *) sums->data)[j*ne0 + i] / count;
292+
e[j*ne0 + i] = ((const float *) tensor_data(sums))[j*ne0 + i] / count;
293293
}
294294
} else {
295295
// Partial imatrix data, this tensor never got any input during calibration

0 commit comments

Comments (0)