Skip to content

Commit 7447ad3

Browse files
committed
llama : de-shadow (wip) [no ci]
1 parent e7564c5 commit 7447ad3

File tree

3 files changed

+97
-95
lines changed

3 files changed

+97
-95
lines changed

examples/gguf/gguf.cpp

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -204,13 +204,15 @@ static bool gguf_ex_read_1(const std::string & fname, bool check_data) {
204204
__func__, i, ggml_n_dims(cur), int(cur->ne[0]), int(cur->ne[1]), int(cur->ne[2]), int(cur->ne[3]), cur->name, cur->data);
205205

206206
// print first 10 elements
207-
const float * data = (const float *) cur->data;
207+
{
208+
const float * data = (const float *) cur->data;
208209

209-
printf("%s data[:10] : ", name);
210-
for (int j = 0; j < MIN(10, ggml_nelements(cur)); ++j) {
211-
printf("%f ", data[j]);
210+
printf("%s data[:10] : ", name);
211+
for (int j = 0; j < MIN(10, ggml_nelements(cur)); ++j) {
212+
printf("%f ", data[j]);
213+
}
214+
printf("\n\n");
212215
}
213-
printf("\n\n");
214216

215217
// check data
216218
if (check_data) {

src/llama-kv-cache.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -58,12 +58,12 @@ struct llama_kv_cache {
5858
std::vector<ggml_backend_buffer_ptr> bufs;
5959

6060
size_t total_size() const {
61-
size_t size = 0;
61+
size_t size_all = 0;
6262
for (const auto & buf : bufs) {
63-
size += ggml_backend_buffer_get_size(buf.get());
63+
size_all += ggml_backend_buffer_get_size(buf.get());
6464
}
6565

66-
return size;
66+
return size_all;
6767
}
6868

6969
// TODO: better data structures to reduce the cost of this operation

0 commit comments

Comments (0)