-#include "common.h"
 #include "ggml.h"
 #include "llama.h"
-#include "llama-impl.h"
+#include "llama-context.h"
+#include "common.h"

 #include <algorithm>
 #include <cassert>
 #include <cinttypes>
 #include <cmath>
 #include <cstdio>
 #include <cstring>
-#include <map>
 #include <numeric>
 #include <regex>
 #include <string>
-#include <unordered_map>
 #include <vector>
 #include <thread>
 #include <mutex>
@@ -330,13 +328,13 @@ int main(int argc, char ** argv) {
         }
     }

-    const auto &tensors = llama_internal_get_tensor_map(ctx);
+    const auto & tensors = llama_internal_get_tensor_map(ctx);

     // check layer tensors
     int included_layers = 0;
     int64_t max_nelements = 0;
     bool is_f16 = false;
-    for (const auto & kv_tensor : tensors) {
+    for (const auto & kv_tensor : tensors) {
         if (!layer_included(params, kv_tensor.first)) {
             continue;
         }
@@ -371,8 +369,8 @@ int main(int argc, char ** argv) {
         if (!params.include_types.empty() && std::find(params.include_types.begin(), params.include_types.end(), i) == params.include_types.end()) {
             continue;
         }
-        const auto * qfns = ggml_get_type_traits(type);
-        const auto * qfns_cpu = ggml_get_type_traits_cpu(type);
+        const auto * qfns = ggml_get_type_traits(type);
+        const auto * qfns_cpu = ggml_get_type_traits_cpu(type);
         if (qfns_cpu->from_float && qfns->to_float) {
             if (params.verbose) {
                 printf("testing %s ...\n", ggml_type_name(type));
@@ -382,7 +380,7 @@ int main(int argc, char ** argv) {

             error_stats global_stats {};

-            for (const auto & kv_tensor : tensors) {
+            for (const auto & kv_tensor : tensors) {
                 if (!layer_included(params, kv_tensor.first)) {
                     continue;
                 }
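
As a side note on the `from_float`/`to_float` check touched in the hunks above: these are the per-type quantize/dequantize callbacks that quantize-stats uses to round-trip tensor data. Below is a minimal sketch of such a round trip, not the tool's actual code. It assumes the usual ggml trait signatures (`from_float(const float *, void *, int64_t)` and `to_float(const void *, float *, int64_t)`), that `ggml-cpu.h` declares `ggml_get_type_traits_cpu`, and that the element count is a multiple of the type's block size; the helper name `roundtrip_rmse` is illustrative only.

// Sketch: round-trip one row of floats through a quantized ggml type and
// return the RMS error, roughly what quantize-stats accumulates per tensor.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

#include "ggml.h"
#include "ggml-cpu.h" // assumed location of ggml_get_type_traits_cpu()

static double roundtrip_rmse(ggml_type type, const std::vector<float> & src) {
    const auto * qfns     = ggml_get_type_traits(type);     // provides to_float
    const auto * qfns_cpu = ggml_get_type_traits_cpu(type); // provides from_float
    if (!qfns_cpu->from_float || !qfns->to_float) {
        return -1.0; // type cannot be round-tripped on the CPU
    }

    ggml_quantize_init(type); // some types need one-time lookup tables

    const int64_t n = (int64_t) src.size(); // must be a multiple of ggml_blck_size(type)
    std::vector<uint8_t> quantized(ggml_row_size(type, n));
    std::vector<float>   restored(n);

    qfns_cpu->from_float(src.data(), quantized.data(), n); // quantize
    qfns->to_float(quantized.data(), restored.data(), n);  // dequantize

    double sum2 = 0.0;
    for (int64_t i = 0; i < n; i++) {
        const double d = (double) restored[i] - (double) src[i];
        sum2 += d * d;
    }
    return std::sqrt(sum2 / (double) n);
}

quantize-stats itself does not compute a single RMSE per row; as the last hunk shows, it loops over the model's tensors and accumulates the per-element errors into `error_stats` (and optionally per-layer stats), but the quantize/dequantize round trip is the same idea.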