File tree Expand file tree Collapse file tree 2 files changed +42
-0
lines changed Expand file tree Collapse file tree 2 files changed +42
-0
lines changed Original file line number Diff line number Diff line change @@ -197,6 +197,7 @@ llama_build_and_test(test-gguf.cpp)
197
197
llama_build_and_test(test-backend-ops.cpp)

# Tests below require a model file on disk (LABEL "model" lets CTest filter them).
llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
llama_build_and_test(test-model-load-disk.cpp   LABEL "model")
llama_build_and_test(test-autorelease.cpp       LABEL "model")
if (NOT GGML_BACKEND_DL )
Original file line number Diff line number Diff line change
1
+ #include < cstdlib>
2
+
3
+ #include " get-model.h"
4
+ #include " llama.h"
5
+
6
+ int main (int argc, char * argv[]) {
7
+ auto * model_path = get_model_or_exit (argc, argv);
8
+ auto * file = fopen (model_path, " r" );
9
+ if (file == nullptr ) {
10
+ fprintf (stderr, " no model at '%s' found\n " , model_path);
11
+ return EXIT_FAILURE;
12
+ }
13
+
14
+ fprintf (stderr, " using '%s'\n " , model_path);
15
+ fclose (file);
16
+
17
+ llama_backend_init ();
18
+ auto params = llama_model_params{};
19
+ params.use_mmap = false ;
20
+ params.progress_callback = [](float progress, void * ctx) {
21
+ (void ) ctx;
22
+ fprintf (stderr, " %.2f%% " , progress * 100 .0f );
23
+ // true means: Don't cancel the load
24
+ return true ;
25
+ };
26
+ auto * model = llama_model_load_from_file (model_path, params);
27
+
28
+ // Add newline after progress output
29
+ fprintf (stderr, " \n " );
30
+
31
+ if (model == nullptr ) {
32
+ fprintf (stderr, " Failed to load model\n " );
33
+ llama_backend_free ();
34
+ return EXIT_FAILURE;
35
+ }
36
+
37
+ fprintf (stderr, " Model loaded successfully\n " );
38
+ llama_model_free (model);
39
+ llama_backend_free ();
40
+ return EXIT_SUCCESS;
41
+ }
You can’t perform that action at this time.
0 commit comments