@@ -1,6 +1,7 @@
 #if defined(_WIN32)
 #    include <windows.h>
 #else
+#    include <sys/ioctl.h>
 #    include <unistd.h>
 #endif
 
@@ -29,7 +30,6 @@
 class Opt {
   public:
     int init(int argc, const char ** argv) {
-        construct_help_str_();
         // Parse arguments
         if (parse(argc, argv)) {
             printe("Error: Failed to parse arguments.\n");
@@ -48,14 +48,54 @@ class Opt {
 
     std::string model_;
     std::string user_;
-    int context_size_ = 2048, ngl_ = -1;
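+    // -1 is a sentinel for "not set on the command line"; the real defaults are taken from llama.cpp at the point of use.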
+    int context_size_ = -1, ngl_ = -1;
+    bool verbose_ = false;
 
   private:
-    std::string help_str_;
     bool help_ = false;
 
-    void construct_help_str_() {
-        help_str_ =
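+    // Parses flags and positional arguments in a single pass; returns 0 on success, non-zero on a usage error.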
+    int parse(int argc, const char ** argv) {
+        int positional_args_i = 0;
+        for (int i = 1; i < argc; ++i) {
+            if (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0) {
+                if (i + 1 >= argc) {
+                    return 1;
+                }
+
+                context_size_ = std::atoi(argv[++i]);
+            } else if (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--ngl") == 0) {
+                if (i + 1 >= argc) {
+                    return 1;
+                }
+
+                ngl_ = std::atoi(argv[++i]);
+            } else if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0 ||
+                       strcmp(argv[i], "--log-verbose") == 0) {
+                verbose_ = true;
+            } else if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
+                help_ = true;
+                return 0;
+            } else if (!positional_args_i) {
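+                // The first positional argument is the model; reject an empty string or anything that looks like a flag.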
+                if (!argv[i][0] || argv[i][0] == '-') {
+                    return 1;
+                }
+
+                ++positional_args_i;
+                model_ = argv[i];
+            } else if (positional_args_i == 1) {
+                ++positional_args_i;
+                user_ = argv[i];
+            } else {
+                user_ += " " + std::string(argv[i]);
+            }
+        }
+
+        return model_.empty(); // model_ is the only required value
+    }
+
+    // Note: -v, --verbose and --log-verbose are accepted by parse() but not yet listed in the help text below.
+    void help() const {
+        printf(
             "Description:\n"
             "  Runs an LLM\n"
             "\n"
@@ -64,15 +104,9 @@ class Opt {
             "\n"
             "Options:\n"
             "  -c, --context-size <value>\n"
-            "      Context size (default: " +
-            std::to_string(context_size_);
-        help_str_ +=
-            ")\n"
+            "      Context size (default: %d)\n"
             "  -n, --ngl <value>\n"
-            "      Number of GPU layers (default: " +
-            std::to_string(ngl_);
-        help_str_ +=
-            ")\n"
+            "      Number of GPU layers (default: %d)\n"
             "  -h, --help\n"
             "      Show help message\n"
             "\n"
@@ -96,43 +130,10 @@ class Opt {
             "  llama-run https://example.com/some-file1.gguf\n"
             "  llama-run some-file2.gguf\n"
             "  llama-run file://some-file3.gguf\n"
-            "  llama-run --ngl 99 some-file4.gguf\n"
-            "  llama-run --ngl 99 some-file5.gguf Hello World\n";
+            "  llama-run --ngl 999 some-file4.gguf\n"
+            "  llama-run --ngl 999 some-file5.gguf Hello World\n",
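+            // The defaults shown above are read from llama.cpp itself, so the help text stays in sync with the library.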
+            llama_context_default_params().n_batch, llama_model_default_params().n_gpu_layers);
     }
-
-    int parse(int argc, const char ** argv) {
-        int positional_args_i = 0;
-        for (int i = 1; i < argc; ++i) {
-            if (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0) {
-                if (i + 1 >= argc) {
-                    return 1;
-                }
-
-                context_size_ = std::atoi(argv[++i]);
-            } else if (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--ngl") == 0) {
-                if (i + 1 >= argc) {
-                    return 1;
-                }
-
-                ngl_ = std::atoi(argv[++i]);
-            } else if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
-                help_ = true;
-                return 0;
-            } else if (!positional_args_i) {
-                ++positional_args_i;
-                model_ = argv[i];
-            } else if (positional_args_i == 1) {
-                ++positional_args_i;
-                user_ = argv[i];
-            } else {
-                user_ += " " + std::string(argv[i]);
-            }
-        }
-
-        return model_.empty(); // model_ is the only required value
-    }
-
-    void help() const { printf("%s", help_str_.c_str()); }
 };
 
 struct progress_data {
@@ -151,6 +152,18 @@ struct FileDeleter {
 
 typedef std::unique_ptr<FILE, FileDeleter> FILE_ptr;
 
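+// Best-effort query of the terminal width, used to size the progress bar and to clear lines.
+// Note the return values of GetConsoleScreenBufferInfo()/ioctl() are not checked, so the result
+// is unspecified when stdout is not a terminal.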
+static int get_terminal_width() {
+#if defined(_WIN32)
+    CONSOLE_SCREEN_BUFFER_INFO csbi;
+    GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
+    return csbi.srWindow.Right - csbi.srWindow.Left + 1;
+#else
+    struct winsize w;
+    ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
+    return w.ws_col;
+#endif
+}
+
 #ifdef LLAMA_USE_CURL
 class CurlWrapper {
   public:
@@ -270,9 +283,9 @@ class CurlWrapper {
 
     static std::string human_readable_size(curl_off_t size) {
         static const char * suffix[] = { "B", "KB", "MB", "GB", "TB" };
-        char length = sizeof(suffix) / sizeof(suffix[0]);
-        int i = 0;
-        double dbl_size = size;
+        char   length   = sizeof(suffix) / sizeof(suffix[0]);
+        int    i        = 0;
+        double dbl_size = size;
         if (size > 1024) {
             for (i = 0; (size / 1024) > 0 && i < length - 1; i++, size /= 1024) {
                 dbl_size = size / 1024.0;
@@ -293,27 +306,75 @@ class CurlWrapper {
 
         total_to_download += data->file_size;
         const curl_off_t now_downloaded_plus_file_size = now_downloaded + data->file_size;
-        const curl_off_t percentage = (now_downloaded_plus_file_size * 100) / total_to_download;
-        const curl_off_t pos = (percentage / 5);
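+        // Rendering is now split into helpers so the bar can be sized to the current terminal width;
+        // "tim" below holds the estimated number of seconds remaining.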
+        const curl_off_t percentage = calculate_percentage(now_downloaded_plus_file_size, total_to_download);
+        std::string progress_prefix = generate_progress_prefix(percentage);
+
+        const double speed = calculate_speed(now_downloaded, data->start_time);
+        const double tim = (total_to_download - now_downloaded) / speed;
+        std::string progress_suffix =
+            generate_progress_suffix(now_downloaded_plus_file_size, total_to_download, speed, tim);
+
+        const int progress_bar_width = calculate_progress_bar_width(progress_prefix, progress_suffix);
         std::string progress_bar;
-        for (int i = 0; i < 20; ++i) {
-            progress_bar.append((i < pos) ? "█" : " ");
-        }
+        generate_progress_bar(progress_bar_width, percentage, progress_bar);
 
-        // Calculate download speed and estimated time to completion
-        const auto now = std::chrono::steady_clock::now();
-        const std::chrono::duration<double> elapsed_seconds = now - data->start_time;
-        const double speed = now_downloaded / elapsed_seconds.count();
-        const double estimated_time = (total_to_download - now_downloaded) / speed;
-        printe("\r%ld%% |%s| %s/%s %.2f MB/s %s", percentage, progress_bar.c_str(),
-               human_readable_size(now_downloaded).c_str(), human_readable_size(total_to_download).c_str(),
-               speed / (1024 * 1024), human_readable_time(estimated_time).c_str());
-        fflush(stderr);
+        print_progress(progress_prefix, progress_bar, progress_suffix);
         data->printed = true;
 
         return 0;
     }
 
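+    // Assumes total_to_download is non-zero by the time this runs (the caller adds file_size in first).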
+    static curl_off_t calculate_percentage(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download) {
+        return (now_downloaded_plus_file_size * 100) / total_to_download;
+    }
+
+    static std::string generate_progress_prefix(curl_off_t percentage) {
+        std::ostringstream progress_output;
+        progress_output << percentage << "% |";
+        return progress_output.str();
+    }
+
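+    // Average speed over the whole download so far; a zero elapsed time divides by zero, which for
+    // doubles yields inf rather than a crash.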
+    static double calculate_speed(curl_off_t now_downloaded, const std::chrono::steady_clock::time_point & start_time) {
+        const auto now = std::chrono::steady_clock::now();
+        const std::chrono::duration<double> elapsed_seconds = now - start_time;
+        return now_downloaded / elapsed_seconds.count();
+    }
+
+    static std::string generate_progress_suffix(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download,
+                                                double speed, double estimated_time) {
+        std::ostringstream progress_output;
+        progress_output << human_readable_size(now_downloaded_plus_file_size).c_str() << "/"
+                        << human_readable_size(total_to_download).c_str() << " " << std::fixed << std::setprecision(2)
+                        << speed / (1024 * 1024) << " MB/s " << human_readable_time(estimated_time).c_str();
+        return progress_output.str();
+    }
+
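+    // Whatever is left after the prefix, the suffix and 5 columns of separators becomes the bar,
+    // clamped to a minimum of 10 columns on narrow terminals.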
+    static int calculate_progress_bar_width(const std::string & progress_prefix, const std::string & progress_suffix) {
+        int progress_bar_width = get_terminal_width() - progress_prefix.size() - progress_suffix.size() - 5;
+        if (progress_bar_width < 10) {
+            progress_bar_width = 10;
+        }
+        return progress_bar_width;
+    }
+
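+    // Scales the percentage to the computed width. The bar is built in the out-parameter and also
+    // returned; the caller currently ignores the return value.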
+    static std::string generate_progress_bar(int progress_bar_width, curl_off_t percentage,
+                                             std::string & progress_bar) {
+        const curl_off_t pos = (percentage * progress_bar_width) / 100;
+        for (int i = 0; i < progress_bar_width; ++i) {
+            progress_bar.append((i < pos) ? "█" : " ");
+        }
+
+        return progress_bar;
+    }
+
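+    // Clears the current line by printing a full terminal width of spaces, then redraws the
+    // assembled prefix|bar|suffix in place.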
+    static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
+                               const std::string & progress_suffix) {
+        std::ostringstream progress_output;
+        progress_output << progress_prefix << progress_bar << "| " << progress_suffix;
+        printe("\r%*s\r%s", get_terminal_width(), " ", progress_output.str().c_str());
+        fflush(stderr);
+    }
+
     // Function to write data to a file
     static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
         FILE * out = static_cast<FILE *>(stream);
@@ -467,6 +528,7 @@ class LlamaData {
         llama_model_params model_params = llama_model_default_params();
         model_params.n_gpu_layers = opt.ngl_ >= 0 ? opt.ngl_ : model_params.n_gpu_layers;
         resolve_model(opt.model_);
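+        // No trailing newline: the intent appears to be that later output can overwrite this status line.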
+        printe("Loading model");
         llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), model_params));
         if (!model) {
             printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
@@ -478,8 +540,7 @@ class LlamaData {
     // Initializes the context with the specified parameters
     llama_context_ptr initialize_context(const llama_model_ptr & model, const int n_ctx) {
         llama_context_params ctx_params = llama_context_default_params();
-        ctx_params.n_ctx = n_ctx;
-        ctx_params.n_batch = n_ctx;
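+        // n_ctx < 0 means -c was not given; fall back to the library's default batch size for both fields.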
+        ctx_params.n_ctx = ctx_params.n_batch = n_ctx >= 0 ? n_ctx : ctx_params.n_batch;
         llama_context_ptr context(llama_new_context_with_model(model.get(), ctx_params));
         if (!context) {
             printe("%s: error: failed to create the llama_context\n", __func__);
@@ -642,8 +703,9 @@ static int handle_user_input(std::string & user_input, const std::string & user_
     }
 
     printf(
-        "\r                                            "
-        "\r\033[32m> \033[0m");
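+        // "%*s" expands to a terminal-width run of spaces, clearing the whole line before the green "> " prompt.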
+        "\r%*s"
+        "\r\033[32m> \033[0m",
+        get_terminal_width(), " ");
     return read_user_input(user_input); // Returns true if input ends the loop
 }
 
@@ -682,8 +744,9 @@ static int chat_loop(LlamaData & llama_data, const std::string & user_) {
     return 0;
 }
 
-static void log_callback(const enum ggml_log_level level, const char * text, void *) {
-    if (level == GGML_LOG_LEVEL_ERROR) {
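+// The opaque user-data pointer now carries the Opt instance, so -v/--verbose can enable all log levels
+// instead of errors only.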
+static void log_callback(const enum ggml_log_level level, const char * text, void * p) {
+    const Opt * opt = static_cast<Opt *>(p);
+    if (opt->verbose_ || level == GGML_LOG_LEVEL_ERROR) {
         printe("%s", text);
     }
 }
@@ -721,7 +784,7 @@ int main(int argc, const char ** argv) {
         opt.user_ += read_pipe_data();
     }
 
-    llama_log_set(log_callback, nullptr);
+    llama_log_set(log_callback, &opt);
     LlamaData llama_data;
     if (llama_data.init(opt)) {
         return 1;