 #if defined(_WIN32)
 #    include <windows.h>
 #else
+#    include <sys/ioctl.h>
 #    include <unistd.h>
 #endif
 
 class Opt {
   public:
     int init(int argc, const char ** argv) {
-        construct_help_str_();
         // Parse arguments
         if (parse(argc, argv)) {
             printe("Error: Failed to parse arguments.\n");
@@ -48,14 +48,54 @@ class Opt {
 
     std::string model_;
     std::string user_;
-    int context_size_ = 2048, ngl_ = -1;
+    int context_size_ = -1, ngl_ = -1;
+    bool verbose_ = false;
 
   private:
-    std::string help_str_;
     bool help_ = false;
 
-    void construct_help_str_() {
-        help_str_ =
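+    // Returns 0 on success and non-zero on malformed input; for example,
+    // "llama-run -c 4096 model.gguf Hello world" leaves context_size_ == 4096,
+    // model_ == "model.gguf" and user_ == "Hello world".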
+    int parse(int argc, const char ** argv) {
+        int positional_args_i = 0;
+        for (int i = 1; i < argc; ++i) {
+            if (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0) {
+                if (i + 1 >= argc) {
+                    return 1;
+                }
+
+                context_size_ = std::atoi(argv[++i]);
+            } else if (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--ngl") == 0) {
+                if (i + 1 >= argc) {
+                    return 1;
+                }
+
+                ngl_ = std::atoi(argv[++i]);
+            } else if (strcmp(argv[i], "-v") == 0 || strcmp(argv[i], "--verbose") == 0 ||
+                       strcmp(argv[i], "--log-verbose") == 0) {
+                verbose_ = true;
+            } else if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
+                help_ = true;
+                return 0;
+            } else if (!positional_args_i) {
+                if (!argv[i][0] || argv[i][0] == '-') {
+                    return 1;
+                }
+
+                ++positional_args_i;
+                model_ = argv[i];
+            } else if (positional_args_i == 1) {
+                ++positional_args_i;
+                user_ = argv[i];
+            } else {
+                user_ += " " + std::string(argv[i]);
+            }
+        }
+
+        return model_.empty(); // model_ is the only required value
+    }
+
+    // -v, --verbose, --log-verbose
+    void help() const {
+        printf(
             "Description:\n"
             "  Runs a llm\n"
             "\n"
@@ -64,15 +104,11 @@ class Opt {
64104 " \n "
65105 " Options:\n "
66106 " -c, --context-size <value>\n "
67- " Context size (default: " +
68- std::to_string (context_size_);
69- help_str_ +=
70- " )\n "
107+ " Context size (default: %d)\n "
71108 " -n, --ngl <value>\n "
72- " Number of GPU layers (default: " +
73- std::to_string (ngl_);
74- help_str_ +=
75- " )\n "
109+ " Number of GPU layers (default: %d)\n "
110+ " -v, --verbose, --log-verbose\n "
111+ " Set verbosity level to infinity (i.e. log all messages, useful for debugging)\n "
76112 " -h, --help\n "
77113 " Show help message\n "
78114 " \n "
@@ -96,43 +132,10 @@ class Opt {
96132 " llama-run https://example.com/some-file1.gguf\n "
97133 " llama-run some-file2.gguf\n "
98134 " llama-run file://some-file3.gguf\n "
99- " llama-run --ngl 99 some-file4.gguf\n "
100- " llama-run --ngl 99 some-file5.gguf Hello World\n " ;
135+ " llama-run --ngl 999 some-file4.gguf\n "
136+ " llama-run --ngl 999 some-file5.gguf Hello World\n " ,
137+ llama_context_default_params ().n_batch , llama_model_default_params ().n_gpu_layers );
101138 }
-
-    int parse(int argc, const char ** argv) {
-        int positional_args_i = 0;
-        for (int i = 1; i < argc; ++i) {
-            if (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0) {
-                if (i + 1 >= argc) {
-                    return 1;
-                }
-
-                context_size_ = std::atoi(argv[++i]);
-            } else if (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--ngl") == 0) {
-                if (i + 1 >= argc) {
-                    return 1;
-                }
-
-                ngl_ = std::atoi(argv[++i]);
-            } else if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
-                help_ = true;
-                return 0;
-            } else if (!positional_args_i) {
-                ++positional_args_i;
-                model_ = argv[i];
-            } else if (positional_args_i == 1) {
-                ++positional_args_i;
-                user_ = argv[i];
-            } else {
-                user_ += " " + std::string(argv[i]);
-            }
-        }
-
-        return model_.empty(); // model_ is the only required value
-    }
-
-    void help() const { printf("%s", help_str_.c_str()); }
 };
 
 struct progress_data {
@@ -151,6 +154,18 @@ struct FileDeleter {
 
 typedef std::unique_ptr<FILE, FileDeleter> FILE_ptr;
 
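+// Best-effort query of the terminal width, used to size the progress bar and to
+// blank out previously printed lines.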
+static int get_terminal_width() {
+#if defined(_WIN32)
+    CONSOLE_SCREEN_BUFFER_INFO csbi;
+    GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
+    return csbi.srWindow.Right - csbi.srWindow.Left + 1;
+#else
+    struct winsize w;
+    ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
+    return w.ws_col;
+#endif
+}
+
 #ifdef LLAMA_USE_CURL
 class CurlWrapper {
   public:
@@ -270,9 +285,9 @@ class CurlWrapper {
 
     static std::string human_readable_size(curl_off_t size) {
         static const char * suffix[] = { "B", "KB", "MB", "GB", "TB" };
-        char length = sizeof(suffix) / sizeof(suffix[0]);
-        int i = 0;
-        double dbl_size = size;
+        char   length   = sizeof(suffix) / sizeof(suffix[0]);
+        int    i        = 0;
+        double dbl_size = size;
         if (size > 1024) {
             for (i = 0; (size / 1024) > 0 && i < length - 1; i++, size /= 1024) {
                 dbl_size = size / 1024.0;
@@ -293,27 +308,75 @@ class CurlWrapper {
 
         total_to_download += data->file_size;
         const curl_off_t now_downloaded_plus_file_size = now_downloaded + data->file_size;
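+        // The update is assembled in stages: percentage -> prefix, transfer rate
+        // and ETA -> suffix, and the remaining terminal columns -> the bar itself.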
-        const curl_off_t percentage = (now_downloaded_plus_file_size * 100) / total_to_download;
-        const curl_off_t pos = (percentage / 5);
+        const curl_off_t percentage      = calculate_percentage(now_downloaded_plus_file_size, total_to_download);
+        std::string      progress_prefix = generate_progress_prefix(percentage);
+
+        const double speed = calculate_speed(now_downloaded, data->start_time);
+        const double tim   = (total_to_download - now_downloaded) / speed;
+        std::string  progress_suffix =
+            generate_progress_suffix(now_downloaded_plus_file_size, total_to_download, speed, tim);
+
+        const int progress_bar_width = calculate_progress_bar_width(progress_prefix, progress_suffix);
         std::string progress_bar;
-        for (int i = 0; i < 20; ++i) {
-            progress_bar.append((i < pos) ? "█" : " ");
-        }
+        generate_progress_bar(progress_bar_width, percentage, progress_bar);
 
-        // Calculate download speed and estimated time to completion
-        const auto now = std::chrono::steady_clock::now();
-        const std::chrono::duration<double> elapsed_seconds = now - data->start_time;
-        const double speed = now_downloaded / elapsed_seconds.count();
-        const double estimated_time = (total_to_download - now_downloaded) / speed;
-        printe("\r%ld%% |%s| %s/%s %.2f MB/s %s", percentage, progress_bar.c_str(),
-               human_readable_size(now_downloaded).c_str(), human_readable_size(total_to_download).c_str(),
-               speed / (1024 * 1024), human_readable_time(estimated_time).c_str());
-        fflush(stderr);
+        print_progress(progress_prefix, progress_bar, progress_suffix);
         data->printed = true;
 
         return 0;
     }
 
+    static curl_off_t calculate_percentage(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download) {
+        return (now_downloaded_plus_file_size * 100) / total_to_download;
+    }
+
+    static std::string generate_progress_prefix(curl_off_t percentage) {
+        std::ostringstream progress_output;
+        progress_output << percentage << "% |";
+        return progress_output.str();
+    }
+
+    static double calculate_speed(curl_off_t now_downloaded, const std::chrono::steady_clock::time_point & start_time) {
+        const auto now = std::chrono::steady_clock::now();
+        const std::chrono::duration<double> elapsed_seconds = now - start_time;
+        return now_downloaded / elapsed_seconds.count();
+    }
+
+    static std::string generate_progress_suffix(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download,
+                                                double speed, double estimated_time) {
+        std::ostringstream progress_output;
+        progress_output << human_readable_size(now_downloaded_plus_file_size).c_str() << "/"
+                        << human_readable_size(total_to_download).c_str() << " " << std::fixed << std::setprecision(2)
+                        << speed / (1024 * 1024) << " MB/s " << human_readable_time(estimated_time).c_str();
+        return progress_output.str();
+    }
+
+    static int calculate_progress_bar_width(const std::string & progress_prefix, const std::string & progress_suffix) {
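+        // The "- 5" keeps a little slack for the separators around the bar; the
+        // bar never shrinks below 10 columns on narrow terminals.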
+        int progress_bar_width = get_terminal_width() - progress_prefix.size() - progress_suffix.size() - 5;
+        if (progress_bar_width < 10) {
+            progress_bar_width = 10;
+        }
+        return progress_bar_width;
+    }
+
+    static std::string generate_progress_bar(int progress_bar_width, curl_off_t percentage,
+                                             std::string & progress_bar) {
+        const curl_off_t pos = (percentage * progress_bar_width) / 100;
+        for (int i = 0; i < progress_bar_width; ++i) {
+            progress_bar.append((i < pos) ? "█" : " ");
+        }
+
+        return progress_bar;
+    }
+
+    static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
+                               const std::string & progress_suffix) {
+        std::ostringstream progress_output;
+        progress_output << progress_prefix << progress_bar << "| " << progress_suffix;
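+        // "\r%*s\r" first overwrites the old line with a full terminal width of
+        // spaces, so a shorter update never leaves stale characters behind.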
+        printe("\r%*s\r%s", get_terminal_width(), "", progress_output.str().c_str());
+        fflush(stderr);
+    }
+
     // Function to write data to a file
     static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
         FILE * out = static_cast<FILE *>(stream);
@@ -467,6 +530,7 @@ class LlamaData {
         llama_model_params model_params = llama_model_default_params();
         model_params.n_gpu_layers = opt.ngl_ >= 0 ? opt.ngl_ : model_params.n_gpu_layers;
         resolve_model(opt.model_);
+        printe("Loading model");
         llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), model_params));
         if (!model) {
             printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
@@ -478,8 +542,7 @@ class LlamaData {
     // Initializes the context with the specified parameters
     llama_context_ptr initialize_context(const llama_model_ptr & model, const int n_ctx) {
         llama_context_params ctx_params = llama_context_default_params();
-        ctx_params.n_ctx = n_ctx;
-        ctx_params.n_batch = n_ctx;
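+        // With the new default of -1, both n_ctx and n_batch fall back to the
+        // library's default n_batch value.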
+        ctx_params.n_ctx = ctx_params.n_batch = n_ctx >= 0 ? n_ctx : ctx_params.n_batch;
         llama_context_ptr context(llama_new_context_with_model(model.get(), ctx_params));
         if (!context) {
             printe("%s: error: failed to create the llama_context\n", __func__);
@@ -642,8 +705,9 @@ static int handle_user_input(std::string & user_input, const std::string & user_
     }
 
     printf(
-        "\r            "
-        "\r\033[32m> \033[0m");
+        "\r%*s"
+        "\r\033[32m> \033[0m",
+        get_terminal_width(), "");
     return read_user_input(user_input); // Returns true if input ends the loop
 }
 
@@ -682,8 +746,9 @@ static int chat_loop(LlamaData & llama_data, const std::string & user_) {
     return 0;
 }
 
-static void log_callback(const enum ggml_log_level level, const char * text, void *) {
-    if (level == GGML_LOG_LEVEL_ERROR) {
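+// The Opt instance is passed through llama_log_set() as the callback's user
+// data: with --verbose every message is printed, otherwise only errors.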
+static void log_callback(const enum ggml_log_level level, const char * text, void * p) {
+    const Opt * opt = static_cast<Opt *>(p);
+    if (opt->verbose_ || level == GGML_LOG_LEVEL_ERROR) {
         printe("%s", text);
     }
 }
@@ -721,7 +786,7 @@ int main(int argc, const char ** argv) {
         opt.user_ += read_pipe_data();
     }
 
-    llama_log_set(log_callback, nullptr);
+    llama_log_set(log_callback, &opt);
     LlamaData llama_data;
     if (llama_data.init(opt)) {
         return 1;