 #if defined(_WIN32)
 #    include <windows.h>
 #else
+#    include <sys/ioctl.h>
 #    include <unistd.h>
 #endif
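Note: the new POSIX branch pulls in <sys/ioctl.h> for the ioctl()/TIOCGWINSZ winsize query used by get_terminal_width() further down; the existing <windows.h> include already covers the console-buffer API used on the Windows side.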
 
 class Opt {
   public:
     int init(int argc, const char ** argv) {
-        construct_help_str_();
         // Parse arguments
         if (parse(argc, argv)) {
             printe("Error: Failed to parse arguments.\n");
@@ -48,14 +48,49 @@ class Opt {
 
     std::string model_;
     std::string user_;
-    int         context_size_ = 2048, ngl_ = -1;
+    int         context_size_ = -1, ngl_ = -1;
 
   private:
-    std::string help_str_;
     bool        help_ = false;
 
-    void construct_help_str_() {
-        help_str_ =
+    int parse(int argc, const char ** argv) {
+        int positional_args_i = 0;
+        for (int i = 1; i < argc; ++i) {
+            if (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0) {
+                if (i + 1 >= argc) {
+                    return 1;
+                }
+
+                context_size_ = std::atoi(argv[++i]);
+            } else if (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--ngl") == 0) {
+                if (i + 1 >= argc) {
+                    return 1;
+                }
+
+                ngl_ = std::atoi(argv[++i]);
+            } else if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
+                help_ = true;
+                return 0;
+            } else if (!positional_args_i) {
+                if (!argv[i][0] || argv[i][0] == '-') {
+                    return 1;
+                }
+
+                ++positional_args_i;
+                model_ = argv[i];
+            } else if (positional_args_i == 1) {
+                ++positional_args_i;
+                user_ = argv[i];
+            } else {
+                user_ += " " + std::string(argv[i]);
+            }
+        }
+
+        return model_.empty();  // model_ is the only required value
+    }
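Note: the relocated parse() keeps the old semantics (the first positional argument is the model, every later positional is appended to the prompt) and adds a guard so an empty or '-'-prefixed token is rejected as a model name instead of being accepted silently. A minimal sketch of the calling side, assuming a typical main(); the rest of init() is truncated by the hunk above, so the exact return codes are not visible here:

    Opt opt;
    if (opt.init(argc, argv)) {
        return 1;  // parse failed (or help was shown); model_ is the only required value
    }
    // opt.model_, opt.user_, opt.context_size_ and opt.ngl_ now carry the CLI state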
+
+    void help() const {
+        printf(
             "Description:\n"
             "  Runs a llm\n"
             "\n"
@@ -64,15 +99,9 @@ class Opt {
             "\n"
             "Options:\n"
             "  -c, --context-size <value>\n"
-            "      Context size (default: " +
-            std::to_string(context_size_);
-        help_str_ +=
-            ")\n"
+            "      Context size (default: %d)\n"
             "  -n, --ngl <value>\n"
-            "      Number of GPU layers (default: " +
-            std::to_string(ngl_);
-        help_str_ +=
-            ")\n"
+            "      Number of GPU layers (default: %d)\n"
             "  -h, --help\n"
             "      Show help message\n"
             "\n"
@@ -96,43 +125,10 @@ class Opt {
96125 " llama-run https://example.com/some-file1.gguf\n "
97126 " llama-run some-file2.gguf\n "
98127 " llama-run file://some-file3.gguf\n "
99- " llama-run --ngl 99 some-file4.gguf\n "
100- " llama-run --ngl 99 some-file5.gguf Hello World\n " ;
128+ " llama-run --ngl 999 some-file4.gguf\n "
129+ " llama-run --ngl 999 some-file5.gguf Hello World\n " ,
130+ llama_context_default_params ().n_batch , llama_model_default_params ().n_gpu_layers );
101131 }
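Note: help() is now a single printf() call. The first %d is filled from llama_context_default_params().n_batch and the second from llama_model_default_params().n_gpu_layers, so the printed defaults track the library instead of hard-coded literals. Reading the context-size default out of n_batch is deliberate: it mirrors initialize_context() below, which sets n_ctx = n_batch whenever -c is not given.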
-
-    int parse(int argc, const char ** argv) {
-        int positional_args_i = 0;
-        for (int i = 1; i < argc; ++i) {
-            if (strcmp(argv[i], "-c") == 0 || strcmp(argv[i], "--context-size") == 0) {
-                if (i + 1 >= argc) {
-                    return 1;
-                }
-
-                context_size_ = std::atoi(argv[++i]);
-            } else if (strcmp(argv[i], "-n") == 0 || strcmp(argv[i], "--ngl") == 0) {
-                if (i + 1 >= argc) {
-                    return 1;
-                }
-
-                ngl_ = std::atoi(argv[++i]);
-            } else if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0) {
-                help_ = true;
-                return 0;
-            } else if (!positional_args_i) {
-                ++positional_args_i;
-                model_ = argv[i];
-            } else if (positional_args_i == 1) {
-                ++positional_args_i;
-                user_ = argv[i];
-            } else {
-                user_ += " " + std::string(argv[i]);
-            }
-        }
-
-        return model_.empty();  // model_ is the only required value
-    }
-
-    void help() const { printf("%s", help_str_.c_str()); }
 };
 
 struct progress_data {
@@ -151,6 +147,18 @@ struct FileDeleter {
 
 typedef std::unique_ptr<FILE, FileDeleter> FILE_ptr;
 
+static int get_terminal_width() {
+#if defined(_WIN32)
+    CONSOLE_SCREEN_BUFFER_INFO csbi;
+    GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi);
+    return csbi.srWindow.Right - csbi.srWindow.Left + 1;
+#else
+    struct winsize w;
+    ioctl(STDOUT_FILENO, TIOCGWINSZ, &w);
+    return w.ws_col;
+#endif
+}
+
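Note: neither branch of get_terminal_width() checks for failure. When stdout is redirected to a pipe, ioctl() fails and leaves w uninitialized, and GetConsoleScreenBufferInfo() can return FALSE. A more defensive variant (a sketch, not what this commit does) would zero-initialize and fall back to the conventional 80 columns:

    static int get_terminal_width() {
    #if defined(_WIN32)
        CONSOLE_SCREEN_BUFFER_INFO csbi;
        if (!GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &csbi)) {
            return 80;  // not a console (e.g. redirected): assume the classic width
        }
        return csbi.srWindow.Right - csbi.srWindow.Left + 1;
    #else
        struct winsize w = {};
        if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &w) != 0 || w.ws_col == 0) {
            return 80;  // pipe or redirect: assume the classic width
        }
        return w.ws_col;
    #endif
    }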
 #ifdef LLAMA_USE_CURL
 class CurlWrapper {
   public:
@@ -270,9 +278,9 @@ class CurlWrapper {
 
     static std::string human_readable_size(curl_off_t size) {
         static const char * suffix[] = { "B", "KB", "MB", "GB", "TB" };
-        char                length   = sizeof(suffix) / sizeof(suffix[0]);
-        int                 i        = 0;
-        double              dbl_size = size;
+        char   length   = sizeof(suffix) / sizeof(suffix[0]);
+        int    i        = 0;
+        double dbl_size = size;
         if (size > 1024) {
             for (i = 0; (size / 1024) > 0 && i < length - 1; i++, size /= 1024) {
                 dbl_size = size / 1024.0;
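Worked example: size = 3,407,872 bytes. The loop runs twice (size: 3,407,872 → 3,328 → 3), leaving i = 2 and dbl_size = 3328 / 1024.0 = 3.25, so the function (its tail is cut off by this hunk) ends up formatting the value as "3.25 MB".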
@@ -293,27 +301,75 @@ class CurlWrapper {
 
         total_to_download += data->file_size;
         const curl_off_t now_downloaded_plus_file_size = now_downloaded + data->file_size;
-        const curl_off_t percentage = (now_downloaded_plus_file_size * 100) / total_to_download;
-        const curl_off_t pos        = (percentage / 5);
+        const curl_off_t percentage      = calculate_percentage(now_downloaded_plus_file_size, total_to_download);
+        std::string      progress_prefix = generate_progress_prefix(percentage);
+
+        const double speed = calculate_speed(now_downloaded, data->start_time);
+        const double tim   = (total_to_download - now_downloaded) / speed;
+        std::string  progress_suffix =
+            generate_progress_suffix(now_downloaded_plus_file_size, total_to_download, speed, tim);
+
+        const int   progress_bar_width = calculate_progress_bar_width(progress_prefix, progress_suffix);
         std::string progress_bar;
-        for (int i = 0; i < 20; ++i) {
-            progress_bar.append((i < pos) ? "█" : " ");
-        }
+        generate_progress_bar(progress_bar_width, percentage, progress_bar);
 
-        // Calculate download speed and estimated time to completion
-        const auto                          now             = std::chrono::steady_clock::now();
-        const std::chrono::duration<double> elapsed_seconds = now - data->start_time;
-        const double                        speed           = now_downloaded / elapsed_seconds.count();
-        const double                        estimated_time  = (total_to_download - now_downloaded) / speed;
-        printe("\r%ld%% |%s| %s/%s %.2f MB/s %s      ", percentage, progress_bar.c_str(),
-               human_readable_size(now_downloaded).c_str(), human_readable_size(total_to_download).c_str(),
-               speed / (1024 * 1024), human_readable_time(estimated_time).c_str());
-        fflush(stderr);
+        print_progress(progress_prefix, progress_bar, progress_suffix);
         data->printed = true;
 
         return 0;
     }
 
+    static curl_off_t calculate_percentage(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download) {
+        return (now_downloaded_plus_file_size * 100) / total_to_download;
+    }
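Note: calculate_percentage() stays in integer arithmetic; multiplying by 100 before dividing keeps whole-percent precision without floating point, and since curl_off_t is 64-bit the multiplication cannot realistically overflow for file sizes. It assumes total_to_download is non-zero, presumably guaranteed by an early return above this hunk.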
+
+    static std::string generate_progress_prefix(curl_off_t percentage) {
+        std::ostringstream progress_output;
+        progress_output << percentage << "% |";
+        return progress_output.str();
+    }
+
+    static double calculate_speed(curl_off_t now_downloaded, const std::chrono::steady_clock::time_point & start_time) {
+        const auto                          now             = std::chrono::steady_clock::now();
+        const std::chrono::duration<double> elapsed_seconds = now - start_time;
+        return now_downloaded / elapsed_seconds.count();
+    }
+
+    static std::string generate_progress_suffix(curl_off_t now_downloaded_plus_file_size, curl_off_t total_to_download,
+                                                double speed, double estimated_time) {
+        std::ostringstream progress_output;
+        progress_output << human_readable_size(now_downloaded_plus_file_size).c_str() << "/"
+                        << human_readable_size(total_to_download).c_str() << " " << std::fixed << std::setprecision(2)
+                        << speed / (1024 * 1024) << " MB/s " << human_readable_time(estimated_time).c_str();
+        return progress_output.str();
+    }
+
+    static int calculate_progress_bar_width(const std::string & progress_prefix, const std::string & progress_suffix) {
+        int progress_bar_width = get_terminal_width() - progress_prefix.size() - progress_suffix.size() - 5;
+        if (progress_bar_width < 10) {
+            progress_bar_width = 10;
+        }
+        return progress_bar_width;
+    }
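Note: the 5 columns subtracted in calculate_progress_bar_width() reserve room for the two-character "| " that closes the bar plus a few columns of slack, and the floor of 10 keeps the bar visible even in very narrow terminals. Using .size() is safe here because prefix and suffix are plain ASCII, so byte length equals display width.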
+
+    static std::string generate_progress_bar(int progress_bar_width, curl_off_t percentage,
+                                             std::string & progress_bar) {
+        const curl_off_t pos = (percentage * progress_bar_width) / 100;
+        for (int i = 0; i < progress_bar_width; ++i) {
+            progress_bar.append((i < pos) ? "█" : " ");
+        }
+
+        return progress_bar;
+    }
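Note: the fill position now scales with the real bar width instead of the old fixed 20 cells: at 42% with a 60-column bar, pos = 42 * 60 / 100 = 25 filled cells. Two quirks: "█" is a 3-byte UTF-8 sequence, which is harmless because the string is only appended to and printed, never measured; and the function both fills its by-reference parameter and returns it, while the call site above ignores the return value.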
+
+    static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
+                               const std::string & progress_suffix) {
+        std::ostringstream progress_output;
+        progress_output << progress_prefix << progress_bar << "| " << progress_suffix;
+        printe("\r%*s\r%s", get_terminal_width(), " ", progress_output.str().c_str());
+        fflush(stderr);
+    }
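Note: "\r%*s\r%s" is the line-clearing idiom used throughout this commit: carriage return, a single space right-padded by %*s to the full terminal width (wiping leftovers from a longer previous line), then a second carriage return followed by the fresh progress line; printe() is assumed to be the project's fprintf(stderr, ...)-style helper. A standalone illustration:

    printf("0123456789");
    printf("\r%*s\r%s", 10, " ", "abc");  // the line now shows just "abc"
    fflush(stdout);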
+
     // Function to write data to a file
     static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
         FILE * out = static_cast<FILE *>(stream);
@@ -467,6 +523,7 @@ class LlamaData {
         llama_model_params model_params = llama_model_default_params();
         model_params.n_gpu_layers       = opt.ngl_ >= 0 ? opt.ngl_ : model_params.n_gpu_layers;
         resolve_model(opt.model_);
+        printe("Loading model");
         llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), model_params));
         if (!model) {
             printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
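Note: the new "Loading model" message is printed without a newline, presumably so the next "\r"-prefixed status line can overwrite it in place; on failure, the error message above would start on the same line.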
@@ -478,8 +535,7 @@ class LlamaData {
     // Initializes the context with the specified parameters
     llama_context_ptr initialize_context(const llama_model_ptr & model, const int n_ctx) {
         llama_context_params ctx_params = llama_context_default_params();
-        ctx_params.n_ctx                = n_ctx;
-        ctx_params.n_batch              = n_ctx;
+        ctx_params.n_ctx = ctx_params.n_batch = n_ctx >= 0 ? n_ctx : ctx_params.n_batch;
         llama_context_ptr context(llama_new_context_with_model(model.get(), ctx_params));
         if (!context) {
             printe("%s: error: failed to create the llama_context\n", __func__);
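Note: the chained assignment replaces the two hard-coded stores. When the user passed -c, both n_ctx and n_batch get that value, preserving the old behavior of sizing the batch to the context; with the new context_size_ default of -1 they both fall back to the library's default n_batch, which is exactly the figure help() now advertises as the context-size default.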
@@ -642,8 +698,9 @@ static int handle_user_input(std::string & user_input, const std::string & user_
     }
 
     printf(
-        "\r                                                                    "
-        "\r\033[32m> \033[0m");
+        "\r%*s"
+        "\r\033[32m> \033[0m",
+        get_terminal_width(), " ");
     return read_user_input(user_input);  // Returns true if input ends the loop
 }
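Note: the prompt redraw uses the same idiom as print_progress(): "\r%*s" blanks the input line to the measured terminal width instead of the old fixed run of spaces, and "\033[32m> \033[0m" then draws the "> " prompt in green (ESC[32m selects green, ESC[0m resets attributes).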