@@ -535,8 +535,7 @@ class HttpClient {
 
     static void print_progress(const std::string & progress_prefix, const std::string & progress_bar,
                                const std::string & progress_suffix) {
-        printe("\r%*s\r%s%s| %s", get_terminal_width(), " ", progress_prefix.c_str(), progress_bar.c_str(),
-               progress_suffix.c_str());
+        printe("\r" LOG_CLR_TO_EOL "%s%s| %s", progress_prefix.c_str(), progress_bar.c_str(), progress_suffix.c_str());
     }
     // Function to write data to a file
     static size_t write_data(void * ptr, size_t size, size_t nmemb, void * stream) {
@@ -797,16 +796,13 @@ class LlamaData {
     llama_model_ptr initialize_model(Opt & opt) {
         ggml_backend_load_all();
         resolve_model(opt.model_);
-        printe(
-            "\r%*s"
-            "\rLoading model",
-            get_terminal_width(), " ");
+        printe("\r" LOG_CLR_TO_EOL "Loading model");
         llama_model_ptr model(llama_model_load_from_file(opt.model_.c_str(), opt.model_params));
         if (!model) {
             printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
         }
 
-        printe("\r%*s\r", static_cast<int>(sizeof("Loading model")), " ");
+        printe("\r" LOG_CLR_TO_EOL);
         return model;
     }
 
@@ -969,10 +965,7 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::str
 static int read_user_input(std::string & user_input) {
     static const char * prompt_prefix = "> ";
 #ifdef WIN32
-    printf(
-        "\r%*s"
-        "\r" LOG_COL_DEFAULT "%s",
-        get_terminal_width(), " ", prompt_prefix);
+    printf("\r" LOG_CLR_TO_EOL LOG_COL_DEFAULT "%s", prompt_prefix);
 
     std::getline(std::cin, user_input);
     if (std::cin.eof()) {
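
Each hunk makes the same substitution: rather than erasing the current terminal line by printing get_terminal_width() spaces through the "%*s" field-width specifier, the line is cleared with the LOG_CLR_TO_EOL escape, which removes the need to query the terminal width at all. Below is a minimal standalone sketch of the before/after behavior, assuming LOG_CLR_TO_EOL expands to the ANSI "erase in line" sequence "\033[K" (the real macro is defined in common/log.h):

    #include <cstdio>

    // Assumption for this sketch: LOG_CLR_TO_EOL is the ANSI "erase in line"
    // escape (CSI K), which clears from the cursor to the end of the line.
    #define LOG_CLR_TO_EOL "\033[K"

    int main() {
        const int width = 80;  // stand-in for get_terminal_width()

        // Old approach: return to column 0, overwrite the line with `width`
        // spaces via the "%*s" padding specifier, return to column 0 again,
        // then print the new text.
        fprintf(stderr, "\r%*s\r%s\n", width, " ", "old: Loading model");

        // New approach: return to column 0 and clear to the end of the line
        // with one escape; no terminal-width query needed.
        fprintf(stderr, "\r" LOG_CLR_TO_EOL "%s\n", "new: Loading model");
        return 0;
    }

Both variants leave the cursor at column 0 ready to redraw the progress line; the escape-based form is shorter and independent of the terminal width, at the cost of assuming an ANSI-capable terminal.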