@@ -339,6 +339,17 @@ impl BenchmarkCommand {
         let this_exe =
             std::env::current_exe().context("failed to get the current executable's path")?;
 
+        let wasm_files: Vec<_> = self.benchmarks.iter().flat_map(|b| b.paths()).collect();
+        eprintln!(
+            "\nRunning {} total iterations ({} engines * {} benchmarks * {} processes * {} iterations per process)",
+            self.engines.len() * wasm_files.len() * self.processes * self.iterations_per_process,
+            self.engines.len(),
+            wasm_files.len(),
+            self.processes,
+            self.iterations_per_process
+        );
+        eprint!("\n[Done] [Elapsed    ] [Est. Rem.  ]");
+
         // Shuffle the order in which we spawn benchmark processes. This helps
         // us avoid some measurement bias from CPU state transitions that aren't
         // constrained within the duration of process execution, like dynamic
@@ -355,14 +366,18 @@ impl BenchmarkCommand {
             // and therefore potentially invalidating relative paths used here).
             let engine = check_engine_path(engine)?;
 
-            for wasm in self.benchmarks.iter().flat_map(|s| s.paths()) {
+            for wasm in wasm_files.iter().cloned() {
                 choices.push((engine.clone(), wasm, self.processes));
             }
         }
 
        // Accumulated measurements from all of our subprocesses.
        let mut measurements = vec![];
 
+        let mut i = 0;
+        let n = choices.len() * self.processes;
+        let start = std::time::Instant::now();
+
        while !choices.is_empty() {
            let index = rng.gen_range(0, choices.len());
            let (engine, wasm, procs_left) = &mut choices[index];
@@ -420,7 +435,33 @@ impl BenchmarkCommand {
                 String::from_utf8_lossy(&output.stdout)
             );
 
-            eprintln!(".");
+            const PRINT_EVERY: usize = 10;
+            i += 1;
+            debug_assert!(i <= n);
+            if i % PRINT_EVERY == 1 {
+                let percent_done = (i as f64) / (n as f64) * 100.0;
+
+                let elapsed = start.elapsed().as_secs();
+                let elapsed_hours = elapsed / 60 / 60;
+                let elapsed_mins = elapsed / 60 % 60;
+                let elapsed_secs = elapsed % 60;
+
+                let secs_per_proc = (elapsed as f64) / (i as f64);
+                let procs_left = n - i;
+                let eta = (procs_left as f64) * secs_per_proc;
+                let eta = eta.round() as u64;
+                let eta_hours = eta / 60 / 60;
+                let eta_mins = eta / 60 % 60;
+                let eta_secs = eta % 60;
+
+                eprint!(
+                    "\n\
+                     [{percent_done:>3.0}%] \
+                     [{elapsed_hours:02}h:{elapsed_mins:02}m:{elapsed_secs:02}s] \
+                     [{eta_hours:02}h:{eta_mins:02}m:{eta_secs:02}s] ",
+                )
+            }
+            eprint!(".");
 
             // Parse the subprocess's output and add its measurements to our
             // accumulation.
@@ -435,6 +476,12 @@ impl BenchmarkCommand {
             }
         }
 
+        let elapsed = start.elapsed();
+        let hours = elapsed.as_secs() / 60 / 60;
+        let mins = elapsed.as_secs() / 60 % 60;
+        let secs = elapsed.as_secs() % 60;
+        eprintln!("\n\nFinished benchmarking in {hours:02}h:{mins:02}m:{secs:02}s");
+
        self.write_results(&measurements, &mut output_file)?;
        Ok(())
    }
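
For reference, the progress arithmetic this patch introduces (elapsed seconds split into hours/minutes/seconds, and a remaining-time estimate from the mean per-process duration so far) can be exercised in isolation. The sketch below is not part of the patch; the hms and eta helpers and the example numbers are illustrative only.

use std::time::Duration;

/// Format a number of seconds as `HHh:MMm:SSs`, matching the output above.
fn hms(total_secs: u64) -> String {
    let hours = total_secs / 60 / 60;
    let mins = total_secs / 60 % 60;
    let secs = total_secs % 60;
    format!("{hours:02}h:{mins:02}m:{secs:02}s")
}

/// Estimate time remaining after `done` of `total` processes, assuming each
/// remaining process takes the mean duration observed so far.
fn eta(elapsed: Duration, done: usize, total: usize) -> Duration {
    let secs_per_proc = elapsed.as_secs_f64() / (done as f64);
    let remaining = (total - done) as f64;
    Duration::from_secs((remaining * secs_per_proc).round() as u64)
}

fn main() {
    // Example: 25 of 640 benchmark processes finished after five minutes.
    let elapsed = Duration::from_secs(5 * 60);
    println!("[Elapsed  ] {}", hms(elapsed.as_secs()));               // 00h:05m:00s
    println!("[Est. Rem.] {}", hms(eta(elapsed, 25, 640).as_secs())); // 02h:03m:00s
}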