diff --git a/src/benchmark/executor.rs b/src/benchmark/executor.rs index 03e2b6a19..14653823e 100644 --- a/src/benchmark/executor.rs +++ b/src/benchmark/executor.rs @@ -37,7 +37,7 @@ pub trait Executor { fn run_command_and_measure( &self, command: &Command<'_>, - iteration: BenchmarkIteration, + iteration: &BenchmarkIteration, command_failure_action: Option<CmdFailureAction>, output_policy: &CommandOutputPolicy, ) -> Result<(TimingResult, ExitStatus)>; @@ -57,7 +57,7 @@ pub trait Executor { fn run_command_and_measure_common( mut command: std::process::Command, - iteration: BenchmarkIteration, + iteration: &BenchmarkIteration, command_failure_action: CmdFailureAction, command_input_policy: &CommandInputPolicy, command_output_policy: &CommandOutputPolicy, @@ -115,7 +115,7 @@ impl Executor for RawExecutor<'_> { fn run_command_and_measure( &self, command: &Command<'_>, - iteration: BenchmarkIteration, + iteration: &BenchmarkIteration, command_failure_action: Option<CmdFailureAction>, output_policy: &CommandOutputPolicy, ) -> Result<(TimingResult, ExitStatus)> { @@ -168,7 +168,7 @@ impl Executor for ShellExecutor<'_> { fn run_command_and_measure( &self, command: &Command<'_>, - iteration: BenchmarkIteration, + iteration: &BenchmarkIteration, command_failure_action: Option<CmdFailureAction>, output_policy: &CommandOutputPolicy, ) -> Result<(TimingResult, ExitStatus)> { @@ -232,7 +232,7 @@ impl Executor for ShellExecutor<'_> { // Just run the shell without any command let res = self.run_command_and_measure( &Command::new(None, ""), - BenchmarkIteration::NonBenchmarkRun, + &BenchmarkIteration::NonBenchmarkRun, None, &CommandOutputPolicy::Null, ); @@ -305,7 +305,7 @@ impl Executor for MockExecutor { fn run_command_and_measure( &self, command: &Command<'_>, - _iteration: BenchmarkIteration, + _iteration: &BenchmarkIteration, _command_failure_action: Option<CmdFailureAction>, _output_policy: &CommandOutputPolicy, ) -> Result<(TimingResult, ExitStatus)> { diff --git a/src/benchmark/mod.rs b/src/benchmark/mod.rs index e3534a7bc..7a5b596d4 100644 
--- a/src/benchmark/mod.rs +++ b/src/benchmark/mod.rs @@ -59,11 +59,12 @@ impl<'a> Benchmark<'a> { command: &Command<'_>, error_output: &'static str, output_policy: &CommandOutputPolicy, + iteration: &executor::BenchmarkIteration, ) -> Result<TimingResult> { self.executor .run_command_and_measure( command, - executor::BenchmarkIteration::NonBenchmarkRun, + iteration, Some(CmdFailureAction::RaiseError), output_policy, ) @@ -76,6 +77,7 @@ impl<'a> Benchmark<'a> { &self, parameters: impl IntoIterator<Item = (&'a str, ParameterValue)>, output_policy: &CommandOutputPolicy, + iteration: executor::BenchmarkIteration, ) -> Result<TimingResult> { let command = self .options @@ -87,7 +89,7 @@ Append ' || true' to the command if you are sure that this can be ignored."; Ok(command - .map(|cmd| self.run_intermediate_command(&cmd, error_output, output_policy)) + .map(|cmd| self.run_intermediate_command(&cmd, error_output, output_policy, &iteration)) .transpose()? .unwrap_or_default()) } @@ -97,6 +99,7 @@ impl<'a> Benchmark<'a> { &self, parameters: impl IntoIterator<Item = (&'a str, ParameterValue)>, output_policy: &CommandOutputPolicy, + iteration: executor::BenchmarkIteration, ) -> Result<TimingResult> { let command = self .options @@ -108,7 +111,7 @@ Append ' || true' to the command if you are sure that this can be ignored."; Ok(command - .map(|cmd| self.run_intermediate_command(&cmd, error_output, output_policy)) + .map(|cmd| self.run_intermediate_command(&cmd, error_output, output_policy, &iteration)) .transpose()? .unwrap_or_default()) } @@ -118,11 +121,12 @@ impl<'a> Benchmark<'a> { &self, command: &Command<'_>, output_policy: &CommandOutputPolicy, + iteration: &executor::BenchmarkIteration, ) -> Result<TimingResult> { let error_output = "The preparation command terminated with a non-zero exit code. 
\ Append ' || true' to the command if you are sure that this can be ignored."; - self.run_intermediate_command(command, error_output, output_policy) + self.run_intermediate_command(command, error_output, output_policy, iteration) } /// Run the command specified by `--conclude`. @@ -130,11 +134,12 @@ impl<'a> Benchmark<'a> { &self, command: &Command<'_>, output_policy: &CommandOutputPolicy, + iteration: executor::BenchmarkIteration, ) -> Result<TimingResult> { let error_output = "The conclusion command terminated with a non-zero exit code. \ Append ' || true' to the command if you are sure that this can be ignored."; - self.run_intermediate_command(command, error_output, output_policy) + self.run_intermediate_command(command, error_output, output_policy, &iteration) } /// Run the benchmark for a single command @@ -170,10 +175,10 @@ impl<'a> Benchmark<'a> { ) }); - let run_preparation_command = || { + let run_preparation_command = |iteration: &executor::BenchmarkIteration| { preparation_command .as_ref() - .map(|cmd| self.run_preparation_command(cmd, output_policy)) + .map(|cmd| self.run_preparation_command(cmd, output_policy, iteration)) .transpose() }; @@ -189,14 +194,18 @@ impl<'a> Benchmark<'a> { self.command.get_parameters().iter().cloned(), ) }); - let run_conclusion_command = || { + let run_conclusion_command = |iteration: executor::BenchmarkIteration| { conclusion_command .as_ref() - .map(|cmd| self.run_conclusion_command(cmd, output_policy)) + .map(|cmd| self.run_conclusion_command(cmd, output_policy, iteration)) .transpose() }; - self.run_setup_command(self.command.get_parameters().iter().cloned(), output_policy)?; + self.run_setup_command( self.command.get_parameters().iter().cloned(), output_policy, executor::BenchmarkIteration::NonBenchmarkRun, )?; // Warmup phase if self.options.warmup_count > 0 { @@ -211,14 +220,15 @@ impl<'a> Benchmark<'a> { }; for i in 0..self.options.warmup_count { - let _ = run_preparation_command()?; + let warmup_iteration = 
BenchmarkIteration::Warmup(i); + let _ = run_preparation_command(&warmup_iteration)?; let _ = self.executor.run_command_and_measure( self.command, - BenchmarkIteration::Warmup(i), + &warmup_iteration, None, output_policy, )?; - let _ = run_conclusion_command()?; + let _ = run_conclusion_command(warmup_iteration)?; if let Some(bar) = progress_bar.as_ref() { bar.inc(1) } @@ -239,20 +249,21 @@ impl<'a> Benchmark<'a> { None }; - let preparation_result = run_preparation_command()?; + let benchmark_iteration = BenchmarkIteration::Benchmark(0); + let preparation_result = run_preparation_command(&benchmark_iteration)?; let preparation_overhead = preparation_result.map_or(0.0, |res| res.time_real + self.executor.time_overhead()); // Initial timing run let (res, status) = self.executor.run_command_and_measure( self.command, - BenchmarkIteration::Benchmark(0), + &benchmark_iteration, None, output_policy, )?; let success = status.success(); - let conclusion_result = run_conclusion_command()?; + let conclusion_result = run_conclusion_command(benchmark_iteration)?; let conclusion_overhead = conclusion_result.map_or(0.0, |res| res.time_real + self.executor.time_overhead()); @@ -295,7 +306,8 @@ impl<'a> Benchmark<'a> { // Gather statistics (perform the actual benchmark) for i in 0..count_remaining { - run_preparation_command()?; + let benchmark_iteration = BenchmarkIteration::Benchmark(i + 1); + run_preparation_command(&benchmark_iteration)?; let msg = { let mean = format_duration(mean(&times_real), self.options.time_unit); @@ -308,7 +320,7 @@ let (res, status) = self.executor.run_command_and_measure( self.command, - BenchmarkIteration::Benchmark(i + 1), + &benchmark_iteration, None, output_policy, )?; @@ -326,7 +338,7 @@ bar.inc(1) } - run_conclusion_command()?; + run_conclusion_command(benchmark_iteration)?; } if let Some(bar) = progress_bar.as_ref() { @@ -441,7 +453,11 @@ impl<'a> Benchmark<'a> { println!(" "); } - 
self.run_cleanup_command(self.command.get_parameters().iter().cloned(), output_policy)?; + self.run_cleanup_command( self.command.get_parameters().iter().cloned(), output_policy, executor::BenchmarkIteration::NonBenchmarkRun, )?; Ok(BenchmarkResult { command: self.command.get_name(), diff --git a/tests/integration_tests.rs b/tests/integration_tests.rs index 4efe20c63..750fe5ecf 100644 --- a/tests/integration_tests.rs +++ b/tests/integration_tests.rs @@ -638,3 +638,119 @@ fn windows_quote_before_quote_args() { .assert() .success(); } + +#[cfg(unix)] +#[test] +fn hyperfine_iteration_env_var_in_prepare_command() { + use tempfile::tempdir; + + let tempdir = tempdir().unwrap(); + let output_path = tempdir.path().join("iteration_output.txt"); + + // Write HYPERFINE_ITERATION value to a file during prepare + hyperfine() + .arg("--runs=2") + .arg("--warmup=1") + .arg(format!( + "--prepare=echo $HYPERFINE_ITERATION >> {}", + output_path.to_string_lossy() + )) + .arg("echo test") + .assert() + .success(); + + let contents = std::fs::read_to_string(output_path).unwrap(); + let lines: Vec<&str> = contents.lines().collect(); + + assert_eq!(lines.len(), 3); + assert_eq!(lines[0], "warmup-0"); + assert_eq!(lines[1], "0"); + assert_eq!(lines[2], "1"); +} + +#[cfg(windows)] +#[test] +fn hyperfine_iteration_env_var_in_prepare_command() { + use tempfile::tempdir; + + let tempdir = tempdir().unwrap(); + let output_path = tempdir.path().join("iteration_output.txt"); + + // Write HYPERFINE_ITERATION value to a file during prepare + hyperfine() + .arg("--runs=2") + .arg("--warmup=1") + .arg(format!( + "--prepare=echo %HYPERFINE_ITERATION% >> {}", + output_path.to_string_lossy() + )) + .arg("echo test") + .assert() + .success(); + + let contents = std::fs::read_to_string(output_path).unwrap(); + let lines: Vec<String> = contents.lines().map(|l| l.trim().to_string()).collect(); + + assert_eq!(lines.len(), 3); + assert_eq!(lines[0], "warmup-0"); + assert_eq!(lines[1], "0"); + 
assert_eq!(lines[2], "1"); +} + +#[cfg(unix)] +#[test] +fn hyperfine_iteration_env_var_in_conclude_command() { + use tempfile::tempdir; + + let tempdir = tempdir().unwrap(); + let output_path = tempdir.path().join("iteration_output.txt"); + + // Write HYPERFINE_ITERATION value to a file during conclude + hyperfine() + .arg("--runs=2") + .arg("--warmup=1") + .arg(format!( + "--conclude=echo $HYPERFINE_ITERATION >> {}", + output_path.to_string_lossy() + )) + .arg("echo test") + .assert() + .success(); + + let contents = std::fs::read_to_string(output_path).unwrap(); + let lines: Vec<&str> = contents.lines().collect(); + + assert_eq!(lines.len(), 3); + assert_eq!(lines[0], "warmup-0"); + assert_eq!(lines[1], "0"); + assert_eq!(lines[2], "1"); +} + +#[cfg(windows)] +#[test] +fn hyperfine_iteration_env_var_in_conclude_command() { + use tempfile::tempdir; + + let tempdir = tempdir().unwrap(); + let output_path = tempdir.path().join("iteration_output.txt"); + + // Write HYPERFINE_ITERATION value to a file during conclude + hyperfine() + .arg("--runs=2") + .arg("--warmup=1") + .arg(format!( + "--conclude=echo %HYPERFINE_ITERATION% >> {}", + output_path.to_string_lossy() + )) + .arg("echo test") + .assert() + .success(); + + let contents = std::fs::read_to_string(output_path).unwrap(); + let lines: Vec<String> = contents.lines().map(|l| l.trim().to_string()).collect(); + + assert_eq!(lines.len(), 3); + assert_eq!(lines[0], "warmup-0"); + assert_eq!(lines[1], "0"); + assert_eq!(lines[2], "1"); +}