Add iteration information to failure error message
sharkdp committed Nov 10, 2024
1 parent e3e8617 commit 4d7e229
Showing 2 changed files with 67 additions and 11 deletions.
src/benchmark/executor.rs (49 changes: 44 additions & 5 deletions)
@@ -16,11 +16,28 @@ use super::timing_result::TimingResult;
use anyhow::{bail, Context, Result};
use statistical::mean;

pub enum BenchmarkIteration {
NonBenchmarkRun,
Warmup(u64),
Benchmark(u64),
}

impl BenchmarkIteration {
pub fn to_env_var_value(&self) -> Option<String> {
match self {
BenchmarkIteration::NonBenchmarkRun => None,
BenchmarkIteration::Warmup(i) => Some(format!("warmup-{}", i)),
BenchmarkIteration::Benchmark(i) => Some(format!("{}", i)),
}
}
}

pub trait Executor {
/// Run the given command and measure the execution time
fn run_command_and_measure(
&self,
command: &Command<'_>,
iteration: BenchmarkIteration,
command_failure_action: Option<CmdFailureAction>,
) -> Result<(TimingResult, ExitStatus)>;

@@ -39,6 +56,7 @@ pub trait Executor {

fn run_command_and_measure_common(
mut command: std::process::Command,
iteration: BenchmarkIteration,
command_failure_action: CmdFailureAction,
command_input_policy: &CommandInputPolicy,
command_output_policy: &CommandOutputPolicy,
@@ -53,17 +71,29 @@ fn run_command_and_measure_common(
randomized_environment_offset::value(),
);

if let Some(value) = iteration.to_env_var_value() {
command.env("HYPERFINE_ITERATION", value);
}

let result = execute_and_measure(command)
.with_context(|| format!("Failed to run command '{command_name}'"))?;

if command_failure_action == CmdFailureAction::RaiseError && !result.status.success() {
let when = match iteration {
BenchmarkIteration::NonBenchmarkRun => "a non-benchmark run".to_string(),
BenchmarkIteration::Warmup(0) => "the first warmup run".to_string(),
BenchmarkIteration::Warmup(i) => format!("warmup iteration {i}"),
BenchmarkIteration::Benchmark(0) => "the first benchmark run".to_string(),
BenchmarkIteration::Benchmark(i) => format!("benchmark iteration {i}"),
};
bail!(
- "{}. Use the '-i'/'--ignore-failure' option if you want to ignore this. \
+ "{cause} in {when}. Use the '-i'/'--ignore-failure' option if you want to ignore this. \
Alternatively, use the '--show-output' option to debug what went wrong.",
- result.status.code().map_or(
+ cause=result.status.code().map_or(
"The process has been terminated by a signal".into(),
- |c| format!("Command terminated with non-zero exit code: {c}")
- )
+ |c| format!("Command terminated with non-zero exit code {c}")
+ ),
);
}

@@ -84,10 +114,12 @@ impl<'a> Executor for RawExecutor<'a> {
fn run_command_and_measure(
&self,
command: &Command<'_>,
iteration: BenchmarkIteration,
command_failure_action: Option<CmdFailureAction>,
) -> Result<(TimingResult, ExitStatus)> {
let result = run_command_and_measure_common(
command.get_command()?,
iteration,
command_failure_action.unwrap_or(self.options.command_failure_action),
&self.options.command_input_policy,
&self.options.command_output_policy,
@@ -133,6 +165,7 @@ impl<'a> Executor for ShellExecutor<'a> {
fn run_command_and_measure(
&self,
command: &Command<'_>,
iteration: BenchmarkIteration,
command_failure_action: Option<CmdFailureAction>,
) -> Result<(TimingResult, ExitStatus)> {
let on_windows_cmd = cfg!(windows) && *self.shell == Shell::Default("cmd.exe");
@@ -149,6 +182,7 @@ impl<'a> Executor for ShellExecutor<'a> {

let mut result = run_command_and_measure_common(
command_builder,
iteration,
command_failure_action.unwrap_or(self.options.command_failure_action),
&self.options.command_input_policy,
&self.options.command_output_policy,
@@ -191,7 +225,11 @@ impl<'a> Executor for ShellExecutor<'a> {

for _ in 0..COUNT {
// Just run the shell without any command
- let res = self.run_command_and_measure(&Command::new(None, ""), None);
+ let res = self.run_command_and_measure(
+ &Command::new(None, ""),
+ BenchmarkIteration::NonBenchmarkRun,
+ None,
+ );

match res {
Err(_) => {
@@ -260,6 +298,7 @@ impl Executor for MockExecutor {
fn run_command_and_measure(
&self,
command: &Command<'_>,
_iteration: BenchmarkIteration,
_command_failure_action: Option<CmdFailureAction>,
) -> Result<(TimingResult, ExitStatus)> {
#[cfg(unix)]
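A note on the new HYPERFINE_ITERATION variable set above: warmup runs receive values of the form "warmup-0", "warmup-1", ..., measured runs receive the plain 0-based iteration index, and non-benchmark runs (shell calibration and other auxiliary commands) leave the variable unset. Below is a minimal, hypothetical consumer (not part of this commit) sketching how a benchmarked program could read it:

use std::env;

fn main() {
    // HYPERFINE_ITERATION is set by run_command_and_measure_common() for warmup
    // and benchmark runs only.
    match env::var("HYPERFINE_ITERATION") {
        // Warmup runs arrive as "warmup-<i>".
        Ok(v) if v.starts_with("warmup-") => eprintln!("warmup run: {v}"),
        // Measured runs arrive as the plain iteration index: "0", "1", ...
        Ok(v) => eprintln!("benchmark iteration: {v}"),
        // Non-benchmark runs leave the variable unset.
        Err(_) => eprintln!("not running inside a measured iteration"),
    }
}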
src/benchmark/mod.rs (29 changes: 23 additions & 6 deletions)
@@ -6,6 +6,7 @@ pub mod timing_result;

use std::cmp;

use crate::benchmark::executor::BenchmarkIteration;
use crate::command::Command;
use crate::options::{CmdFailureAction, ExecutorKind, Options, OutputStyleOption};
use crate::outlier_detection::{modified_zscores, OUTLIER_THRESHOLD};
@@ -57,7 +58,11 @@ impl<'a> Benchmark<'a> {
error_output: &'static str,
) -> Result<TimingResult> {
self.executor
- .run_command_and_measure(command, Some(CmdFailureAction::RaiseError))
+ .run_command_and_measure(
+ command,
+ executor::BenchmarkIteration::NonBenchmarkRun,
+ Some(CmdFailureAction::RaiseError),
+ )
.map(|r| r.0)
.map_err(|_| anyhow!(error_output))
}
@@ -187,9 +192,13 @@ impl<'a> Benchmark<'a> {
None
};

- for _ in 0..self.options.warmup_count {
+ for i in 0..self.options.warmup_count {
let _ = run_preparation_command()?;
- let _ = self.executor.run_command_and_measure(self.command, None)?;
+ let _ = self.executor.run_command_and_measure(
+ self.command,
+ BenchmarkIteration::Warmup(i),
+ None,
+ )?;
let _ = run_conclusion_command()?;
if let Some(bar) = progress_bar.as_ref() {
bar.inc(1)
@@ -216,7 +225,11 @@ impl<'a> Benchmark<'a> {
preparation_result.map_or(0.0, |res| res.time_real + self.executor.time_overhead());

// Initial timing run
- let (res, status) = self.executor.run_command_and_measure(self.command, None)?;
+ let (res, status) = self.executor.run_command_and_measure(
+ self.command,
+ BenchmarkIteration::Benchmark(0),
+ None,
+ )?;
let success = status.success();

let conclusion_result = run_conclusion_command()?;
@@ -260,7 +273,7 @@ impl<'a> Benchmark<'a> {
}

// Gather statistics (perform the actual benchmark)
- for _ in 0..count_remaining {
+ for i in 0..count_remaining {
run_preparation_command()?;

let msg = {
@@ -272,7 +285,11 @@ impl<'a> Benchmark<'a> {
bar.set_message(msg.to_owned())
}

- let (res, status) = self.executor.run_command_and_measure(self.command, None)?;
+ let (res, status) = self.executor.run_command_and_measure(
+ self.command,
+ BenchmarkIteration::Benchmark(i + 1),
+ None,
+ )?;
let success = status.success();

times_real.push(res.time_real);
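End-to-end, the iteration information threaded through mod.rs now reaches the failure message built in executor.rs. As a rough sketch of the user-visible effect (the exit code and iteration number are assumed values, not captured hyperfine output), the snippet below simply recombines the format strings from the diff for a failure in a later measured run:

fn main() {
    // Hypothetical values for illustration only.
    let (exit_code, iteration) = (1, 3);
    let cause = format!("Command terminated with non-zero exit code {exit_code}");
    let when = format!("benchmark iteration {iteration}");
    println!(
        "{cause} in {when}. Use the '-i'/'--ignore-failure' option if you want to ignore this. \
         Alternatively, use the '--show-output' option to debug what went wrong."
    );
}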
