Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions crates/cargo-codspeed/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,26 @@ Options:
-V, --version Print version information
```

### Running benchmarks with details

Use the `--details` flag to see timing information for each benchmark:

```bash
cargo codspeed run --details
```

This will show execution times for each benchmark:
```
Checked: benches/example.rs::fibonacci (5.6 us)
Checked: benches/example.rs::factorial (368 ns)
```

The output also shows the total number of benchmarks executed:
```
Done running benchmark_suite (5 benchmarks)
Finished running 2 benchmark suite(s) (10 benchmarks total)
```

## Development

### Troubleshooting
Expand Down
6 changes: 6 additions & 0 deletions crates/cargo-codspeed/src/app.rs
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,10 @@ enum Commands {

#[command(flatten)]
bench_target_filters: BenchTargetFilters,

/// Print per-benchmark duration details
#[arg(long)]
details: bool,
},
}

Expand Down Expand Up @@ -141,12 +145,14 @@ pub fn run(args: impl Iterator<Item = OsString>) -> Result<()> {
benchname,
package_filters,
bench_target_filters,
details,
} => run_benches(
&metadata,
benchname,
package_filters,
bench_target_filters,
measurement_mode,
details,
),
};

Expand Down
101 changes: 73 additions & 28 deletions crates/cargo-codspeed/src/run.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ use codspeed::walltime_results::WalltimeResults;
use std::{
io::{self, Write},
path::{Path, PathBuf},
process::Stdio,
};

#[cfg(unix)]
Expand Down Expand Up @@ -99,6 +100,7 @@ pub fn run_benches(
package_filters: PackageFilters,
bench_target_filters: BenchTargetFilters,
measurement_mode: MeasurementMode,
show_details: bool,
) -> Result<()> {
let codspeed_target_dir = get_codspeed_target_dir(metadata, measurement_mode);
let workspace_root = metadata.workspace_root.as_std_path();
Expand All @@ -113,16 +115,29 @@ pub fn run_benches(

eprintln!("Collected {} benchmark suite(s) to run", benches.len());

let mut total_benchmark_count = 0;

for bench in benches.iter() {
let bench_target_name = &bench.bench_target_name;
// workspace_root is needed since file! returns the path relatively to the workspace root
// while CARGO_MANIFEST_DIR returns the path to the sub package
let workspace_root = metadata.workspace_root.clone();
eprintln!("Running {} {}", &bench.package_name, bench_target_name);
eprintln!(
"Running {} {} ({})",
&bench.package_name,
bench_target_name,
bench.bench_path.display()
);
let mut command = std::process::Command::new(&bench.bench_path);
command
.env("CODSPEED_CARGO_WORKSPACE_ROOT", workspace_root)
.current_dir(&bench.working_directory);
.current_dir(&bench.working_directory)
.stdout(Stdio::piped())
.stderr(Stdio::inherit());

if show_details {
command.env("CODSPEED_SHOW_DETAILS", "1");
}

if measurement_mode == MeasurementMode::Walltime {
command.arg("--bench"); // Walltime targets need this additional argument (inherited from running them with `cargo bench`)
Expand All @@ -132,33 +147,63 @@ pub fn run_benches(
command.arg(bench_name_filter);
}

command
.status()
.map_err(|e| anyhow!("failed to execute the benchmark process: {}", e))
.and_then(|status| {
if status.success() {
Ok(())
} else {
#[cfg(unix)]
{
let code = status
.code()
.or(status.signal().map(|s| 128 + s)) // 128+N indicates that a command was interrupted by signal N (see: https://tldp.org/LDP/abs/html/exitcodes.html)
.unwrap_or(1);

eprintln!("failed to execute the benchmark process, exit code: {code}");

std::process::exit(code);
}
#[cfg(not(unix))]
{
bail!("failed to execute the benchmark process: {}", status)
}
}
})?;
eprintln!("Done running {bench_target_name}");
let output = command
.output()
.map_err(|e| anyhow!("failed to execute the benchmark process: {}", e))?;

// Count benchmarks by looking for "Measured:" or "Checked:" lines
let stdout = String::from_utf8_lossy(&output.stdout);
let benchmark_count = stdout
.lines()
.filter(|line| {
line.trim_start().starts_with("Measured:")
|| line.trim_start().starts_with("Checked:")
|| (show_details && line.trim_start().starts_with(" Checked:"))
|| (show_details && line.trim_start().starts_with(" Measured:"))
})
.count();
total_benchmark_count += benchmark_count;

// Print captured output
print!("{stdout}");
io::stdout().flush().unwrap();

if !output.status.success() {
#[cfg(unix)]
{
let code = output
.status
.code()
.or(output.status.signal().map(|s| 128 + s)) // 128+N indicates that a command was interrupted by signal N (see: https://tldp.org/LDP/abs/html/exitcodes.html)
.unwrap_or(1);

eprintln!("failed to execute the benchmark process, exit code: {code}");

std::process::exit(code);
}
#[cfg(not(unix))]
{
bail!("failed to execute the benchmark process: {}", output.status)
}
}

if benchmark_count == 0 && !stdout.is_empty() {
eprintln!("Warning: No benchmarks detected in output for {bench_target_name}");
}
if show_details {
eprintln!("Done running {bench_target_name} ({benchmark_count} benchmarks)");
} else {
eprintln!("Done running {bench_target_name}");
}
}
if show_details {
eprintln!(
"Finished running {} benchmark suite(s) ({total_benchmark_count} benchmarks total)",
benches.len()
);
} else {
eprintln!("Finished running {} benchmark suite(s)", benches.len());
}
eprintln!("Finished running {} benchmark suite(s)", benches.len());

if measurement_mode == MeasurementMode::Walltime {
aggregate_raw_walltime_data(workspace_root)?;
Expand Down
76 changes: 76 additions & 0 deletions crates/cargo-codspeed/tests/simple-bencher.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use assert_cmd::assert::OutputAssertExt;
use predicates::prelude::*;
use predicates::str::contains;

mod helpers;
Expand Down Expand Up @@ -82,3 +83,78 @@ fn test_simple_cargo_bench_no_run() {
.success();
teardown(dir);
}

/// Without `--details`, `cargo codspeed run` must print only the plain
/// per-suite summary: no "benchmarks total" line on stderr and no timed
/// "Checked: … (<duration>)" lines on stdout.
#[test]
fn test_simple_run_without_details() {
    let dir = setup(DIR, Project::Simple);
    cargo_codspeed(&dir).arg("build").assert().success();
    cargo_codspeed(&dir)
        .arg("run")
        .assert()
        .success()
        // The suite-count summary is always emitted…
        .stderr(contains("Finished running 2 benchmark suite(s)"))
        // …but the detailed grand-total must be absent,
        .stderr(contains("benchmarks total").not())
        // and stdout must carry no per-benchmark timing lines.
        .stdout(
            predicates::str::is_match(r" Checked: .* \([0-9]+(\.[0-9]+)? (ns|us|ms|s)\)")
                .unwrap()
                .not(),
        );
    teardown(dir);
}

/// With `--details`, stderr carries the per-suite "Done running" lines plus
/// the "benchmarks total" summary, and stdout shows timed "Checked:" lines.
#[test]
fn test_simple_run_with_details() {
    let project_dir = setup(DIR, Project::Simple);
    cargo_codspeed(&project_dir).arg("build").assert().success();
    // A timing line looks like " Checked: <bench> (<value> <unit>)".
    let timing_line =
        predicates::str::is_match(r" Checked: .* \([0-9]+(\.[0-9]+)? (ns|us|ms|s)\)").unwrap();
    cargo_codspeed(&project_dir)
        .args(["run", "--details"])
        .assert()
        .success()
        .stderr(contains("benchmarks total"))
        .stderr(contains("Done running"))
        .stdout(timing_line);
    teardown(project_dir);
}

/// `--details` must report an exact benchmark count for every suite and a
/// grand total across all suites.
#[test]
fn test_benchmark_counting_with_details() {
    let workspace = setup(DIR, Project::Simple);
    cargo_codspeed(&workspace).arg("build").assert().success();
    let mut run = cargo_codspeed(&workspace)
        .args(["run", "--details"])
        .assert()
        .success();
    // Each of the two suites holds 2 benchmarks, so the grand total is 4.
    for expected in [
        "Done running bencher_example (2 benchmarks)",
        "Done running another_bencher_example (2 benchmarks)",
        "Finished running 2 benchmark suite(s) (4 benchmarks total)",
    ] {
        run = run.stderr(contains(expected));
    }
    teardown(workspace);
}

/// Restricting both build and run to a single suite via `--bench` must count
/// only that suite's benchmarks in the per-suite line and in the grand total.
#[test]
fn test_single_benchmark_counting_with_details() {
    const SUITE: &str = "bencher_example";
    let project = setup(DIR, Project::Simple);
    // Build just the one suite…
    cargo_codspeed(&project)
        .args(["build", "--bench", SUITE])
        .assert()
        .success();
    // …then run it with details and check both count lines.
    cargo_codspeed(&project)
        .args(["run", "--details", "--bench", SUITE])
        .assert()
        .success()
        .stderr(contains("Done running bencher_example (2 benchmarks)"))
        .stderr(contains(
            "Finished running 1 benchmark suite(s) (2 benchmarks total)",
        ));
    teardown(project);
}
36 changes: 36 additions & 0 deletions crates/cargo-codspeed/tests/simple-criterion.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
use assert_cmd::assert::OutputAssertExt;
use predicates::prelude::*;
use predicates::str::contains;

mod helpers;
Expand Down Expand Up @@ -108,3 +109,38 @@ fn test_criterion_cargo_bench_no_run() {
.success();
teardown(dir);
}

/// Without `--details`, a criterion project run must print only the plain
/// per-suite summary: no "benchmarks total" line on stderr and no timed
/// "Checked: … (<duration>)" lines on stdout.
#[test]
fn test_criterion_run_without_details() {
    let dir = setup(DIR, Project::Simple);
    cargo_codspeed(&dir).arg("build").assert().success();
    cargo_codspeed(&dir)
        .arg("run")
        .assert()
        .success()
        // The suite-count summary is always emitted…
        .stderr(contains("Finished running 2 benchmark suite(s)"))
        // …but the detailed grand-total must be absent,
        .stderr(contains("benchmarks total").not())
        // and stdout must carry no per-benchmark timing lines.
        .stdout(
            predicates::str::is_match(r" Checked: .* \([0-9]+(\.[0-9]+)? (ns|us|ms|s)\)")
                .unwrap()
                .not(),
        );
    teardown(dir);
}

/// With `--details`, a criterion project run must surface benchmark counts on
/// stderr and per-benchmark timing lines on stdout.
#[test]
fn test_criterion_run_with_details() {
    let project_dir = setup(DIR, Project::Simple);
    cargo_codspeed(&project_dir).arg("build").assert().success();
    // Per-benchmark output has the shape " Checked: <bench> (<value> <unit>)".
    let timing_line =
        predicates::str::is_match(r" Checked: .* \([0-9]+(\.[0-9]+)? (ns|us|ms|s)\)").unwrap();
    cargo_codspeed(&project_dir)
        .args(["run", "--details"])
        .assert()
        .success()
        .stderr(contains("benchmarks total"))
        .stderr(contains("Done running"))
        .stdout(timing_line);
    teardown(project_dir);
}
Loading