diff --git a/crates/cargo-codspeed/README.md b/crates/cargo-codspeed/README.md
index 070e1b5b..3c1b0023 100644
--- a/crates/cargo-codspeed/README.md
+++ b/crates/cargo-codspeed/README.md
@@ -48,6 +48,26 @@ Options:
   -V, --version  Print version information
 ```

+### Running benchmarks with details
+
+Use the `--details` flag to see timing information for each benchmark:
+
+```bash
+cargo codspeed run --details
+```
+
+This will show execution times for each benchmark:
+```
+ Checked: benches/example.rs::fibonacci (5.6 us)
+ Checked: benches/example.rs::factorial (368 ns)
+```
+
+The output also shows the total number of benchmarks executed:
+```
+Done running benchmark_suite (5 benchmarks)
+Finished running 2 benchmark suite(s) (10 benchmarks total)
+```
+
 ## Development

 ### Troubleshooting
diff --git a/crates/cargo-codspeed/src/app.rs b/crates/cargo-codspeed/src/app.rs
index a1786bfd..51191f77 100644
--- a/crates/cargo-codspeed/src/app.rs
+++ b/crates/cargo-codspeed/src/app.rs
@@ -85,6 +85,10 @@ enum Commands {

         #[command(flatten)]
         bench_target_filters: BenchTargetFilters,
+
+        /// Print per-benchmark duration details
+        #[arg(long)]
+        details: bool,
     },
 }

@@ -141,12 +145,14 @@ pub fn run(args: impl Iterator) -> Result<()> {
             benchname,
             package_filters,
             bench_target_filters,
+            details,
         } => run_benches(
             &metadata,
             benchname,
             package_filters,
             bench_target_filters,
             measurement_mode,
+            details,
         ),
     };

diff --git a/crates/cargo-codspeed/src/run.rs b/crates/cargo-codspeed/src/run.rs
index aaa4914b..993af031 100644
--- a/crates/cargo-codspeed/src/run.rs
+++ b/crates/cargo-codspeed/src/run.rs
@@ -10,6 +10,7 @@ use codspeed::walltime_results::WalltimeResults;
 use std::{
     io::{self, Write},
     path::{Path, PathBuf},
+    process::Stdio,
 };

 #[cfg(unix)]
@@ -99,6 +100,7 @@ pub fn run_benches(
     package_filters: PackageFilters,
     bench_target_filters: BenchTargetFilters,
     measurement_mode: MeasurementMode,
+    show_details: bool,
 ) -> Result<()> {
     let codspeed_target_dir = get_codspeed_target_dir(metadata, measurement_mode);
     let workspace_root = metadata.workspace_root.as_std_path();
@@ -113,16 +115,29 @@ pub fn run_benches(
     eprintln!("Collected {} benchmark suite(s) to run", benches.len());

+    let mut total_benchmark_count = 0;
+
     for bench in benches.iter() {
         let bench_target_name = &bench.bench_target_name;
         // workspace_root is needed since file! returns the path relatively to the workspace root
         // while CARGO_MANIFEST_DIR returns the path to the sub package
         let workspace_root = metadata.workspace_root.clone();
-        eprintln!("Running {} {}", &bench.package_name, bench_target_name);
+        eprintln!(
+            "Running {} {} ({})",
+            &bench.package_name,
+            bench_target_name,
+            bench.bench_path.display()
+        );
         let mut command = std::process::Command::new(&bench.bench_path);
         command
             .env("CODSPEED_CARGO_WORKSPACE_ROOT", workspace_root)
-            .current_dir(&bench.working_directory);
+            .current_dir(&bench.working_directory)
+            .stdout(Stdio::piped())
+            .stderr(Stdio::inherit());
+
+        if show_details {
+            command.env("CODSPEED_SHOW_DETAILS", "1");
+        }

         if measurement_mode == MeasurementMode::Walltime {
             command.arg("--bench"); // Walltime targets need this additional argument (inherited from running them with `cargo bench`)
@@ -132,33 +147,61 @@
             command.arg(bench_name_filter);
         }

-        command
-            .status()
-            .map_err(|e| anyhow!("failed to execute the benchmark process: {}", e))
-            .and_then(|status| {
-                if status.success() {
-                    Ok(())
-                } else {
-                    #[cfg(unix)]
-                    {
-                        let code = status
-                            .code()
-                            .or(status.signal().map(|s| 128 + s)) // 128+N indicates that a command was interrupted by signal N (see: https://tldp.org/LDP/abs/html/exitcodes.html)
-                            .unwrap_or(1);
-
-                        eprintln!("failed to execute the benchmark process, exit code: {code}");
-
-                        std::process::exit(code);
-                    }
-                    #[cfg(not(unix))]
-                    {
-                        bail!("failed to execute the benchmark process: {}", status)
-                    }
-                }
-            })?;
-        eprintln!("Done running {bench_target_name}");
+        let output = command
+            .output()
+            .map_err(|e| anyhow!("failed to execute the benchmark process: {}", e))?;
+
+        // Count benchmarks by looking for "Measured:" or "Checked:" lines
+        // (trim_start already strips the indentation of the `--details` lines)
+        let stdout = String::from_utf8_lossy(&output.stdout);
+        let benchmark_count = stdout
+            .lines()
+            .filter(|line| {
+                line.trim_start().starts_with("Measured:")
+                    || line.trim_start().starts_with("Checked:")
+            })
+            .count();
+        total_benchmark_count += benchmark_count;
+
+        // Print captured output
+        print!("{stdout}");
+        io::stdout().flush().unwrap();
+
+        if !output.status.success() {
+            #[cfg(unix)]
+            {
+                let code = output
+                    .status
+                    .code()
+                    .or(output.status.signal().map(|s| 128 + s)) // 128+N indicates that a command was interrupted by signal N (see: https://tldp.org/LDP/abs/html/exitcodes.html)
+                    .unwrap_or(1);
+
+                eprintln!("failed to execute the benchmark process, exit code: {code}");
+
+                std::process::exit(code);
+            }
+            #[cfg(not(unix))]
+            {
+                bail!("failed to execute the benchmark process: {}", output.status)
+            }
+        }
+
+        if benchmark_count == 0 && !stdout.is_empty() {
+            eprintln!("Warning: No benchmarks detected in output for {bench_target_name}");
+        }
+        if show_details {
+            eprintln!("Done running {bench_target_name} ({benchmark_count} benchmarks)");
+        } else {
+            eprintln!("Done running {bench_target_name}");
+        }
+    }
+
+    if show_details {
+        eprintln!(
+            "Finished running {} benchmark suite(s) ({total_benchmark_count} benchmarks total)",
+            benches.len()
+        );
+    } else {
+        eprintln!("Finished running {} benchmark suite(s)", benches.len());
     }
-    eprintln!("Finished running {} benchmark suite(s)", benches.len());

     if measurement_mode == MeasurementMode::Walltime {
         aggregate_raw_walltime_data(workspace_root)?;
diff --git a/crates/cargo-codspeed/tests/simple-bencher.rs b/crates/cargo-codspeed/tests/simple-bencher.rs
index e193c61e..3b4869aa 100644
--- a/crates/cargo-codspeed/tests/simple-bencher.rs
+++ b/crates/cargo-codspeed/tests/simple-bencher.rs
@@ -1,4 +1,5 @@
 use assert_cmd::assert::OutputAssertExt;
+use predicates::prelude::*;
 use predicates::str::contains;

 mod helpers;
@@ -82,3 +83,78 @@ fn test_simple_cargo_bench_no_run() {
         .success();
     teardown(dir);
 }
+
+#[test]
+fn test_simple_run_without_details() {
+    let dir = setup(DIR, Project::Simple);
+    cargo_codspeed(&dir).arg("build").assert().success();
+    cargo_codspeed(&dir)
+        .arg("run")
+        .assert()
+        .success()
+        .stderr(contains("Finished running 2 benchmark suite(s)"))
+        .stderr(predicates::str::contains("benchmarks total").not())
+        .stdout(
+            predicates::str::is_match(r" Checked: .* \([0-9]+(\.[0-9]+)? (ns|us|ms|s)\)")
+                .unwrap()
+                .not(),
+        );
+    teardown(dir);
+}
+
+#[test]
+fn test_simple_run_with_details() {
+    let dir = setup(DIR, Project::Simple);
+    cargo_codspeed(&dir).arg("build").assert().success();
+    cargo_codspeed(&dir)
+        .arg("run")
+        .arg("--details")
+        .assert()
+        .success()
+        .stderr(contains("benchmarks total"))
+        .stderr(contains("Done running"))
+        .stdout(
+            predicates::str::is_match(r" Checked: .* \([0-9]+(\.[0-9]+)? (ns|us|ms|s)\)").unwrap(),
+        );
+    teardown(dir);
+}
+
+#[test]
+fn test_benchmark_counting_with_details() {
+    let dir = setup(DIR, Project::Simple);
+    cargo_codspeed(&dir).arg("build").assert().success();
+    cargo_codspeed(&dir)
+        .arg("run")
+        .arg("--details")
+        .assert()
+        .success()
+        .stderr(contains("Done running bencher_example (2 benchmarks)"))
+        .stderr(contains(
+            "Done running another_bencher_example (2 benchmarks)",
+        ))
+        .stderr(contains(
+            "Finished running 2 benchmark suite(s) (4 benchmarks total)",
+        ));
+    teardown(dir);
+}
+
+#[test]
+fn test_single_benchmark_counting_with_details() {
+    let dir = setup(DIR, Project::Simple);
+    cargo_codspeed(&dir)
+        .arg("build")
+        .args(["--bench", "bencher_example"])
+        .assert()
+        .success();
+    cargo_codspeed(&dir)
+        .arg("run")
+        .arg("--details")
+        .args(["--bench", "bencher_example"])
+        .assert()
+        .success()
+        .stderr(contains("Done running bencher_example (2 benchmarks)"))
+        .stderr(contains(
+            "Finished running 1 benchmark suite(s) (2 benchmarks total)",
+        ));
+    teardown(dir);
+}
diff --git a/crates/cargo-codspeed/tests/simple-criterion.rs b/crates/cargo-codspeed/tests/simple-criterion.rs
index dc4b7dd7..b5df559c 100644
--- a/crates/cargo-codspeed/tests/simple-criterion.rs
+++ b/crates/cargo-codspeed/tests/simple-criterion.rs
@@ -1,4 +1,5 @@
 use assert_cmd::assert::OutputAssertExt;
+use predicates::prelude::*;
 use predicates::str::contains;

 mod helpers;
@@ -108,3 +109,38 @@ fn test_criterion_cargo_bench_no_run() {
         .success();
     teardown(dir);
 }
+
+#[test]
+fn test_criterion_run_without_details() {
+    let dir = setup(DIR, Project::Simple);
+    cargo_codspeed(&dir).arg("build").assert().success();
+    cargo_codspeed(&dir)
+        .arg("run")
+        .assert()
+        .success()
+        .stderr(contains("Finished running 2 benchmark suite(s)"))
+        .stderr(predicates::str::contains("benchmarks total").not())
+        .stdout(
(ns|us|ms|s)\)") + .unwrap() + .not(), + ); + teardown(dir); +} + +#[test] +fn test_criterion_run_with_details() { + let dir = setup(DIR, Project::Simple); + cargo_codspeed(&dir).arg("build").assert().success(); + cargo_codspeed(&dir) + .arg("run") + .arg("--details") + .assert() + .success() + .stderr(contains("benchmarks total")) + .stderr(contains("Done running")) + .stdout( + predicates::str::is_match(r" Checked: .* \([0-9]+(\.[0-9]+)? (ns|us|ms|s)\)").unwrap(), + ); + teardown(dir); +} diff --git a/crates/codspeed/src/codspeed.rs b/crates/codspeed/src/codspeed.rs index 937036be..0896881b 100644 --- a/crates/codspeed/src/codspeed.rs +++ b/crates/codspeed/src/codspeed.rs @@ -1,6 +1,7 @@ -use crate::measurement; +use crate::{measurement, utils}; use colored::Colorize; use std::ffi::CString; +use std::time::Instant; pub use std::hint::black_box; @@ -15,11 +16,14 @@ pub struct CodSpeed { current_benchmark: CString, group_stack: Vec, is_instrumented: bool, + start_time: Option, + show_details: bool, } impl CodSpeed { pub fn new() -> Self { let is_instrumented = measurement::is_instrumented(); + let show_details = utils::show_details(); if !is_instrumented { println!( "{} codspeed is enabled, but no performance measurement will be made since it's running in an unknown environment.", @@ -32,6 +36,8 @@ impl CodSpeed { current_benchmark: CString::new("").expect("CString::new failed"), group_stack: Vec::new(), is_instrumented, + start_time: None, + show_details, } } @@ -47,6 +53,11 @@ impl CodSpeed { pub fn start_benchmark(&mut self, name: &str) { self.current_benchmark = CString::new(name).expect("CString::new failed"); measurement::start(); + if self.show_details && !self.is_instrumented { + self.start_time = Some(Instant::now()); + } else { + self.start_time = None; + } } #[inline(always)] @@ -54,22 +65,44 @@ impl CodSpeed { measurement::stop(&self.current_benchmark); self.benchmarked .push(self.current_benchmark.to_str().unwrap().to_string()); - let action_str = if self.is_instrumented { - "Measured" - } else { - "Checked" - }; - let group_str = if self.group_stack.is_empty() { - "".to_string() + if self.show_details { + if !self.is_instrumented { + let elapsed = self + .start_time + .take() + .map(|start| start.elapsed()) + .unwrap_or_default(); + let group_str = if self.group_stack.is_empty() { + "".to_string() + } else { + format!(" (group: {})", self.group_stack.join("/")) + }; + let formatted_duration = crate::utils::format_duration_nanos(elapsed.as_nanos()); + println!( + " Checked: {}{} ({})", + self.current_benchmark.to_string_lossy(), + group_str, + formatted_duration + ); + } } else { - format!(" (group: {})", self.group_stack.join("/")) - }; - println!( - "{}: {}{}", - action_str, - self.current_benchmark.to_string_lossy(), - group_str - ); + let action_str = if self.is_instrumented { + "Measured" + } else { + "Checked" + }; + let group_str = if self.group_stack.is_empty() { + "".to_string() + } else { + format!(" (group: {})", self.group_stack.join("/")) + }; + println!( + "{}: {}{}", + action_str, + self.current_benchmark.to_string_lossy(), + group_str + ); + } } } diff --git a/crates/codspeed/src/utils.rs b/crates/codspeed/src/utils.rs index ffbfb6d3..36f6351e 100644 --- a/crates/codspeed/src/utils.rs +++ b/crates/codspeed/src/utils.rs @@ -46,6 +46,32 @@ pub fn is_perf_enabled() -> bool { std::env::var("CODSPEED_PERF_ENABLED").is_ok() } +pub fn show_details() -> bool { + std::env::var("CODSPEED_SHOW_DETAILS").is_ok() +} + +/// Format a duration value (as f64 nanoseconds) into a 
+/// Format a duration value (as f64 nanoseconds) into a human-readable string with appropriate units
+pub fn format_duration_nanos_f64(value: f64) -> String {
+    if value <= 0.0 {
+        return "<1 ns".to_string();
+    }
+
+    if value < 1_000.0 {
+        format!("{value:.0} ns")
+    } else if value < 1_000_000.0 {
+        format!("{:.1} us", value / 1_000.0)
+    } else if value < 1_000_000_000.0 {
+        format!("{:.1} ms", value / 1_000_000.0)
+    } else {
+        format!("{:.2} s", value / 1_000_000_000.0)
+    }
+}
+
+/// Format a duration value into a human-readable string with appropriate units
+pub fn format_duration_nanos(nanos: u128) -> String {
+    format_duration_nanos_f64(nanos as f64)
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -94,4 +120,65 @@
         let expected_output = "std::vec::Vec::new".to_string();
         assert_eq!(get_formated_function_path(input), expected_output);
     }
+
+    #[test]
+    fn test_format_duration_nanos_handles_zero() {
+        assert_eq!(format_duration_nanos(0), "<1 ns");
+    }
+
+    #[test]
+    fn test_format_duration_nanos_scales_to_microseconds() {
+        assert_eq!(format_duration_nanos(1_500), "1.5 us");
+    }
+
+    #[test]
+    fn test_format_duration_nanos_scales_to_milliseconds() {
+        assert_eq!(format_duration_nanos(2_345_000), "2.3 ms");
+    }
+
+    #[test]
+    fn test_format_duration_nanos_scales_to_seconds() {
+        assert_eq!(format_duration_nanos(2_500_000_000), "2.50 s");
+    }
+
+    #[test]
+    fn test_format_duration_nanos_f64_handles_zero() {
+        assert_eq!(format_duration_nanos_f64(0.0), "<1 ns");
+    }
+
+    #[test]
+    fn test_format_duration_nanos_f64_scales_to_microseconds() {
+        assert_eq!(format_duration_nanos_f64(1_500.0), "1.5 us");
+    }
+
+    #[test]
+    fn test_format_duration_nanos_f64_scales_to_milliseconds() {
+        assert_eq!(format_duration_nanos_f64(2_345_000.0), "2.3 ms");
+    }
+
+    #[test]
+    fn test_format_duration_nanos_f64_scales_to_seconds() {
+        assert_eq!(format_duration_nanos_f64(2_500_000_000.0), "2.50 s");
+    }
+
+    // Additional format_duration_nanos_f64 tests (moved from walltime_results.rs)
+    #[test]
+    fn test_format_duration_nanos_f64_formats_small_values() {
+        assert_eq!(format_duration_nanos_f64(420.0), "420 ns");
+    }
+
+    #[test]
+    fn test_format_duration_nanos_f64_formats_microseconds() {
+        assert_eq!(format_duration_nanos_f64(12_500.0), "12.5 us");
+    }
+
+    #[test]
+    fn test_format_duration_nanos_f64_formats_milliseconds() {
+        assert_eq!(format_duration_nanos_f64(3_250_000.0), "3.2 ms");
+    }
+
+    #[test]
+    fn test_format_duration_nanos_f64_formats_seconds() {
+        assert_eq!(format_duration_nanos_f64(1_750_000_000.0), "1.75 s");
+    }
 }
diff --git a/crates/codspeed/src/walltime_results.rs b/crates/codspeed/src/walltime_results.rs
index b7f0c491..5eb8ecff 100644
--- a/crates/codspeed/src/walltime_results.rs
+++ b/crates/codspeed/src/walltime_results.rs
@@ -66,6 +66,15 @@ impl WalltimeBenchmark {
     /// - `times_per_round_ns`: The measured time for each round in nanoseconds, e.g. `[1000, 2000, 3000]`
     /// - `max_time_ns`: The time limit for the benchmark in nanoseconds (if defined)
     ///
+    /// # Example Output
+    ///
+    /// ```text
+    ///  Measured: core/benches/io.rs::benches::read (3.2 ms)
+    /// ```
+    ///
+    /// The message above is only emitted when `CODSPEED_SHOW_DETAILS=1`
+    /// (e.g. via `cargo codspeed run --details`).
+    ///
     /// # Pseudo-code
     ///
     /// ```text
@@ -103,6 +112,13 @@
             times_per_round_ns,
             max_time_ns,
         );
+        if crate::utils::show_details() {
+            println!(
+                " Measured: {} ({})",
+                data.uri(),
+                crate::utils::format_duration_nanos_f64(data.mean_per_iter_ns())
+            );
+        }

         data.dump_to_results(&workspace_root, scope);
     }
@@ -205,6 +221,14 @@
     pub fn name(&self) -> &str {
         &self.metadata.name
     }
+
+    pub fn mean_per_iter_ns(&self) -> f64 {
+        self.stats.mean_ns
+    }
+
+    pub fn uri(&self) -> &str {
+        &self.metadata.uri
+    }
 }

 #[derive(Debug, Serialize, Deserialize)]
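As a quick reference for the duration strings shown in the `--details` output, the standalone sketch below mirrors the unit-scaling thresholds of the new `format_duration_nanos_f64` helper; it is an illustrative copy for local experimentation, not part of the patch (the authoritative version lives in `crates/codspeed/src/utils.rs` above).

```rust
// Standalone mirror of the unit-scaling logic from `format_duration_nanos_f64`
// in crates/codspeed/src/utils.rs (illustrative copy, not the crate itself).
fn format_duration_nanos_f64(value: f64) -> String {
    if value <= 0.0 {
        return "<1 ns".to_string(); // zero or negative durations
    }
    if value < 1_000.0 {
        format!("{value:.0} ns")
    } else if value < 1_000_000.0 {
        format!("{:.1} us", value / 1_000.0)
    } else if value < 1_000_000_000.0 {
        format!("{:.1} ms", value / 1_000_000.0)
    } else {
        format!("{:.2} s", value / 1_000_000_000.0)
    }
}

fn main() {
    // Expected output: 420 ns, 12.5 us, 3.2 ms, 1.75 s (matching the unit tests above)
    for nanos in [420.0, 12_500.0, 3_250_000.0, 1_750_000_000.0] {
        println!("{nanos} ns -> {}", format_duration_nanos_f64(nanos));
    }
}
```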