1 change: 1 addition & 0 deletions Cargo.lock

2 changes: 1 addition & 1 deletion README.md
@@ -8,4 +8,4 @@ Additional documentation on running and setting up the frontend and backend can
be found in the `README` files in the `collector` and `site` directories.

Additional documentation on the benchmark programs can be found in the `README`
file in the `collector/benchmarks` directory.
file in the `collector/compile-benchmarks` and `collector/runtime-benchmarks` directories.
1 change: 1 addition & 0 deletions collector/benchlib/Cargo.toml
@@ -13,6 +13,7 @@ serde_json = "1.0.83"
log = "0.4.17"
env_logger = "0.9.0"
clap = { version = "3.2", features = ["derive"] }
libc = "0.2"

[target.'cfg(unix)'.dependencies]
perf-event = "0.4.7"
53 changes: 36 additions & 17 deletions collector/benchlib/src/benchmark.rs
@@ -1,6 +1,7 @@
use crate::cli::{parse_cli, Args, BenchmarkArgs};
use crate::measure::benchmark_function;
use crate::messages::BenchmarkResult;
use crate::process::raise_process_priority;
use log::LevelFilter;
use std::collections::HashMap;

@@ -14,9 +15,16 @@ pub fn benchmark_suite<F: FnOnce(&mut BenchmarkSuite)>(define_func: F) {
suite.run().expect("Benchmark suite has failed");
}

/// Type-erased function that performs a benchmark.
struct BenchmarkWrapper {
func: Box<dyn Fn() -> anyhow::Result<BenchmarkResult>>,
}

type BenchmarkMap = HashMap<&'static str, BenchmarkWrapper>;

#[derive(Default)]
pub struct BenchmarkSuite {
benchmarks: HashMap<&'static str, BenchmarkWrapper>,
benchmarks: BenchmarkMap,
}

impl BenchmarkSuite {
@@ -48,36 +56,47 @@ impl BenchmarkSuite {
/// Execute the benchmark suite. It will parse CLI arguments and decide what to do based on
/// them.
pub fn run(self) -> anyhow::Result<()> {
raise_process_priority();

let args = parse_cli()?;
match args {
Args::Benchmark(args) => {
self.run_benchmark(args)?;
run_benchmark(args, self.benchmarks)?;
}
}

Ok(())
}
}

fn run_benchmark(self, args: BenchmarkArgs) -> anyhow::Result<()> {
let mut items: Vec<_> = self.benchmarks.into_iter().collect();
items.sort_unstable_by_key(|item| item.0);
fn run_benchmark(args: BenchmarkArgs, benchmarks: BenchmarkMap) -> anyhow::Result<()> {
let mut items: Vec<(&'static str, BenchmarkWrapper)> = benchmarks
.into_iter()
.filter(|(name, _)| passes_filter(name, args.exclude.as_deref(), args.include.as_deref()))
.collect();
items.sort_unstable_by_key(|item| item.0);

let mut results: Vec<BenchmarkResult> = Vec::with_capacity(items.len());
for (name, def) in items {
for i in 0..args.iterations {
let result = (def.func)()?;
log::info!("Benchmark (run {i}) `{}` completed: {:?}", name, result);
results.push(result);
}
let mut results: Vec<BenchmarkResult> = Vec::with_capacity(items.len());
for (name, def) in items {
for i in 0..args.iterations {
let result = (def.func)()?;
log::info!("Benchmark (run {i}) `{name}` completed: {result:?}");
results.push(result);
}

println!("{}", serde_json::to_string(&results)?);
Ok(())
}

println!("{}", serde_json::to_string(&results)?);
Ok(())
}

struct BenchmarkWrapper {
func: Box<dyn Fn() -> anyhow::Result<BenchmarkResult>>,
/// Tests if the name of the benchmark passes through the include and exclude filter flags.
fn passes_filter(name: &str, exclude: Option<&str>, include: Option<&str>) -> bool {
match (exclude, include) {
(Some(exclude), Some(include)) => name.starts_with(include) && !name.starts_with(exclude),
(None, Some(include)) => name.starts_with(include),
(Some(exclude), None) => !name.starts_with(exclude),
(None, None) => true,
}
}

/// Copied from `iai`, so that we don't have to use unstable features.
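The new `passes_filter` helper treats both flags as plain prefix matches on the benchmark name, with `exclude` taking precedence when both are given. A small illustrative test (not part of the diff) that pins down these semantics could sit at the bottom of `benchmark.rs`:

```rust
#[cfg(test)]
mod tests {
    use super::passes_filter;

    #[test]
    fn prefix_filtering() {
        // With no filters, every benchmark passes.
        assert!(passes_filter("hashmap-insert-10k", None, None));
        // `include` keeps only names starting with the given prefix.
        assert!(passes_filter("hashmap-insert-10k", None, Some("hashmap")));
        assert!(!passes_filter("fmt-write", None, Some("hashmap")));
        // `exclude` drops names starting with the given prefix.
        assert!(!passes_filter("hashmap-insert-10k", Some("hashmap"), None));
        // When both are given, `exclude` wins over `include`.
        assert!(!passes_filter("hashmap-insert-10k", Some("hashmap"), Some("hashmap")));
    }
}
```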
8 changes: 8 additions & 0 deletions collector/benchlib/src/cli.rs
@@ -11,6 +11,14 @@ pub struct BenchmarkArgs {
/// How many times should each benchmark be repeated.
#[clap(long, default_value = "5")]
pub iterations: u32,

/// Exclude all benchmarks whose name starts with this prefix
#[clap(long)]
pub exclude: Option<String>,

/// Include only benchmarks whose name starts with this prefix
#[clap(long)]
pub include: Option<String>,
}

pub fn parse_cli() -> anyhow::Result<Args> {
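With these flags, a suite binary built on `benchlib` accepts invocations such as `<suite-binary> benchmark --iterations 10 --include hashmap` (binary name hypothetical; the `benchmark` subcommand corresponds to the `Args::Benchmark` variant matched in `benchmark.rs` above).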
6 changes: 4 additions & 2 deletions collector/benchlib/src/lib.rs
@@ -1,5 +1,6 @@
//! This library defines an API for performing benchmarks of Rust code. It is tailored for the
//! use-case of `rustc-perf`, that's why we don't use e.g. `criterion` or `iai`.
//! This library defines an API for performing benchmarks of Rust code and various other utilities
//! for measuring and benchmarking. It is tailored for the use-case of `rustc-perf`, which is why
//! we don't use e.g. `criterion` or `iai`.
//!
//! We want to be able to define short benchmarks in code, measure specific performance counters
//! and, most importantly, consume the benchmark results in a programmatic way.
@@ -16,3 +17,4 @@ pub mod benchmark;
mod cli;
pub mod measure;
pub mod messages;
pub mod process;
16 changes: 16 additions & 0 deletions collector/benchlib/src/process.rs
@@ -0,0 +1,16 @@
#[cfg(unix)]
pub fn raise_process_priority() {
unsafe {
// Try to reduce jitter in wall time by increasing our priority to the
// maximum
for i in (1..21).rev() {
let r = libc::setpriority(libc::PRIO_PROCESS as _, libc::getpid() as libc::id_t, -i);
if r == 0 {
break;
}
}
}
}

#[cfg(windows)]
pub fn raise_process_priority() {}
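The Unix implementation counts down from the strongest niceness (`-20`) toward `-1` and stops at the first value the kernel accepts, since unprivileged processes are typically capped by `RLIMIT_NICE`. An illustrative companion (not part of the diff, Linux/glibc only) for reading back the niceness that was actually set:

```rust
// Since -1 is a valid niceness, errno must be cleared before calling
// `getpriority` and inspected afterwards to distinguish failure from -1.
#[cfg(target_os = "linux")]
fn current_niceness() -> Option<i32> {
    unsafe {
        *libc::__errno_location() = 0;
        // `who == 0` means the calling process.
        let nice = libc::getpriority(libc::PRIO_PROCESS as _, 0);
        if nice == -1 && *libc::__errno_location() != 0 {
            None // the getpriority call itself failed
        } else {
            Some(nice)
        }
    }
}
```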
10 changes: 5 additions & 5 deletions collector/compile-benchmarks/README.md
@@ -1,13 +1,13 @@
# The Benchmark Suite
# The Compile-time Benchmark Suite

This file describes the programs in the benchmark suite and explains why they
This file describes the programs in the compile-time benchmark suite and explains why they
were included.

The suite changes over time. Sometimes the code for a benchmark is updated, in
which case a small suffix will be added (starting with "-2", then "-3", and so
on).

There are three categories of benchmarks, **Primary**, **Secondary**, and
There are three categories of compile-time benchmarks, **Primary**, **Secondary**, and
**Stable**.

## Primary
@@ -198,7 +198,7 @@ Rust code being written today.
applies correctly, e.g. `target/release/collector bench_local +nightly
--id Test --profiles=Check --scenarios=IncrPatched
--include=$NEW_BENCHMARK`
- Add the new entry to `collector/benchmarks/README.md`.
- Add the new entry to `collector/compile-benchmarks/README.md`.
- `git add` the `Cargo.lock` file, if it's not already part of the
benchmark's committed code.
- If the benchmark has a `.gitignore` file that contains `Cargo.lock`,
@@ -232,7 +232,7 @@ Rust code being written today.
- In the first commit just remove the old code.
- Do this with `git rm -r` on the directory.
- In the second commit do everything else.
- Remove the entry from `collector/benchmarks/README.md`.
- Remove the entry from `collector/compile-benchmarks/README.md`.
- `git grep` for occurrences of the old benchmark name (e.g. in
`.github/workflows/ci.yml` or `ci/check-*.sh`) and see if anything needs
changing... usually not.
1 change: 1 addition & 0 deletions collector/runtime-benchmarks/Cargo.lock

9 changes: 9 additions & 0 deletions collector/runtime-benchmarks/README.md
@@ -0,0 +1,9 @@
# The Runtime Benchmark Suite
This directory contains various pieces of code whose execution speed is measured
when they are compiled with a specific version of `rustc`.

The benchmarks are located in crates that are part of the `runtime-benchmarks` workspace. Each crate
contains a set of benchmarks defined using named closures.

Benchmarks are split into sub-crates so that individual benchmarks can use different versions of
dependency crates, and so that related benchmarks are grouped by area (e.g. hashmap benchmarks).
1 change: 1 addition & 0 deletions collector/runtime-benchmarks/hashmap/src/main.rs
@@ -4,6 +4,7 @@ use benchlib::benchmark::BenchmarkSuite;
fn main() {
let mut suite = BenchmarkSuite::new();

// Measures how long it takes to insert 10 thousand numbers into a `hashbrown` hashmap.
suite.register("hashmap-insert-10k", || {
let mut map =
hashbrown::HashMap::with_capacity_and_hasher(10000, fxhash::FxBuildHasher::default());
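Putting the pieces together, a complete runtime benchmark suite in this style might look like the sketch below. It assumes `register` accepts any closure whose return value is kept, so the work cannot be optimized away; the real hashmap benchmark continues past the truncated hunk above.

```rust
use benchlib::benchmark::BenchmarkSuite;

fn main() {
    let mut suite = BenchmarkSuite::new();

    // Measures how long it takes to insert 10 thousand numbers into a
    // `hashbrown` hashmap.
    suite.register("hashmap-insert-10k", || {
        let mut map =
            hashbrown::HashMap::with_capacity_and_hasher(10000, fxhash::FxBuildHasher::default());
        for index in 0..10_000u64 {
            map.insert(index, index);
        }
        map // returned so the inserts are observable work
    });

    suite.run().expect("Benchmark suite has failed");
}
```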
24 changes: 22 additions & 2 deletions collector/src/bin/collector.rs
@@ -562,10 +562,19 @@ struct BenchRustcOption {
enum Commands {
/// Benchmarks the performance of programs generated by a local rustc
BenchRuntimeLocal {
/// The path to the local rustc to measure
rustc: String,
/// Identifier to associate benchmark results with
#[clap(long)]
id: Option<String>,

/// Exclude all benchmarks whose name starts with this prefix
#[clap(long)]
exclude: Option<String>,

/// Include only benchmarks whose name starts with this prefix
#[clap(long)]
include: Option<String>,
},
/// Benchmarks a local rustc
BenchLocal {
@@ -692,8 +701,19 @@ fn main_result() -> anyhow::Result<i32> {
let target_triple = format!("{}-unknown-linux-gnu", std::env::consts::ARCH);

match args.command {
Commands::BenchRuntimeLocal { rustc, id } => {
bench_runtime(&rustc, id.as_deref(), runtime_benchmark_dir)?;
Commands::BenchRuntimeLocal {
rustc,
id,
exclude,
include,
} => {
bench_runtime(
&rustc,
id.as_deref(),
exclude,
include,
runtime_benchmark_dir,
)?;
Ok(0)
}
Commands::BenchLocal {
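After this change the filters can be driven end-to-end from the collector, presumably as `target/release/collector bench_runtime_local <rustc> --include hashmap` (subcommand spelling assumed to follow the existing `bench_local` convention used in the README above).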
19 changes: 1 addition & 18 deletions collector/src/bin/rustc-fake.rs
@@ -68,7 +68,7 @@ fn main() {
let wrapper = args.remove(pos);
let wrapper = wrapper.to_str().unwrap();

raise_priority();
benchlib::process::raise_process_priority();

// These strings come from `PerfTool::name()`.
match wrapper {
@@ -490,20 +490,6 @@ fn exec(cmd: &mut Command) -> ! {
panic!("failed to exec `{}`: {}", cmd_d, error);
}

#[cfg(unix)]
fn raise_priority() {
unsafe {
// Try to reduce jitter in wall time by increasing our priority to the
// maximum
for i in (1..21).rev() {
let r = libc::setpriority(libc::PRIO_PROCESS as _, libc::getpid() as libc::id_t, -i);
if r == 0 {
break;
}
}
}
}

#[cfg(unix)]
fn print_memory() {
use std::mem;
@@ -555,8 +541,5 @@ fn run_summarize(name: &str, prof_out_dir: &Path, prefix: &str) -> anyhow::Resul
fs::read_to_string(&json).with_context(|| format!("failed to read {:?}", json))
}

#[cfg(windows)]
fn raise_priority() {}

#[cfg(windows)]
fn print_memory() {}
68 changes: 53 additions & 15 deletions collector/src/runtime.rs
@@ -10,33 +10,71 @@ struct BenchmarkBinary {
path: PathBuf,
}

pub fn bench_runtime(rustc: &str, id: Option<&str>, benchmark_dir: PathBuf) -> anyhow::Result<()> {
/// Perform a series of runtime benchmarks using the provided `rustc` compiler.
/// The runtime benchmarks are looked up in `benchmark_dir`, which is expected to be a path
/// to a Cargo crate. All binaries built by that crate are expected to be runtime benchmark
/// suites that leverage `benchlib`.
pub fn bench_runtime(
rustc: &str,
id: Option<&str>,
exclude: Option<String>,
include: Option<String>,
benchmark_dir: PathBuf,
) -> anyhow::Result<()> {
let toolchain = get_local_toolchain(&[Profile::Opt], rustc, None, None, id, "")?;
let output = compile_runtime_benchmarks(&toolchain, &benchmark_dir)?;
let binaries = gather_binaries(&output)?;

for binary in binaries {
let name = binary.path.file_name().and_then(|s| s.to_str()).unwrap();

let result = Command::new(&binary.path).arg("benchmark").output()?;
if !result.status.success() {
anyhow::bail!(
"Failed to run runtime benchmark {name}\n{}\n{}",
String::from_utf8_lossy(&result.stdout),
String::from_utf8_lossy(&result.stderr)
);
} else {
log::info!("Successfully ran runtime benchmark {name}",);

let data: Vec<BenchmarkResult> = serde_json::from_slice(&result.stdout)?;
// TODO: do something with the result
println!("{name}: {:?}", data);
}
let data: Vec<BenchmarkResult> =
execute_runtime_binary(&binary.path, name, exclude.as_deref(), include.as_deref())?;
// TODO: do something with the result
println!("{name}: {:?}", data);
}

Ok(())
}

/// Execute a single runtime benchmark suite defined in a binary crate located in
/// `runtime-benchmarks`. The binary is expected to use benchlib's `BenchmarkSuite` to execute
/// a set of runtime benchmarks and return a list of `BenchmarkResult`s encoded as JSON.
fn execute_runtime_binary(
binary: &Path,
name: &str,
exclude: Option<&str>,
include: Option<&str>,
) -> anyhow::Result<Vec<BenchmarkResult>> {
// Turn off ASLR
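// (`setarch <arch> -R` runs the child with the ADDR_NO_RANDOMIZE personality
// flag; a fixed memory layout reduces run-to-run noise in cache and branch
// behaviour.)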
let mut command = Command::new("setarch");
command.arg(std::env::consts::ARCH);
command.arg("-R");
command.arg(binary);
command.arg("benchmark");

if let Some(exclude) = exclude {
command.args(&["--exclude", exclude]);
}
if let Some(include) = include {
command.args(&["--include", include]);
}

let result = command.output()?;

if !result.status.success() {
return Err(anyhow::anyhow!(
"Failed to run runtime benchmark {name}\n{}\n{}",
String::from_utf8_lossy(&result.stdout),
String::from_utf8_lossy(&result.stderr)
));
}

log::info!("Successfully ran runtime benchmark {name}");

Ok(serde_json::from_slice(&result.stdout)?)
}

/// Compiles all runtime benchmarks and returns the stdout output of Cargo.
fn compile_runtime_benchmarks(toolchain: &LocalToolchain, dir: &Path) -> anyhow::Result<Vec<u8>> {
let result = Command::new(&toolchain.cargo)
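`gather_binaries` itself is below the fold of this diff. As a purely hypothetical sketch (not the PR's implementation), such a function could extract executables from Cargo's JSON messages, assuming the `cargo_metadata` crate and that `compile_runtime_benchmarks` invokes Cargo with `--message-format=json`:

```rust
use cargo_metadata::Message;
use std::path::PathBuf;

// Hypothetical: parse `cargo build --message-format=json` output and collect
// every executable artifact, treating each one as a runtime benchmark suite.
fn gather_binaries_sketch(cargo_stdout: &[u8]) -> anyhow::Result<Vec<PathBuf>> {
    let mut binaries = Vec::new();
    for message in Message::parse_stream(cargo_stdout) {
        if let Message::CompilerArtifact(artifact) = message? {
            if let Some(executable) = artifact.executable {
                binaries.push(executable.into_std_path_buf());
            }
        }
    }
    Ok(binaries)
}
```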