diff --git a/README.md b/README.md
index e5525dc..1158daf 100644
--- a/README.md
+++ b/README.md
@@ -492,6 +492,10 @@ assert_eq!(sum, sum2);
 
 `ParThreadPool` implementations of several thread pools are provided in this crate as optional features (see [features](#features) section). Provided that the pool supports scoped computations, it is trivial to implement this trait in most cases (see [implementations](https://github.com/orxfun/orx-parallel/tree/main/src/runner/implementations) for examples).
 
+In most cases, *rayon-core*, *scoped_threadpool* and *scoped_pool* perform better than the others and get close to the native-threads performance obtained with `StdDefaultPool`.
+
+Since parallel computations are generic over thread pools, their performance can be conveniently compared for specific use cases. An example benchmark can be found in the [collect_map_filter](https://github.com/orxfun/orx-parallel/blob/main/benches/collect_map_filter.rs) file. For quick tests, you may also use the [benchmark_pools](https://github.com/orxfun/orx-parallel/blob/main/examples/benchmark_pools.rs) example.
+
 ### ParallelExecutor: chunk size
 
 Once thread pool provides the computation resources, it is [`ParallelExecutor`](https://docs.rs/orx-parallel/latest/orx_parallel/trait.ParallelExecutor)'s task to distribute work to available threads. As mentioned above, all threads receive exactly the same closure. This closure continues to pull elements from the input concurrent iterator and operate on the inputs until all elements are processed.
diff --git a/benches/collect_filter.rs b/benches/collect_filter.rs
index 6f76d03..07befac 100644
--- a/benches/collect_filter.rs
+++ b/benches/collect_filter.rs
@@ -84,6 +84,11 @@ fn orx_into_split_vec(inputs: &[Output]) -> SplitVec<&Output> {
     inputs.into_par().filter(filter).collect()
 }
 
+#[allow(dead_code)]
+fn orx_into_vec_with<P: ParThreadPool>(inputs: &[Output], pool: P) -> Vec<&Output> {
+    inputs.into_par().with_pool(pool).filter(filter).collect()
+}
+
 fn run(c: &mut Criterion) {
     let treatments = [65_536 * 2];
 
@@ -114,6 +119,56 @@ fn run(c: &mut Criterion) {
             assert_eq!(&expected, &orx_into_split_vec(&input));
             b.iter(|| orx_into_split_vec(black_box(&input)))
         });
+
+        #[cfg(feature = "rayon-core")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-vec (rayon-core::ThreadPool)", n),
+            n,
+            |b, _| {
+                let pool = rayon_core::ThreadPoolBuilder::new()
+                    .num_threads(32)
+                    .build()
+                    .unwrap();
+                assert_eq!(&expected, &orx_into_vec_with(&input, &pool));
+                b.iter(|| orx_into_vec_with(black_box(&input), &pool))
+            },
+        );
+
+        #[cfg(feature = "scoped-pool")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-vec (scoped-pool::Pool)", n),
+            n,
+            |b, _| {
+                let pool = scoped_pool::Pool::new(32);
+                assert_eq!(&expected, &orx_into_vec_with(&input, &pool));
+                b.iter(|| orx_into_vec_with(black_box(&input), &pool))
+            },
+        );
+
+        #[cfg(feature = "scoped_threadpool")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-vec (scoped_threadpool::Pool)", n),
+            n,
+            |b, _| {
+                let pool = || scoped_threadpool::Pool::new(32);
+                assert_eq!(&expected, &orx_into_vec_with(&input, pool()));
+                b.iter(|| orx_into_vec_with(black_box(&input), pool()))
+            },
+        );
+
+        #[cfg(feature = "yastl")]
+        group.bench_with_input(BenchmarkId::new("orx-vec (yastl::Pool)", n), n, |b, _| {
+            let pool = YastlPool::new(32);
+            assert_eq!(&expected, &orx_into_vec_with(&input, &pool));
+            b.iter(|| orx_into_vec_with(black_box(&input), &pool))
+        });
+
+        #[cfg(feature = "pond")]
+        group.bench_with_input(BenchmarkId::new("orx-vec (pond::Pool)", n), n, |b, _| {
+            let pool = || PondPool::new_threads_unbounded(32);
+            assert_eq!(&expected, &orx_into_vec_with(&input, pool()));
+            b.iter(|| orx_into_vec_with(black_box(&input), pool()))
+        });
     }
 
     group.finish();
diff --git a/benches/collect_map_filter.rs b/benches/collect_map_filter.rs
index 7f8d4c1..9f91cd8 100644
--- a/benches/collect_map_filter.rs
+++ b/benches/collect_map_filter.rs
@@ -83,6 +83,16 @@ fn orx_into_split_vec(inputs: &[usize]) -> SplitVec<Output> {
     inputs.into_par().map(map).filter(filter).collect()
 }
 
+#[allow(dead_code)]
+fn orx_into_vec_with<P: ParThreadPool>(inputs: &[usize], pool: P) -> Vec<Output> {
+    inputs
+        .into_par()
+        .with_pool(pool)
+        .map(map)
+        .filter(filter)
+        .collect()
+}
+
 fn run(c: &mut Criterion) {
     let treatments = [65_536 * 2];
 
@@ -113,6 +123,64 @@ fn run(c: &mut Criterion) {
             assert_eq!(&expected, &orx_into_split_vec(&input));
             b.iter(|| orx_into_split_vec(black_box(&input)))
         });
+
+        #[cfg(feature = "rayon-core")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-into-vec (rayon-core::ThreadPool)", n),
+            n,
+            |b, _| {
+                let pool = rayon_core::ThreadPoolBuilder::new()
+                    .num_threads(32)
+                    .build()
+                    .unwrap();
+                assert_eq!(&expected, &orx_into_vec_with(&input, &pool));
+                b.iter(|| orx_into_vec_with(black_box(&input), &pool))
+            },
+        );
+
+        #[cfg(feature = "scoped-pool")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-into-vec (scoped-pool::Pool)", n),
+            n,
+            |b, _| {
+                let pool = scoped_pool::Pool::new(32);
+                assert_eq!(&expected, &orx_into_vec_with(&input, &pool));
+                b.iter(|| orx_into_vec_with(black_box(&input), &pool))
+            },
+        );
+
+        #[cfg(feature = "scoped_threadpool")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-into-vec (scoped_threadpool::Pool)", n),
+            n,
+            |b, _| {
+                let pool = || scoped_threadpool::Pool::new(32);
+                assert_eq!(&expected, &orx_into_vec_with(&input, pool()));
+                b.iter(|| orx_into_vec_with(black_box(&input), pool()))
+            },
+        );
+
+        #[cfg(feature = "yastl")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-into-vec (yastl::Pool)", n),
+            n,
+            |b, _| {
+                let pool = YastlPool::new(32);
+                assert_eq!(&expected, &orx_into_vec_with(&input, &pool));
+                b.iter(|| orx_into_vec_with(black_box(&input), &pool))
+            },
+        );
+
+        #[cfg(feature = "pond")]
+        group.bench_with_input(
+            BenchmarkId::new("orx-into-vec (pond::Pool)", n),
+            n,
+            |b, _| {
+                let pool = || PondPool::new_threads_unbounded(32);
+                assert_eq!(&expected, &orx_into_vec_with(&input, pool()));
+                b.iter(|| orx_into_vec_with(black_box(&input), pool()))
+            },
+        );
     }
 
     group.finish();
diff --git a/benches/reduce_iter_into_par.rs b/benches/reduce_iter_into_par.rs
index 7d00d3a..817c80b 100644
--- a/benches/reduce_iter_into_par.rs
+++ b/benches/reduce_iter_into_par.rs
@@ -93,6 +93,17 @@ fn orx(inputs: &[usize]) -> Option<Output> {
         .reduce(reduce)
 }
 
+#[allow(dead_code)]
+fn orx_with<P: ParThreadPool>(inputs: &[usize], pool: P) -> Option<Output> {
+    inputs
+        .into_iter()
+        .iter_into_par()
+        .with_pool(pool)
+        .map(map)
+        .filter(filter)
+        .reduce(reduce)
+}
+
 fn run(c: &mut Criterion) {
     let treatments = [65_536 * 2];
 
@@ -116,6 +127,52 @@ fn run(c: &mut Criterion) {
             assert_eq!(&expected, &orx(&input));
             b.iter(|| orx(black_box(&input)))
         });
+
+        #[cfg(feature = "rayon-core")]
+        group.bench_with_input(
+            BenchmarkId::new("orx (rayon-core::ThreadPool)", n),
+            n,
+            |b, _| {
+                let pool = rayon_core::ThreadPoolBuilder::new()
+                    .num_threads(32)
+                    .build()
+                    .unwrap();
+                assert_eq!(&expected, &orx_with(&input, &pool));
+                b.iter(|| orx_with(black_box(&input), &pool))
+            },
+        );
+
+        #[cfg(feature = "scoped-pool")]
+        group.bench_with_input(BenchmarkId::new("orx (scoped-pool::Pool)", n), n, |b, _| {
+            let pool = scoped_pool::Pool::new(32);
+            assert_eq!(&expected, &orx_with(&input, &pool));
+            b.iter(|| orx_with(black_box(&input), &pool))
+        });
+
+        #[cfg(feature = "scoped_threadpool")]
+        group.bench_with_input(
+            BenchmarkId::new("orx (scoped_threadpool::Pool)", n),
+            n,
+            |b, _| {
+                let pool = || scoped_threadpool::Pool::new(32);
+                assert_eq!(&expected, &orx_with(&input, pool()));
+                b.iter(|| orx_with(black_box(&input), pool()))
+            },
+        );
+
+        #[cfg(feature = "yastl")]
+        group.bench_with_input(BenchmarkId::new("orx (yastl::Pool)", n), n, |b, _| {
+            let pool = YastlPool::new(32);
+            assert_eq!(&expected, &orx_with(&input, &pool));
+            b.iter(|| orx_with(black_box(&input), &pool))
+        });
+
+        #[cfg(feature = "pond")]
+        group.bench_with_input(BenchmarkId::new("orx (pond::Pool)", n), n, |b, _| {
+            let pool = || PondPool::new_threads_unbounded(32);
+            assert_eq!(&expected, &orx_with(&input, pool()));
+            b.iter(|| orx_with(black_box(&input), pool()))
+        });
     }
 
     group.finish();
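For quick reference, below is a minimal, self-contained sketch of the pattern the added benchmarks exercise: the computation is written once and stays generic over the thread pool it runs on. The `ParThreadPool` bound, `with_pool`, and the pool constructors are taken from the diff above; the helper name `double_evens`, the input data, and the thread counts are illustrative assumptions, and the example assumes the `scoped_threadpool` and `scoped-pool` features are enabled.

```rust
use orx_parallel::*;

// Written once, runs on any pool implementing `ParThreadPool`
// (hypothetical helper; mirrors `orx_into_vec_with` from the benchmarks).
fn double_evens<P: ParThreadPool>(inputs: &[u64], pool: P) -> Vec<u64> {
    inputs
        .into_par()
        .with_pool(pool)
        .map(|x| x * 2)
        .filter(|x| x % 4 == 0)
        .collect()
}

fn main() {
    let inputs: Vec<u64> = (0..1_000).collect();
    let expected: Vec<u64> = inputs.iter().map(|x| x * 2).filter(|x| x % 4 == 0).collect();

    // owned pool, as in the scoped_threadpool benchmarks (requires the "scoped_threadpool" feature)
    let owned = scoped_threadpool::Pool::new(8);
    assert_eq!(double_evens(&inputs, owned), expected);

    // pool passed by reference, as in the scoped-pool benchmarks (requires the "scoped-pool" feature)
    let shared = scoped_pool::Pool::new(8);
    assert_eq!(double_evens(&inputs, &shared), expected);
}
```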